repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
forgottenlabs/wklejorg | _files/apps/wklej-0.1.5.py | 2 | 2966 | #!/usr/bin/env python
"""
Simple wklej.org paste script
license: GPLv2
version 0.1.4
thanks ch4os for help
"""
from optparse import OptionParser
import commands
import sys
import xmlrpclib
# Pygments lexer short names accepted by wklej.org for syntax highlighting
# (passed straight through to the server; see checkSyntax below).
syntaxes = [
    'text', 'apache', 'actionscript', 'actionscript3', 'bash', 'bat', 'bbcode', 'befuge', 'boo', 'brainfuck', 'c-objdump',
    'c', 'cheetah', 'clojure', 'common-lisp', 'control', 'cpp', 'cpp-objdump', 'csharp', 'css+django', 'css+ruby', 'css+genshi', 'css+mako',
    'css+myghty', 'css+php', 'css+smarty', 'css', 'd-objdump', 'd', 'delphi', 'diff', 'django', 'dpatch', 'dylan', 'erb', 'erlang', 'fortran',
    'gas', 'genshi', 'genshitext', 'gnuplot', 'groff', 'haskell', 'html+cheetah', 'html+django', 'html+genshi', 'html+mako', 'html+myghty',
    'html+php', 'html+smarty', 'html', 'ini', 'io', 'irc', 'java', 'js+cheetah', 'js+django', 'js+ruby', 'js+genshi', 'js+mako', 'js+myghty',
    'js+php', 'js+smarty', 'js', 'jsp', 'literate-haskell', 'lighttpd', 'llvm', 'logtalk', 'lua', 'make', 'basemake', 'mako', 'matlab',
    'matlabsession', 'minid', 'moocode', 'mupad', 'myghty', 'mysql', 'nasm', 'nginx', 'numpy', 'objdump', 'objective-c', 'ocaml', 'perl',
    'php', 'pot', 'pov', 'py3tb', 'pycon', 'pytb', 'python', 'python3', 'raw', 'ruby', 'rbcon', 'redcode', 'rhtml', 'restructuredtext',
    'scala', 'scheme', 'smalltalk', 'smarty', 'sourceslist', 'splus', 'sql', 'sqlite3', 'squidconf', 'tcl', 'tcsh', 'latex', 'trac-wiki',
    'vbnet', 'vim', 'xml+cheetah', 'xml+django', 'xml+ruby', 'xml+mako', 'xml+myghty', 'xml+php', 'xml+smarty', 'xml', 'xslt', 'yaml',
]
# Space-separated rendering of ``syntaxes``; filled in below and appended to
# the --syntax option's help text.
syntaxy = ""
def getAuthor():
    """Return the current system user name for the paste's author field.

    Shells out to ``whoami`` via ``commands.getoutput``; if that fails for
    any reason the anonymous default ``'Anonim'`` is used instead of
    aborting the paste.
    """
    try:
        author = commands.getoutput('whoami')
    except Exception:
        # Was a bare ``except:`` — that would also swallow SystemExit and
        # KeyboardInterrupt; only genuine errors should fall back here.
        author = 'Anonim'
    return author
def checkSyntax(syn):
if syn in syntaxes:
pass
else:
print "WRONG SYNTAX"
sys.exit(1)
def getTresc(input):
if input:
tresc = input
else:
print "Input is empty!"
sys.exit(1)
return tresc
# Render the syntax list as one space-prefixed, space-separated string for
# the --syntax option's help text (same result as the old append loop).
syntaxy = "".join(" " + name for name in syntaxes)
usage = """
To paste something:
$ echo 'something' | wklej -s syntax
$ cat file | wklej -s syntax
$ wklej -s syntax"""
parser = OptionParser(usage=usage)
parser.add_option("-s", "--syntax", dest="syntax", help="Choose one:" + syntaxy, default="text")
parser.add_option("-a", "--author", dest="author", help="", default=getAuthor())
parser.add_option("-p", "--private", action="store_true", dest="private", default=False)
def main(args=sys.argv):
(options, args) = parser.parse_args()
checkSyntax(options.syntax)
tresc = getTresc(sys.stdin.read())
rpc_srv = xmlrpclib.ServerProxy("http://wklej.org/xmlrpc/")
if options.private:
result = rpc_srv.dodaj_prywatny_wpis(tresc, options.syntax, options.author)
else:
result = rpc_srv.dodaj_wpis(tresc, options.syntax, options.author)
print "http://wklej.org%s" % result
sys.exit(0)
if __name__=='__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Quietly abort on Ctrl-C without printing a traceback.
        pass
| mit |
iansf/sky_engine | third_party/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
# True when running under Python 2.x; selects which compat branch below runs.
PY2 = sys.version_info[0] == 2
# True on PyPy, which exposes ``pypy_translation_info`` on sys.
PYPY = hasattr(sys, 'pypy_translation_info')
# No-op passthrough used for decorators that are only needed on Python 2.
_identity = lambda x: x
if not PY2:
    # Python 3: the native builtins already have the desired semantics,
    # so most names are simple aliases.
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)

    # dict iteration helpers that return iterators on both major versions.
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    import pickle
    from io import BytesIO, StringIO
    NativeStringIO = StringIO

    def reraise(tp, value, tb=None):
        # Re-raise ``value`` with traceback ``tb`` (Python-3 style raise).
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern

    # These class decorators only have work to do on Python 2; here they
    # are no-ops.
    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity

    get_next = lambda x: x.__next__
else:
    # Python 2: alias the 2.x builtins and modules under the common names.
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    NativeStringIO = BytesIO

    # The three-argument ``raise tp, value, tb`` form is a syntax error on
    # Python 3, so it must be hidden inside exec() to keep this module
    # importable there.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    from itertools import imap, izip, ifilter
    intern = intern

    def implements_iterator(cls):
        # Map a Python-3-style __next__ method onto the 2.x ``next`` protocol.
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        # On 2.x, __str__ must return bytes; keep the unicode variant and
        # derive the byte version by UTF-8 encoding it.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    get_next = lambda x: x.next

    def encode_filename(filename):
        # 2.x filesystem APIs expect byte strings.
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename
try:
    # ``next`` is a builtin from Python 2.6 on; provide a fallback for
    # older interpreters where the name does not exist.
    next = next
except NameError:
    def next(it):
        return it.next()
def with_metaclass(meta, *bases):
    """Return a temporary base class that applies metaclass ``meta``.

    Works on both Python 2 and 3; use as
    ``class Foo(with_metaclass(Meta, Base)): ...``.
    """
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instanciation that replaces
    # itself with the actual metaclass. Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # ``this_bases is None`` only for the temporary class itself;
            # real subclasses are rebuilt directly with ``meta``/``bases``.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
# Types accepted where a mapping is expected.  ``collections.Mapping`` moved
# to ``collections.abc`` in Python 3.3 and the old alias was removed in
# 3.10, so try the new location first; the plain ``collections`` import
# covers 2.6–3.9, and the UserDict tuple covers ancient interpreters
# without the ABCs.
try:
    from collections.abc import Mapping as mapping_types
except ImportError:
    try:
        from collections import Mapping as mapping_types
    except ImportError:
        import UserDict
        mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# common types. These do exist in the special types module too which however
# does not exist in IronPython out of the box. Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
    def method(self): pass

def _func():
    yield None

# Derive the runtime type objects from throwaway definitions above.
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)

# Raise and catch a dummy exception purely to obtain a live traceback
# object, from which the traceback and frame types are extracted.
try:
    raise TypeError()
except TypeError:
    _tb = sys.exc_info()[2]
    traceback_type = type(_tb)
    frame_type = type(_tb.tb_frame)
# URL quoting moved into ``urllib.parse`` on Python 3.
try:
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote

# Lock allocation: prefer the low-level ``thread`` module (Python 2), then
# ``threading``, and finally the dummy implementation for builds compiled
# without thread support.
try:
    from thread import allocate_lock
except ImportError:
    try:
        from threading import Lock as allocate_lock
    except ImportError:
        from dummy_thread import allocate_lock
| bsd-3-clause |
vperron/sentry | src/sentry/migrations/0098_auto__add_user__chg_field_team_owner__chg_field_activity_user__chg_fie.py | 36 | 28778 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models, connections
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the auth_user tables, skipping databases that already have them."""
    table_names = connections['default'].introspection.table_names()
    if 'auth_user' not in table_names:
        self.create_auth(orm)
def create_auth(self, orm):
    """Create the ``auth_user`` table and its two M2M join tables.

    Mirrors Django's stock auth.User schema so Sentry's custom user model
    can take over the same table.  The DDL statements must run in this
    exact order (base table first, then join tables, then uniques).
    """
    # Adding model 'User'
    db.create_table('auth_user', (
        (u'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
        ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
        ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
        ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
        ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
    ))
    db.send_create_signal(u'auth', ['User'])
    # Adding M2M table for field groups on 'User'
    db.create_table('auth_user_groups', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
        ('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.group'], null=False))
    ))
    db.create_unique('auth_user_groups', ['user_id', 'group_id'])
    # Adding M2M table for field user_permissions on 'User'
    db.create_table('auth_user_user_permissions', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'sentry.user'], null=False)),
        ('permission', self.gf('sentry.db.models.fields.FlexibleForeignKey')(orm[u'auth.permission'], null=False))
    ))
    db.create_unique('auth_user_user_permissions', ['user_id', 'permission_id'])
def backwards(self, orm):
    # Intentionally a no-op: the auth tables may predate this migration
    # (forwards() skips creation when they already exist), so dropping
    # them on rollback would not be safe.
    pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Group']"}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': u"orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
u'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
mantidproject/mantid | qt/applications/workbench/workbench/plugins/test/test_exception_handler.py | 3 | 1875 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid import UsageService
from unittest.mock import patch
from mantidqt.utils.testing.mocks.mock_qt import MockQWidget
from workbench.plugins.exception_handler import exception_logger
class ExceptionHandlerTest(unittest.TestCase):
    """Tests for the workbench ``exception_logger`` hook."""

    @classmethod
    def tearDownClass(cls):
        # Leave usage reporting switched off for any later test modules.
        UsageService.setEnabled(False)

    @patch('workbench.plugins.exception_handler.WorkbenchErrorMessageBox')
    @patch('workbench.plugins.exception_handler.logger')
    def test_exception_logged_no_UsageService(self, mock_logger, mock_WorkbenchErrorMessageBox):
        UsageService.setEnabled(False)
        parent = MockQWidget()
        error_box = MockQWidget()
        mock_WorkbenchErrorMessageBox.return_value = error_box

        exception_logger(parent, ValueError, None, None)

        self.assertEqual(mock_logger.error.call_count, 1)
        self.assertEqual(mock_WorkbenchErrorMessageBox.call_count, 1)
        error_box.exec_.assert_called_once_with()

    @patch('workbench.plugins.exception_handler.CrashReportPage')
    @patch('workbench.plugins.exception_handler.logger')
    def test_exception_logged(self, mock_logger, mock_CrashReportPage):
        UsageService.setEnabled(True)
        parent = MockQWidget()

        exception_logger(parent, ValueError, None, None)

        self.assertEqual(mock_logger.error.call_count, 1)
        mock_CrashReportPage.assert_called_once_with(show_continue_terminate=True)
        # The crash-report page defaults to 'continue working', so the
        # parent widget must not have been closed.
        self.assertEqual(parent.close.call_count, 0)
| gpl-3.0 |
inodb/cbioportal | core/src/main/scripts/importer/cbioportal_common.py | 2 | 34127 | #! /usr/bin/env python
# ------------------------------------------------------------------------------
# Common components used by various cbioportal scripts.
# ------------------------------------------------------------------------------
import os
import sys
import csv
import logging.handlers
from collections import OrderedDict
from subprocess import Popen, PIPE, STDOUT
# ------------------------------------------------------------------------------
# globals

# default streams for this module's status output (run_java writes to OUTPUT_FILE)
ERROR_FILE = sys.stderr
OUTPUT_FILE = sys.stdout

# global variables to check `source_stable_id` for `genomic_profile_link`
# (populated by parse_metadata_file() as meta files are read; the calling
# validation code compares them against each other afterwards)
expression_stable_ids = []
gsva_scores_stable_id = ""
expression_zscores_source_stable_ids = {}
gsva_scores_source_stable_id = ""
gsva_pvalues_source_stable_id = ""
expression_zscores_filename = ""
gsva_scores_filename = ""
gsva_pvalues_filename = ""

# fully qualified Java class names used by the cBioPortal import scripts
IMPORT_STUDY_CLASS = "org.mskcc.cbio.portal.scripts.ImportCancerStudy"
UPDATE_STUDY_STATUS_CLASS = "org.mskcc.cbio.portal.scripts.UpdateCancerStudy"
REMOVE_STUDY_CLASS = "org.mskcc.cbio.portal.scripts.RemoveCancerStudy"
IMPORT_CANCER_TYPE_CLASS = "org.mskcc.cbio.portal.scripts.ImportTypesOfCancers"
IMPORT_CASE_LIST_CLASS = "org.mskcc.cbio.portal.scripts.ImportSampleList"
ADD_CASE_LIST_CLASS = "org.mskcc.cbio.portal.scripts.AddCaseList"
VERSION_UTIL_CLASS = "org.mskcc.cbio.portal.util.VersionUtil"
class MetaFileTypes(object):
    """How we differentiate between data types.

    Each constant is the canonical meta file name for that data type
    (e.g. ``meta_study``); META_FIELD_MAP below lists the fields each
    type requires or allows.
    """
    STUDY = 'meta_study'
    CANCER_TYPE = 'meta_cancer_type'
    SAMPLE_ATTRIBUTES = 'meta_clinical_sample'
    PATIENT_ATTRIBUTES = 'meta_clinical_patient'
    CNA = 'meta_CNA'
    CNA_LOG2 = 'meta_log2CNA'
    CNA_CONTINUOUS = 'meta_contCNA'
    SEG = 'meta_segment'
    EXPRESSION = 'meta_expression'
    MUTATION = 'meta_mutations_extended'
    METHYLATION = 'meta_methylation'
    FUSION = 'meta_fusions'
    PROTEIN = 'meta_protein'
    GISTIC_GENES = 'meta_gistic_genes'
    TIMELINE = 'meta_timeline'
    CASE_LIST = 'case_list'
    MUTATION_SIGNIFICANCE = 'meta_mutsig'
    GENE_PANEL_MATRIX = 'meta_gene_panel_matrix'
    GSVA_SCORES = 'meta_gsva_scores'
    GSVA_PVALUES = 'meta_gsva_pvalues'
# fields allowed in each meta file type, maps to True if required
META_FIELD_MAP = {
    MetaFileTypes.CANCER_TYPE: {
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.STUDY: {
        'cancer_study_identifier': True,
        'type_of_cancer': True,
        'name': True,
        'description': True,
        'short_name': True,
        'citation': False,
        'pmid': False,
        'groups': False,
        'add_global_case_list': False
    },
    MetaFileTypes.SAMPLE_ATTRIBUTES: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.PATIENT_ATTRIBUTES: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.CNA: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.CNA_LOG2: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.CNA_CONTINUOUS: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.SEG: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'reference_genome_id': True,
        'data_filename': True,
        'description': True
    },
    MetaFileTypes.MUTATION: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'normal_samples_list': False,
        'swissprot_identifier': False,
        'gene_panel': False,
        'variant_classification_filter': False
    },
    MetaFileTypes.EXPRESSION: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'source_stable_id': False,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.METHYLATION: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.PROTEIN: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.FUSION: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'show_profile_in_analysis_tab': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'gene_panel': False
    },
    MetaFileTypes.GISTIC_GENES: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'reference_genome_id': True,
        'data_filename': True
    },
    MetaFileTypes.TIMELINE: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.CASE_LIST: {
        'cancer_study_identifier': True,
        'stable_id': True,
        'case_list_name': True,
        'case_list_description': True,
        'case_list_ids': True,
        'case_list_category': False # TODO this is used in org.mskcc.cbio.portal.model.AnnotatedPatientSets.getDefaultPatientList(), decide whether to keep, see #494
    },
    MetaFileTypes.MUTATION_SIGNIFICANCE: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.GENE_PANEL_MATRIX: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'data_filename': True
    },
    MetaFileTypes.GSVA_PVALUES: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'source_stable_id': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'geneset_def_version': True
    },
    MetaFileTypes.GSVA_SCORES: {
        'cancer_study_identifier': True,
        'genetic_alteration_type': True,
        'datatype': True,
        'stable_id': True,
        'source_stable_id': True,
        'profile_name': True,
        'profile_description': True,
        'data_filename': True,
        'show_profile_in_analysis_tab': True,
        'geneset_def_version': True
    }
}

# Java importer class to run for each meta file type (see run_java below)
IMPORTER_CLASSNAME_BY_META_TYPE = {
    MetaFileTypes.STUDY: IMPORT_STUDY_CLASS,
    MetaFileTypes.CANCER_TYPE: IMPORT_CANCER_TYPE_CLASS,
    MetaFileTypes.SAMPLE_ATTRIBUTES: "org.mskcc.cbio.portal.scripts.ImportClinicalData",
    MetaFileTypes.PATIENT_ATTRIBUTES: "org.mskcc.cbio.portal.scripts.ImportClinicalData",
    MetaFileTypes.CNA: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.CNA_LOG2: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.CNA_CONTINUOUS: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.SEG: "org.mskcc.cbio.portal.scripts.ImportCopyNumberSegmentData",
    MetaFileTypes.EXPRESSION: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.MUTATION: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.METHYLATION: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.FUSION: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.PROTEIN: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.GISTIC_GENES: "org.mskcc.cbio.portal.scripts.ImportGisticData",
    MetaFileTypes.TIMELINE: "org.mskcc.cbio.portal.scripts.ImportTimelineData",
    MetaFileTypes.CASE_LIST: IMPORT_CASE_LIST_CLASS,
    MetaFileTypes.MUTATION_SIGNIFICANCE: "org.mskcc.cbio.portal.scripts.ImportMutSigData",
    MetaFileTypes.GENE_PANEL_MATRIX: "org.mskcc.cbio.portal.scripts.ImportGenePanelProfileMap",
    MetaFileTypes.GSVA_SCORES: "org.mskcc.cbio.portal.scripts.ImportProfileData",
    MetaFileTypes.GSVA_PVALUES: "org.mskcc.cbio.portal.scripts.ImportProfileData"
}

# whether each importer class expects the meta file to be passed along
IMPORTER_REQUIRES_METADATA = {
    "org.mskcc.cbio.portal.scripts.ImportClinicalData" : True,
    "org.mskcc.cbio.portal.scripts.ImportCopyNumberSegmentData" : True,
    "org.mskcc.cbio.portal.scripts.ImportGisticData" : False,
    "org.mskcc.cbio.portal.scripts.ImportMutSigData" : False,
    "org.mskcc.cbio.portal.scripts.ImportProfileData" : True,
    "org.mskcc.cbio.portal.scripts.ImportTimelineData" : True,
    "org.mskcc.cbio.portal.scripts.ImportGenePanelProfileMap" : False
}
# ------------------------------------------------------------------------------
# class definitions
class ValidationMessageFormatter(logging.Formatter):
    """Logging formatter with optional fields for data validation messages.

    These fields are:
    filename_ - the path to the file the message is about (if applicable)
    line_number - a line number within the above file (if applicable)
    column_number - a column number within the above file (if applicable)
    cause - the unexpected value found in the input (if applicable)

    If instead a message pertains to multiple values of one of these
    fields (as the result of aggregation by CollapsingLogMessageHandler),
    these will be expected in the field <fieldname>_list.
    """

    # attributes that only make sense when a file name is also present
    _POSITIONAL_ATTRS = ('line_number', 'line_number_list',
                         'column_number', 'column_number_list')

    def format(self, record, *args, **kwargs):
        """Check consistency of expected fields and format the record."""
        has_position = any(hasattr(record, attr)
                           for attr in self._POSITIONAL_ATTRS)
        if has_position and not hasattr(record, 'filename_'):
            raise ValueError(
                'Tried to log about a line/column with no filename')
        return super(ValidationMessageFormatter, self).format(
            record, *args, **kwargs)

    @staticmethod
    def format_aggregated(record,
                          field_name,
                          single_fmt='%s',
                          multiple_fmt='[%s]',
                          join_string=', ',
                          max_join=3,
                          optional=False):
        """Format a human-readable string for a field or its <field>_list.

        From log records as generated by the flush() method of
        CollapsingLogMessageHandler. A non-aggregated field is rendered
        with `single_fmt`; an aggregated one renders at most `max_join`
        values (stringified, `join_string`-separated, with a '(N more)'
        marker for the rest) inside `multiple_fmt`. A `max_join` of None
        means join all values. If `optional` is True and neither the
        field nor its list is present, return an empty string.
        """
        single_value = getattr(record, field_name, None)
        value_list = getattr(record, field_name + '_list', None)
        if single_value is not None:
            return single_fmt % single_value
        if value_list is not None:
            # None means 'show all of them, no maximum'
            limit = len(value_list) if max_join is None else max_join
            shown = [str(item) for item in value_list[:limit]]
            remaining = len(value_list) - len(shown)
            if remaining:
                shown.append('(%d more)' % remaining)
            return multiple_fmt % join_string.join(shown)
        if optional:
            return ''
        raise ValueError(
            "Tried to format an absent non-optional log field: '%s'" %
            field_name)
class LogfileStyleFormatter(ValidationMessageFormatter):
    """Formatter for validation messages in a simple one-per-line format."""

    def __init__(self, study_dir):
        """Initialize a logging Formatter with an appropriate format string.

        :param study_dir: base directory; file paths in log messages are
            shown relative to it
        """
        super(LogfileStyleFormatter, self).__init__(
            fmt='%(levelname)s: %(file_indicator)s:'
                '%(line_indicator)s%(column_indicator)s'
                ' %(message)s%(cause_indicator)s')
        self.study_dir = study_dir
        # last filename reported on, used to insert a separating blank line
        # when the subject file changes
        self.previous_filename = None

    def format(self, record):
        """Generate descriptions for optional fields and format the record."""
        # derive the *_indicator attributes referenced by the fmt string above
        if not hasattr(record, 'filename_'):
            record.file_indicator = '-'
        else:
            record.file_indicator = os.path.relpath(record.filename_.strip(),
                                                    self.study_dir)
        record.line_indicator = self.format_aggregated(
            record,
            'line_number',
            ' line %d:',
            ' lines [%s]:',
            optional=True)
        record.column_indicator = self.format_aggregated(
            record,
            'column_number',
            ' column %d:',
            ' columns [%s]:',
            optional=True)
        record.cause_indicator = self.format_aggregated(
            record,
            'cause',
            "; value encountered: '%s'",
            "; values encountered: ['%s']",
            join_string="', '",
            optional=True)
        # format the string based on these fields
        formatted_result = super(LogfileStyleFormatter, self).format(record)
        # prepend an empty line if the filename is different than before
        current_filename = getattr(record, 'filename_', '')
        if (self.previous_filename is not None and
                current_filename != self.previous_filename):
            formatted_result = '\n' + formatted_result
        self.previous_filename = current_filename
        return formatted_result
class CollapsingLogMessageHandler(logging.handlers.MemoryHandler):
    """Logging handler that aggregates repeated log messages into one.

    This collapses validation LogRecords based on the source code line that
    emitted them and their formatted message, and flushes the resulting
    records to another handler.
    """
    # TODO make it collapse messages on emit, instead of keeping it all in RAM

    def flush(self):
        """Aggregate LogRecords by message and send them to the target handler.

        Fields that occur with multiple different values in LogRecords
        emitted from the same line with the same message (and optional
        'filename_' attribute) will be collected in a field named
        <field_name>_list.
        """
        # bucket buffered records by emitting source line, subject file
        # and formatted message
        buckets = OrderedDict()
        for record in self.buffer:
            bucket_key = (record.module,
                          record.lineno,
                          getattr(record, 'filename_', None),
                          record.getMessage())
            buckets.setdefault(bucket_key, []).append(record)

        collapsed = []
        # build one aggregate record per bucket of same-message records
        for same_message_records in buckets.values():
            merged_fields = {}
            # inspect every field present on (the first of) the records
            for field_name in same_message_records[0].__dict__:
                # ordered, de-duplicated values of this field across records
                # (OrderedDict keys stand in for the missing OrderedSet)
                distinct_values = OrderedDict(
                    (rec.__dict__[field_name], None)
                    for rec in same_message_records)
                if len(distinct_values) == 1:
                    # unanimous: keep the single value under the original name
                    merged_fields[field_name] = distinct_values.popitem()[0]
                else:
                    # divergent: collect all values under <field_name>_list
                    merged_fields[field_name + '_list'] = \
                        list(distinct_values.keys())
            collapsed.append(logging.makeLogRecord(merged_fields))

        # hand the collapsed buffer to MemoryHandler's regular flush
        self.buffer = collapsed
        super(CollapsingLogMessageHandler, self).flush()

    def shouldFlush(self, record):
        """Collapse and flush every time a debug message is emitted."""
        return (record.levelno == logging.DEBUG or
                super(CollapsingLogMessageHandler, self).shouldFlush(record))
# ------------------------------------------------------------------------------
# sub-routines
def get_meta_file_type(meta_dictionary, logger, filename):
    """
    Returns one of the metatypes found in MetaFileTypes

    NB: a subset of these types (combined with allowed_data_types.txt)
    is also tracked in org.cbioportal.model.GeneticProfile.java. If you add
    things here, please make sure to update there as well if it regards a
    genetic profile data type.
    """
    # The following dictionary is required to define the MetaFileType for all
    # combinations, which are used in validateData to determine which validator
    # should be used. There is some redundancy with allowed_data_types.txt, which
    # also contains genetic alteration types and datatype combinations, but is used
    # to check if the correct stable id is used.
    #     (GENETIC_ALTERATION_TYPE, DATATYPE) -> meta file type
    alt_type_datatype_to_meta = {
        # cancer type
        ("CANCER_TYPE", "CANCER_TYPE"): MetaFileTypes.CANCER_TYPE,
        # clinical and timeline
        ("CLINICAL", "PATIENT_ATTRIBUTES"): MetaFileTypes.PATIENT_ATTRIBUTES,
        ("CLINICAL", "SAMPLE_ATTRIBUTES"): MetaFileTypes.SAMPLE_ATTRIBUTES,
        ("CLINICAL", "TIMELINE"): MetaFileTypes.TIMELINE,
        # rppa and mass spectrometry
        ("PROTEIN_LEVEL", "LOG2-VALUE"): MetaFileTypes.PROTEIN,
        ("PROTEIN_LEVEL", "Z-SCORE"): MetaFileTypes.PROTEIN,
        ("PROTEIN_LEVEL", "CONTINUOUS"): MetaFileTypes.PROTEIN,
        # cna
        ("COPY_NUMBER_ALTERATION", "DISCRETE"): MetaFileTypes.CNA,
        ("COPY_NUMBER_ALTERATION", "CONTINUOUS"): MetaFileTypes.CNA_CONTINUOUS,
        ("COPY_NUMBER_ALTERATION", "LOG2-VALUE"): MetaFileTypes.CNA_LOG2,
        ("COPY_NUMBER_ALTERATION", "SEG"): MetaFileTypes.SEG,
        # expression
        ("MRNA_EXPRESSION", "CONTINUOUS"): MetaFileTypes.EXPRESSION,
        ("MRNA_EXPRESSION", "Z-SCORE"): MetaFileTypes.EXPRESSION,
        ("MRNA_EXPRESSION", "DISCRETE"): MetaFileTypes.EXPRESSION,
        # mutations
        ("MUTATION_EXTENDED", "MAF"): MetaFileTypes.MUTATION,
        # others
        ("METHYLATION", "CONTINUOUS"): MetaFileTypes.METHYLATION,
        ("FUSION", "FUSION"): MetaFileTypes.FUSION,
        ("GENE_PANEL_MATRIX", "GENE_PANEL_MATRIX"): MetaFileTypes.GENE_PANEL_MATRIX,
        # cross-sample molecular statistics (for gene selection)
        ("GISTIC_GENES_AMP", "Q-VALUE"): MetaFileTypes.GISTIC_GENES,
        ("GISTIC_GENES_DEL", "Q-VALUE"): MetaFileTypes.GISTIC_GENES,
        ("MUTSIG", "Q-VALUE"): MetaFileTypes.MUTATION_SIGNIFICANCE,
        ("GENESET_SCORE", "GSVA-SCORE"): MetaFileTypes.GSVA_SCORES,
        ("GENESET_SCORE", "P-VALUE"): MetaFileTypes.GSVA_PVALUES
    }

    if 'genetic_alteration_type' in meta_dictionary and 'datatype' in meta_dictionary:
        combination = (meta_dictionary['genetic_alteration_type'],
                       meta_dictionary['datatype'])
        try:
            return alt_type_datatype_to_meta[combination]
        except KeyError:
            logger.error(
                'Could not determine the file type. Please check your meta files for correct configuration.',
                extra={'filename_': filename,
                       'cause': ('genetic_alteration_type: %s, '
                                 'datatype: %s' % combination)})
            return None
    if 'cancer_study_identifier' in meta_dictionary and 'type_of_cancer' in meta_dictionary:
        return MetaFileTypes.STUDY
    if 'type_of_cancer' in meta_dictionary:
        return MetaFileTypes.CANCER_TYPE
    logger.error('Could not determine the file type. Did not find expected meta file fields. Please check your meta files for correct configuration.',
                 extra={'filename_': filename})
    return None
def validate_types_and_id(meta_dictionary, logger, filename):
    """Validate a genetic_alteration_type, datatype (and stable_id in some cases)
    against the predefined allowed combinations found in ./allowed_data_types.txt

    :param meta_dictionary: fields parsed from the meta file
    :param logger: logging.Logger instance used to report an invalid stable_id
    :param filename: path of the meta file, included in the log message
    :return: True if valid (or if the check does not apply), False otherwise
    :raises RuntimeError: if the type combination is absent from the
        allowed_data_types.txt file (should have been caught earlier by
        get_meta_file_type)
    """
    result = True
    # this validation only applies to items that have genetic_alteration_type and datatype and stable_id
    if 'genetic_alteration_type' in meta_dictionary and 'datatype' in meta_dictionary and 'stable_id' in meta_dictionary:
        alt_type_datatype_and_stable_id = {}
        script_dir = os.path.dirname(__file__)
        allowed_data_types_file_name = os.path.join(script_dir, "allowed_data_types.txt")
        data_line_nr = 0
        # build up map of (genetic_alteration_type, datatype) -> [stable ids]:
        with open(allowed_data_types_file_name) as allowed_data_types_file:
            for line in allowed_data_types_file:
                if line.startswith("#"):
                    continue
                data_line_nr += 1
                # skip the header; every other line is tab separated:
                if data_line_nr > 1:
                    # FIX: use the next() builtin instead of the Python-2-only
                    # reader.next() method, which breaks under Python 3
                    line_cols = next(csv.reader([line], delimiter='\t'))
                    genetic_alteration_type = line_cols[0]
                    data_type = line_cols[1]
                    # add to map:
                    alt_type_datatype_and_stable_id.setdefault(
                        (genetic_alteration_type, data_type), []).append(line_cols[2])
        # init:
        stable_id = meta_dictionary['stable_id']
        genetic_alteration_type = meta_dictionary['genetic_alteration_type']
        data_type = meta_dictionary['datatype']
        # validate the genetic_alteration_type/data_type combination:
        if (genetic_alteration_type, data_type) not in alt_type_datatype_and_stable_id:
            # unexpected as this is already validated in get_meta_file_type
            raise RuntimeError('Unexpected error: genetic_alteration_type and data_type combination not found in allowed_data_types.txt.',
                               genetic_alteration_type, data_type)
        # validate stable_id:
        elif stable_id not in alt_type_datatype_and_stable_id[(genetic_alteration_type, data_type)]:
            logger.error("Invalid stable id for genetic_alteration_type '%s', "
                         "data_type '%s'; expected one of [%s]",
                         genetic_alteration_type,
                         data_type,
                         ', '.join(alt_type_datatype_and_stable_id[(genetic_alteration_type, data_type)]),
                         extra={'filename_': filename,
                                'cause': stable_id}
                         )
            result = False
    return result
def parse_metadata_file(filename,
                        logger,
                        study_id=None,
                        genome_name=None,
                        case_list=False):
    """Validate a metafile and return a dictionary of values read from it and
    the meta_file_type according to get_meta_file_type.

    `meta_file_type` will be `None` if the file is invalid. If `case_list`
    is True, read the file as a case list instead of a meta file.

    :param filename: name of the meta file
    :param logger: the logging.Logger instance to log warnings and errors to
    :param study_id: (optional - set if you want study_id to be validated)
        cancer study id found in previous files (or None). All subsequent
        meta files should comply to this in the field 'cancer_study_identifier'
    :param genome_name: (optional - set if you want this to be validated)
        supported reference genome name, for validation
    :param case_list: whether this meta file is a case list (special case)
    :return: dict of key/value pairs from the file plus a 'meta_file_type'
        entry (None if any validation failed)
    """
    logger.debug('Starting validation of meta file', extra={'filename_': filename})
    # Read meta file
    meta_dictionary = {}
    # NOTE(review): 'rU' is the Python-2 universal-newline mode; it was
    # removed in Python 3.11 -- confirm before running under modern Python.
    with open(filename, 'rU') as metafile:
        for line_index, line in enumerate(metafile):
            # skip empty lines:
            if line.strip() == '':
                continue
            # every non-empty line must be a 'key: value' pair
            if ':' not in line:
                logger.error(
                    "Invalid %s file entry, no ':' found",
                    {True: 'case list', False: 'meta'}[case_list],
                    extra={'filename_': filename,
                           'line_number': line_index + 1})
                meta_dictionary['meta_file_type'] = None
                return meta_dictionary
            # split on the first ':' only; values may contain colons
            key_value = line.split(':', 1)
            if len(key_value) == 2:
                meta_dictionary[key_value[0]] = key_value[1].strip()
    # Determine meta file type
    if case_list:
        meta_file_type = MetaFileTypes.CASE_LIST
        meta_dictionary['meta_file_type'] = meta_file_type
    else:
        meta_file_type = get_meta_file_type(meta_dictionary, logger, filename)
        meta_dictionary['meta_file_type'] = meta_file_type
        # if type could not be inferred, no further validations are possible
        if meta_file_type is None:
            return meta_dictionary
    # Check for missing fields for this specific meta file type
    missing_fields = []
    for field in META_FIELD_MAP[meta_file_type]:
        mandatory = META_FIELD_MAP[meta_file_type][field]
        if field not in meta_dictionary and mandatory:
            logger.error("Missing field '%s' in %s file",
                         field,
                         {True: 'case list', False: 'meta'}[case_list],
                         extra={'filename_': filename})
            missing_fields.append(field)
    if missing_fields:
        # all further checks would depend on these fields being present
        meta_dictionary['meta_file_type'] = None
        return meta_dictionary
    # validate genetic_alteration_type, datatype, stable_id
    stable_id_mandatory = META_FIELD_MAP[meta_file_type].get('stable_id',
                                                             False)
    if stable_id_mandatory:
        valid_types_and_id = validate_types_and_id(meta_dictionary, logger, filename)
        if not valid_types_and_id:
            # invalid meta file type
            meta_dictionary['meta_file_type'] = None
            return meta_dictionary
    # check for extra unrecognized fields
    for field in meta_dictionary:
        if field not in META_FIELD_MAP[meta_file_type]:
            # Don't give warning for added 'meta_file_type'
            if field == "meta_file_type":
                pass
            else:
                logger.warning(
                    'Unrecognized field in %s file',
                    {True: 'case list', False: 'meta'}[case_list],
                    extra={'filename_': filename,
                           'cause': field})
    # check that cancer study identifiers across files so far are consistent.
    if (
            study_id is not None and
            'cancer_study_identifier' in meta_dictionary and
            study_id != meta_dictionary['cancer_study_identifier']):
        logger.error(
            "Cancer study identifier is not consistent across "
            "files, expected '%s'",
            study_id,
            extra={'filename_': filename,
                   'cause': meta_dictionary['cancer_study_identifier']})
        # not a valid meta file in this study
        meta_dictionary['meta_file_type'] = None
        return meta_dictionary
    # type-specific validations
    if meta_file_type in (MetaFileTypes.SEG, MetaFileTypes.GISTIC_GENES):
        if genome_name is not None and meta_dictionary['reference_genome_id'] != genome_name:
            logger.error(
                'Reference_genome_id is not %s',
                genome_name,
                extra={'filename_': filename,
                       'cause': meta_dictionary['reference_genome_id']})
            #meta_file_type = None
            meta_dictionary['meta_file_type'] = None
    if meta_file_type == MetaFileTypes.MUTATION:
        if ('swissprot_identifier' in meta_dictionary and
                meta_dictionary['swissprot_identifier'] not in ('name',
                                                                'accession')):
            logger.error(
                "Invalid swissprot_identifier specification, must be either "
                "'name' or 'accession'",
                extra={'filename_': filename,
                       'cause': meta_dictionary['swissprot_identifier']})
            meta_dictionary['meta_file_type'] = None
    # Save information regarding `source_stable_id`, so that after all meta files are validated,
    # we can validate fields between meta files in validate_dependencies() in validateData.py
    global gsva_scores_stable_id
    global gsva_scores_source_stable_id
    global gsva_pvalues_source_stable_id
    global gsva_scores_filename
    global gsva_pvalues_filename
    # save all expression `stable_id` in list
    if meta_file_type is MetaFileTypes.EXPRESSION:
        if 'stable_id' in meta_dictionary:
            expression_stable_ids.append(meta_dictionary['stable_id'])
        # Save all zscore expression `source_stable_id` in dictionary with their filenames.
        # Multiple zscore expression files are possible, and we want to validate all their
        # source_stable_ids with expression stable ids
        if meta_dictionary['datatype'] == "Z-SCORE":
            if 'source_stable_id' in meta_dictionary:
                expression_zscores_source_stable_ids[meta_dictionary['source_stable_id']] = filename
    # save stable_id and source_stable_id of GSVA Scores
    if meta_file_type is MetaFileTypes.GSVA_SCORES:
        gsva_scores_filename = filename
        if 'source_stable_id' in meta_dictionary:
            gsva_scores_source_stable_id = meta_dictionary['source_stable_id']
        # save 'stable_id' to check the 'source_stable_id' in GSVA_PVALUES file
        if 'stable_id' in meta_dictionary:
            gsva_scores_stable_id = meta_dictionary['stable_id']
    # save stable_id and source_stable_id of GSVA Pvalues
    if meta_file_type is MetaFileTypes.GSVA_PVALUES:
        gsva_pvalues_filename = filename
        if 'source_stable_id' in meta_dictionary:
            gsva_pvalues_source_stable_id = meta_dictionary['source_stable_id']
    logger.info('Validation of meta file complete', extra={'filename_': filename})
    return meta_dictionary
def run_java(*args):
    """Run the Java importer with the given command-line arguments.

    Output of the subprocess is streamed to OUTPUT_FILE as it arrives and
    collected for the caller.

    :param args: arguments for the `java` executable (class name and options)
    :return: list of output lines, with the process return code appended
        as the final element
    :raises RuntimeError: if the process exits with a non-zero status
    """
    java_home = os.environ.get('JAVA_HOME', '')
    if java_home:
        java_command = os.path.join(java_home, 'bin', 'java')
    else:
        java_command = 'java'
    process = Popen([java_command] + list(args), stdout=PIPE, stderr=STDOUT,
                    universal_newlines=True)
    ret = []
    # FIX: iterate the stream until EOF instead of polling with readline();
    # the old poll()-based loop could drop output emitted just before the
    # process exited, as well as a final line without a trailing newline.
    for raw_line in process.stdout:
        line = raw_line.rstrip('\n')
        # FIX: write() instead of the Python-2-only `print >>` statement
        OUTPUT_FILE.write(line.strip() + '\n')
        ret.append(line)
    # EOF on stdout does not guarantee the process has been reaped yet;
    # wait() ensures returncode is populated.
    process.wait()
    ret.append(process.returncode)
    # if cmd line parameters error:
    if process.returncode == 64 or process.returncode == 2:
        raise RuntimeError('Aborting. Step failed due to wrong parameters passed to subprocess.')
    # any other error:
    elif process.returncode != 0:
        raise RuntimeError('Aborting due to error while executing step.')
    return ret
| agpl-3.0 |
sauloal/cufflinksviewer | venvwin/Lib/encodings/cp1251.py | 93 | 13924 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot codec: both directions are single charmap lookups
    # against the tables generated further down in this module.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding is stateless, so `final` needs no special handling
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # charmap decoding is stateless, so `final` needs no special handling
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # stream support comes entirely from the Codec/StreamWriter bases
    pass
class StreamReader(Codec,codecs.StreamReader):
    # stream support comes entirely from the Codec/StreamReader bases
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry registered for 'cp1251' by the codecs machinery."""
    return codecs.CodecInfo(
        name='cp1251',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u20ac' # 0x88 -> EURO SIGN
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
u'\u2116' # 0xB9 -> NUMERO SIGN
u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
# Inverse of decoding_table: maps Unicode ordinals back to cp1251 byte values.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
txje/compatible-intervals | util.py | 1 | 1475 |
# ideally, find the maximum total 1:1 overlap between the two sets of intervals
# in reality, use only those overlapping regions which are the maximum overlap for both intervals
# I think it is currently O(n) in the number of intervals on one side since intervals may only overlap two others,
# ... so it is at most double overlap on both sides or 2*2 = 4n
def compareIntervals(invs0, invs1):
    """Score the 1:1 overlap between two sorted lists of intervals.

    Greedy matching: each interval in invs0 is paired with the FIRST invs1
    interval sharing the largest overlap with it, and the pair is counted
    only when that overlap is also maximal for the invs1 interval (i.e. no
    later invs0 interval overlaps it more).  Each invs1 interval is used at
    most once.

    Intervals are (start, end) pairs, each list sorted by start position.
    Returns the total length of all mutually-maximal overlaps.
    """
    j = 0
    score = 0
    # range() instead of xrange() keeps this working on both Python 2 and 3.
    for i in range(len(invs0)):
        maxoverlap = None
        jmax = None
        # Skip invs1 intervals that end before invs0[i] begins.
        while j < len(invs1) and invs1[j][1] < invs0[i][0]:
            j += 1
        l = j
        # Scan every invs1 interval overlapping invs0[i]; keep only the
        # FIRST maximum overlap occurrence - this is always best.
        while l < len(invs1) and invs1[l][0] <= invs0[i][1]:
            overlap = min(invs0[i][1], invs1[l][1]) - max(invs0[i][0], invs1[l][0])
            if maxoverlap is None or overlap > maxoverlap:
                maxoverlap = overlap
                jmax = l
            l += 1
        if jmax is not None:  # at least one overlapping interval
            k = i + 1
            # Reject the pair if any later invs0 interval overlaps
            # invs1[jmax] more than invs0[i] does.
            while k < len(invs0) and invs0[k][0] <= invs1[jmax][1]:
                overlap = min(invs0[k][1], invs1[jmax][1]) - max(invs0[k][0], invs1[jmax][0])
                if overlap > maxoverlap:
                    break
                k += 1
            else:  # maximum overlap for both sides: count it
                score += maxoverlap
                j = jmax + 1  # move ahead so this invs1 interval isn't reused
    return score
| mit |
IronLanguages/ironpython3 | Src/StdLib/Lib/test/test_tcl.py | 4 | 27564 | import unittest
import re
import subprocess
import sys
import os
from test import support
# Skip this test if the _tkinter module wasn't built.
_tkinter = support.import_module('_tkinter')
# Make sure tkinter._fix runs to set up the environment
tkinter = support.import_fresh_module('tkinter')
from tkinter import Tcl
from _tkinter import TclError
try:
from _testcapi import INT_MAX, PY_SSIZE_T_MAX
except ImportError:
INT_MAX = PY_SSIZE_T_MAX = sys.maxsize
# Tcl version as a comparable tuple, e.g. (8, 6).
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
# Cached result of get_tk_patchlevel(); populated lazily on first call.
_tk_patchlevel = None
def get_tk_patchlevel():
    """Query Tcl's 'info patchlevel' and cache it as a comparable 5-tuple.

    The tuple has the shape (major, minor, micro, releaselevel, serial),
    mirroring sys.version_info so it can be compared with tuple literals.
    """
    global _tk_patchlevel
    if _tk_patchlevel is not None:
        return _tk_patchlevel
    patchlevel = Tcl().call('info', 'patchlevel')
    match = re.fullmatch(r'(\d+)\.(\d+)([ab.])(\d+)', patchlevel)
    major, minor, level_char, serial = match.groups()
    releaselevel = {'a': 'alpha', 'b': 'beta', '.': 'final'}[level_char]
    version = (int(major), int(minor))
    if releaselevel == 'final':
        _tk_patchlevel = version + (int(serial), releaselevel, 0)
    else:
        _tk_patchlevel = version + (0, releaselevel, int(serial))
    return _tk_patchlevel
class TkinterTest(unittest.TestCase):
    """Tests for the low-level _tkinter helper functions."""

    def testFlattenLen(self):
        # flatten(<object with no length>) must be rejected.
        with self.assertRaises(TypeError):
            _tkinter._flatten(True)
class TclTest(unittest.TestCase):
def setUp(self):
self.interp = Tcl()
self.wantobjects = self.interp.tk.wantobjects()
def testEval(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.eval('set a'),'1')
def test_eval_null_in_result(self):
tcl = self.interp
self.assertEqual(tcl.eval('set a "a\\0b"'), 'a\x00b')
def testEvalException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'set a')
def testEvalException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'this is wrong')
def testCall(self):
tcl = self.interp
tcl.call('set','a','1')
self.assertEqual(tcl.call('set','a'),'1')
def testCallException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'set','a')
def testCallException2(self):
tcl = self.interp
self.assertRaises(TclError,tcl.call,'this','is','wrong')
def testSetVar(self):
tcl = self.interp
tcl.setvar('a','1')
self.assertEqual(tcl.eval('set a'),'1')
def testSetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)','1')
self.assertEqual(tcl.eval('set a(1)'),'1')
def testGetVar(self):
tcl = self.interp
tcl.eval('set a 1')
self.assertEqual(tcl.getvar('a'),'1')
def testGetVarArray(self):
tcl = self.interp
tcl.eval('set a(1) 1')
self.assertEqual(tcl.getvar('a(1)'),'1')
def testGetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a')
def testGetVarArrayException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.getvar,'a(1)')
def testUnsetVar(self):
tcl = self.interp
tcl.setvar('a',1)
self.assertEqual(tcl.eval('info exists a'),'1')
tcl.unsetvar('a')
self.assertEqual(tcl.eval('info exists a'),'0')
def testUnsetVarArray(self):
tcl = self.interp
tcl.setvar('a(1)',1)
tcl.setvar('a(2)',2)
self.assertEqual(tcl.eval('info exists a(1)'),'1')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
tcl.unsetvar('a(1)')
self.assertEqual(tcl.eval('info exists a(1)'),'0')
self.assertEqual(tcl.eval('info exists a(2)'),'1')
def testUnsetVarException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.unsetvar,'a')
def get_integers(self):
integers = (0, 1, -1, 2**31-1, -2**31)
if tcl_version >= (8, 4): # wideInt was added in Tcl 8.4
integers += (2**31, -2**31-1, 2**63-1, -2**63)
# bignum was added in Tcl 8.5, but its support is able only since 8.5.8
if (get_tk_patchlevel() >= (8, 6, 0, 'final') or
(8, 5, 8) <= get_tk_patchlevel() < (8, 6)):
integers += (2**63, -2**63-1, 2**1000, -2**1000)
return integers
def test_getint(self):
tcl = self.interp.tk
for i in self.get_integers():
self.assertEqual(tcl.getint(' %d ' % i), i)
if tcl_version >= (8, 5):
self.assertEqual(tcl.getint(' %#o ' % i), i)
self.assertEqual(tcl.getint((' %#o ' % i).replace('o', '')), i)
self.assertEqual(tcl.getint(' %#x ' % i), i)
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.getint, str(2**1000))
self.assertEqual(tcl.getint(42), 42)
self.assertRaises(TypeError, tcl.getint)
self.assertRaises(TypeError, tcl.getint, '42', '10')
self.assertRaises(TypeError, tcl.getint, b'42')
self.assertRaises(TypeError, tcl.getint, 42.0)
self.assertRaises(TclError, tcl.getint, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getint, '42\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getint, '42\ud800')
def test_getdouble(self):
tcl = self.interp.tk
self.assertEqual(tcl.getdouble(' 42 '), 42.0)
self.assertEqual(tcl.getdouble(' 42.5 '), 42.5)
self.assertEqual(tcl.getdouble(42.5), 42.5)
self.assertRaises(TypeError, tcl.getdouble)
self.assertRaises(TypeError, tcl.getdouble, '42.5', '10')
self.assertRaises(TypeError, tcl.getdouble, b'42.5')
self.assertRaises(TypeError, tcl.getdouble, 42)
self.assertRaises(TclError, tcl.getdouble, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getdouble, '42.5\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getdouble, '42.5\ud800')
def test_getboolean(self):
tcl = self.interp.tk
self.assertIs(tcl.getboolean('on'), True)
self.assertIs(tcl.getboolean('1'), True)
self.assertIs(tcl.getboolean(42), True)
self.assertIs(tcl.getboolean(0), False)
self.assertRaises(TypeError, tcl.getboolean)
self.assertRaises(TypeError, tcl.getboolean, 'on', '1')
self.assertRaises(TypeError, tcl.getboolean, b'on')
self.assertRaises(TypeError, tcl.getboolean, 1.0)
self.assertRaises(TclError, tcl.getboolean, 'a')
self.assertRaises((TypeError, ValueError, TclError),
tcl.getboolean, 'on\0')
self.assertRaises((UnicodeEncodeError, ValueError, TclError),
tcl.getboolean, 'on\ud800')
def testEvalFile(self):
tcl = self.interp
with open(support.TESTFN, 'w') as f:
self.addCleanup(support.unlink, support.TESTFN)
f.write("""set a 1
set b 2
set c [ expr $a + $b ]
""")
tcl.evalfile(support.TESTFN)
self.assertEqual(tcl.eval('set a'),'1')
self.assertEqual(tcl.eval('set b'),'2')
self.assertEqual(tcl.eval('set c'),'3')
def test_evalfile_null_in_result(self):
tcl = self.interp
with open(support.TESTFN, 'w') as f:
self.addCleanup(support.unlink, support.TESTFN)
f.write("""
set a "a\0b"
set b "a\\0b"
""")
tcl.evalfile(support.TESTFN)
self.assertEqual(tcl.eval('set a'), 'a\x00b')
self.assertEqual(tcl.eval('set b'), 'a\x00b')
def testEvalFileException(self):
tcl = self.interp
filename = "doesnotexists"
try:
os.remove(filename)
except Exception as e:
pass
self.assertRaises(TclError,tcl.evalfile,filename)
def testPackageRequireException(self):
tcl = self.interp
self.assertRaises(TclError,tcl.eval,'package require DNE')
@unittest.skipUnless(sys.platform == 'win32', 'Requires Windows')
def testLoadWithUNC(self):
# Build a UNC path from the regular path.
# Something like
# \\%COMPUTERNAME%\c$\python27\python.exe
fullname = os.path.abspath(sys.executable)
if fullname[1] != ':':
raise unittest.SkipTest('Absolute path should have drive part')
unc_name = r'\\%s\%s$\%s' % (os.environ['COMPUTERNAME'],
fullname[0],
fullname[3:])
if not os.path.exists(unc_name):
raise unittest.SkipTest('Cannot connect to UNC Path')
with support.EnvironmentVarGuard() as env:
env.unset("TCL_LIBRARY")
stdout = subprocess.check_output(
[unc_name, '-c', 'import tkinter; print(tkinter)'])
self.assertIn(b'tkinter', stdout)
def test_exprstring(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprstring(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, str)
self.assertRaises(TypeError, tcl.exprstring)
self.assertRaises(TypeError, tcl.exprstring, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprstring, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprstring, 'spam')
check('', '0')
check('8.2 + 6', '14.2')
check('3.1 + $a', '6.1')
check('2 + "$a.$b"', '5.6')
check('4*[llength "6 2"]', '8')
check('{word one} < "word $a"', '0')
check('4*2 < 7', '0')
check('hypot($a, 4)', '5.0')
check('5 / 4', '1')
check('5 / 4.0', '1.25')
check('5 / ( [string length "abcd"] + 0.0 )', '1.25')
check('20.0/5.0', '4.0')
check('"0x03" > "2"', '1')
check('[string length "a\xbd\u20ac"]', '3')
check(r'[string length "a\xbd\u20ac"]', '3')
check('"abc"', 'abc')
check('"a\xbd\u20ac"', 'a\xbd\u20ac')
check(r'"a\xbd\u20ac"', 'a\xbd\u20ac')
check(r'"a\0b"', 'a\x00b')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', str(2**64))
def test_exprdouble(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprdouble(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, float)
self.assertRaises(TypeError, tcl.exprdouble)
self.assertRaises(TypeError, tcl.exprdouble, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprdouble, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprdouble, 'spam')
check('', 0.0)
check('8.2 + 6', 14.2)
check('3.1 + $a', 6.1)
check('2 + "$a.$b"', 5.6)
check('4*[llength "6 2"]', 8.0)
check('{word one} < "word $a"', 0.0)
check('4*2 < 7', 0.0)
check('hypot($a, 4)', 5.0)
check('5 / 4', 1.0)
check('5 / 4.0', 1.25)
check('5 / ( [string length "abcd"] + 0.0 )', 1.25)
check('20.0/5.0', 4.0)
check('"0x03" > "2"', 1.0)
check('[string length "a\xbd\u20ac"]', 3.0)
check(r'[string length "a\xbd\u20ac"]', 3.0)
self.assertRaises(TclError, tcl.exprdouble, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', float(2**64))
def test_exprlong(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprlong(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertRaises(TypeError, tcl.exprlong)
self.assertRaises(TypeError, tcl.exprlong, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprlong, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprlong, 'spam')
check('', 0)
check('8.2 + 6', 14)
check('3.1 + $a', 6)
check('2 + "$a.$b"', 5)
check('4*[llength "6 2"]', 8)
check('{word one} < "word $a"', 0)
check('4*2 < 7', 0)
check('hypot($a, 4)', 5)
check('5 / 4', 1)
check('5 / 4.0', 1)
check('5 / ( [string length "abcd"] + 0.0 )', 1)
check('20.0/5.0', 4)
check('"0x03" > "2"', 1)
check('[string length "a\xbd\u20ac"]', 3)
check(r'[string length "a\xbd\u20ac"]', 3)
self.assertRaises(TclError, tcl.exprlong, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.exprlong, '2**64')
def test_exprboolean(self):
tcl = self.interp
tcl.call('set', 'a', 3)
tcl.call('set', 'b', 6)
def check(expr, expected):
result = tcl.exprboolean(expr)
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
self.assertNotIsInstance(result, bool)
self.assertRaises(TypeError, tcl.exprboolean)
self.assertRaises(TypeError, tcl.exprboolean, '8.2', '+6')
self.assertRaises(TypeError, tcl.exprboolean, b'8.2 + 6')
self.assertRaises(TclError, tcl.exprboolean, 'spam')
check('', False)
for value in ('0', 'false', 'no', 'off'):
check(value, False)
check('"%s"' % value, False)
check('{%s}' % value, False)
for value in ('1', 'true', 'yes', 'on'):
check(value, True)
check('"%s"' % value, True)
check('{%s}' % value, True)
check('8.2 + 6', True)
check('3.1 + $a', True)
check('2 + "$a.$b"', True)
check('4*[llength "6 2"]', True)
check('{word one} < "word $a"', False)
check('4*2 < 7', False)
check('hypot($a, 4)', True)
check('5 / 4', True)
check('5 / 4.0', True)
check('5 / ( [string length "abcd"] + 0.0 )', True)
check('20.0/5.0', True)
check('"0x03" > "2"', True)
check('[string length "a\xbd\u20ac"]', True)
check(r'[string length "a\xbd\u20ac"]', True)
self.assertRaises(TclError, tcl.exprboolean, '"abc"')
if tcl_version >= (8, 5): # bignum was added in Tcl 8.5
check('2**64', True)
@unittest.skipUnless(tcl_version >= (8, 5), 'requires Tcl version >= 8.5')
def test_booleans(self):
tcl = self.interp
def check(expr, expected):
result = tcl.call('expr', expr)
if tcl.wantobjects():
self.assertEqual(result, expected)
self.assertIsInstance(result, int)
else:
self.assertIn(result, (expr, str(int(expected))))
self.assertIsInstance(result, str)
check('true', True)
check('yes', True)
check('on', True)
check('false', False)
check('no', False)
check('off', False)
check('1 < 2', True)
check('1 > 2', False)
def test_expr_bignum(self):
tcl = self.interp
for i in self.get_integers():
result = tcl.call('expr', str(i))
if self.wantobjects:
self.assertEqual(result, i)
self.assertIsInstance(result, int)
else:
self.assertEqual(result, str(i))
self.assertIsInstance(result, str)
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertRaises(TclError, tcl.call, 'expr', str(2**1000))
def test_passing_values(self):
def passValue(value):
return self.interp.call('set', '_', value)
self.assertEqual(passValue(True), True if self.wantobjects else '1')
self.assertEqual(passValue(False), False if self.wantobjects else '0')
self.assertEqual(passValue('string'), 'string')
self.assertEqual(passValue('string\u20ac'), 'string\u20ac')
self.assertEqual(passValue('str\x00ing'), 'str\x00ing')
self.assertEqual(passValue('str\x00ing\xbd'), 'str\x00ing\xbd')
self.assertEqual(passValue('str\x00ing\u20ac'), 'str\x00ing\u20ac')
self.assertEqual(passValue(b'str\x00ing'),
b'str\x00ing' if self.wantobjects else 'str\x00ing')
self.assertEqual(passValue(b'str\xc0\x80ing'),
b'str\xc0\x80ing' if self.wantobjects else 'str\xc0\x80ing')
self.assertEqual(passValue(b'str\xbding'),
b'str\xbding' if self.wantobjects else 'str\xbding')
for i in self.get_integers():
self.assertEqual(passValue(i), i if self.wantobjects else str(i))
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
self.assertEqual(passValue(2**1000), str(2**1000))
for f in (0.0, 1.0, -1.0, 1/3,
sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
if self.wantobjects:
self.assertEqual(passValue(f), f)
else:
self.assertEqual(float(passValue(f)), f)
if self.wantobjects:
f = passValue(float('nan'))
self.assertNotEqual(f, f)
self.assertEqual(passValue(float('inf')), float('inf'))
self.assertEqual(passValue(-float('inf')), -float('inf'))
else:
self.assertEqual(float(passValue(float('inf'))), float('inf'))
self.assertEqual(float(passValue(-float('inf'))), -float('inf'))
# XXX NaN representation can be not parsable by float()
self.assertEqual(passValue((1, '2', (3.4,))),
(1, '2', (3.4,)) if self.wantobjects else '1 2 3.4')
def test_user_command(self):
result = None
def testfunc(arg):
nonlocal result
result = arg
return arg
self.interp.createcommand('testfunc', testfunc)
self.addCleanup(self.interp.tk.deletecommand, 'testfunc')
def check(value, expected=None, *, eq=self.assertEqual):
if expected is None:
expected = value
nonlocal result
result = None
r = self.interp.call('testfunc', value)
self.assertIsInstance(result, str)
eq(result, expected)
self.assertIsInstance(r, str)
eq(r, expected)
def float_eq(actual, expected):
self.assertAlmostEqual(float(actual), expected,
delta=abs(expected) * 1e-10)
check(True, '1')
check(False, '0')
check('string')
check('string\xbd')
check('string\u20ac')
check('')
check(b'string', 'string')
check(b'string\xe2\x82\xac', 'string\xe2\x82\xac')
check(b'string\xbd', 'string\xbd')
check(b'', '')
check('str\x00ing')
check('str\x00ing\xbd')
check('str\x00ing\u20ac')
check(b'str\x00ing', 'str\x00ing')
check(b'str\xc0\x80ing', 'str\xc0\x80ing')
check(b'str\xc0\x80ing\xe2\x82\xac', 'str\xc0\x80ing\xe2\x82\xac')
for i in self.get_integers():
check(i, str(i))
if tcl_version < (8, 5): # bignum was added in Tcl 8.5
check(2**1000, str(2**1000))
for f in (0.0, 1.0, -1.0):
check(f, repr(f))
for f in (1/3.0, sys.float_info.min, sys.float_info.max,
-sys.float_info.min, -sys.float_info.max):
check(f, eq=float_eq)
check(float('inf'), eq=float_eq)
check(-float('inf'), eq=float_eq)
# XXX NaN representation can be not parsable by float()
check((), '')
check((1, (2,), (3, 4), '5 6', ()), '1 2 {3 4} {5 6} {}')
def test_splitlist(self):
splitlist = self.interp.tk.splitlist
call = self.interp.tk.call
self.assertRaises(TypeError, splitlist)
self.assertRaises(TypeError, splitlist, 'a', 'b')
self.assertRaises(TypeError, splitlist, 2)
testcases = [
('2', ('2',)),
('', ()),
('{}', ('',)),
('""', ('',)),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(b'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \u20ac', ('a', '\u20ac')),
(b'a \xe2\x82\xac', ('a', '\u20ac')),
(b'a\xc0\x80b c\xc0\x80d', ('a\x00b', 'c\x00d')),
('a {b c}', ('a', 'b c')),
(r'a b\ c', ('a', 'b c')),
(('a', 'b c'), ('a', 'b c')),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', '\u20ac', '\xe2\x82\xac', '3.4')
else:
expected = (12, '\u20ac', b'\xe2\x82\xac', (3.4,))
testcases += [
(call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(splitlist(arg), res, msg=arg)
self.assertRaises(TclError, splitlist, '{')
def test_split(self):
split = self.interp.tk.split
call = self.interp.tk.call
self.assertRaises(TypeError, split)
self.assertRaises(TypeError, split, 'a', 'b')
self.assertRaises(TypeError, split, 2)
testcases = [
('2', '2'),
('', ''),
('{}', ''),
('""', ''),
('{', '{'),
('a\n b\t\r c\n ', ('a', 'b', 'c')),
(b'a\n b\t\r c\n ', ('a', 'b', 'c')),
('a \u20ac', ('a', '\u20ac')),
(b'a \xe2\x82\xac', ('a', '\u20ac')),
(b'a\xc0\x80b', 'a\x00b'),
(b'a\xc0\x80b c\xc0\x80d', ('a\x00b', 'c\x00d')),
(b'{a\xc0\x80b c\xc0\x80d', '{a\x00b c\x00d'),
('a {b c}', ('a', ('b', 'c'))),
(r'a b\ c', ('a', ('b', 'c'))),
(('a', b'b c'), ('a', ('b', 'c'))),
(('a', 'b c'), ('a', ('b', 'c'))),
('a 2', ('a', '2')),
(('a', 2), ('a', 2)),
('a 3.4', ('a', '3.4')),
(('a', 3.4), ('a', 3.4)),
(('a', (2, 3.4)), ('a', (2, 3.4))),
((), ()),
(call('list', 1, '2', (3.4,)),
(1, '2', (3.4,)) if self.wantobjects else
('1', '2', '3.4')),
]
if tcl_version >= (8, 5):
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = ('12', '\u20ac', '\xe2\x82\xac', '3.4')
else:
expected = (12, '\u20ac', b'\xe2\x82\xac', (3.4,))
testcases += [
(call('dict', 'create', 12, '\u20ac', b'\xe2\x82\xac', (3.4,)),
expected),
]
for arg, res in testcases:
self.assertEqual(split(arg), res, msg=arg)
def test_splitdict(self):
splitdict = tkinter._splitdict
tcl = self.interp.tk
arg = '-a {1 2 3} -something foo status {}'
self.assertEqual(splitdict(tcl, arg, False),
{'-a': '1 2 3', '-something': 'foo', 'status': ''})
self.assertEqual(splitdict(tcl, arg),
{'a': '1 2 3', 'something': 'foo', 'status': ''})
arg = ('-a', (1, 2, 3), '-something', 'foo', 'status', '{}')
self.assertEqual(splitdict(tcl, arg, False),
{'-a': (1, 2, 3), '-something': 'foo', 'status': '{}'})
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3), 'something': 'foo', 'status': '{}'})
self.assertRaises(RuntimeError, splitdict, tcl, '-a b -c ')
self.assertRaises(RuntimeError, splitdict, tcl, ('-a', 'b', '-c'))
arg = tcl.call('list',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
self.assertEqual(splitdict(tcl, arg),
{'a': (1, 2, 3) if self.wantobjects else '1 2 3',
'something': 'foo', 'status': ''})
if tcl_version >= (8, 5):
arg = tcl.call('dict', 'create',
'-a', (1, 2, 3), '-something', 'foo', 'status', ())
if not self.wantobjects or get_tk_patchlevel() < (8, 5, 5):
# Before 8.5.5 dicts were converted to lists through string
expected = {'a': '1 2 3', 'something': 'foo', 'status': ''}
else:
expected = {'a': (1, 2, 3), 'something': 'foo', 'status': ''}
self.assertEqual(splitdict(tcl, arg), expected)
class BigmemTclTest(unittest.TestCase):
    """Checks that oversized strings raise OverflowError instead of being
    silently truncated when crossing into Tcl's C API (which uses int sizes)."""

    def setUp(self):
        self.interp = Tcl()

    @support.cpython_only
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @support.bigmemtest(size=INT_MAX + 1, memuse=5, dry_run=False)
    def test_huge_string_call(self, size):
        # A value longer than INT_MAX cannot be passed through Tcl_Obj.
        value = ' ' * size
        self.assertRaises(OverflowError, self.interp.call, 'set', '_', value)

    @support.cpython_only
    @unittest.skipUnless(INT_MAX < PY_SSIZE_T_MAX, "needs UINT_MAX < SIZE_MAX")
    @support.bigmemtest(size=INT_MAX + 1, memuse=9, dry_run=False)
    def test_huge_string_builtins(self, size):
        # Every _tkinter entry point that accepts a string argument must
        # reject one whose length exceeds INT_MAX.
        value = '1' + ' ' * size
        self.assertRaises(OverflowError, self.interp.tk.getint, value)
        self.assertRaises(OverflowError, self.interp.tk.getdouble, value)
        self.assertRaises(OverflowError, self.interp.tk.getboolean, value)
        self.assertRaises(OverflowError, self.interp.eval, value)
        self.assertRaises(OverflowError, self.interp.evalfile, value)
        self.assertRaises(OverflowError, self.interp.record, value)
        self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
        self.assertRaises(OverflowError, self.interp.setvar, value, 'x', 'a')
        self.assertRaises(OverflowError, self.interp.setvar, 'x', value, 'a')
        self.assertRaises(OverflowError, self.interp.unsetvar, value)
        self.assertRaises(OverflowError, self.interp.unsetvar, 'x', value)
        self.assertRaises(OverflowError, self.interp.adderrorinfo, value)
        self.assertRaises(OverflowError, self.interp.exprstring, value)
        self.assertRaises(OverflowError, self.interp.exprlong, value)
        self.assertRaises(OverflowError, self.interp.exprboolean, value)
        self.assertRaises(OverflowError, self.interp.splitlist, value)
        self.assertRaises(OverflowError, self.interp.split, value)
        self.assertRaises(OverflowError, self.interp.createcommand, value, max)
        self.assertRaises(OverflowError, self.interp.deletecommand, value)
def setUpModule():
    """Report the Tcl patchlevel when the test run is verbose."""
    if not support.verbose:
        return
    interp = Tcl()
    print('patchlevel =', interp.call('info', 'patchlevel'))
def test_main():
    # Run the whole Tcl/Tkinter suite through the legacy regrtest driver.
    support.run_unittest(TclTest, TkinterTest, BigmemTclTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
BlastPy/django-ckeditor | ckeditor/management/commands/generateckeditorthumbnails.py | 8 | 1375 | import os
from django.conf import settings
from django.core.management.base import NoArgsCommand
from ckeditor.views import get_image_files
from ckeditor.utils import get_thumb_filename
from ckeditor.image_processing import get_backend
class Command(NoArgsCommand):
    """
    Creates thumbnail files for the CKEditor file image browser.
    Useful if starting to use django-ckeditor with existing images.
    """

    def handle_noargs(self, **options):
        # Nothing to do unless an image backend has been configured.
        if not getattr(settings, 'CKEDITOR_IMAGE_BACKEND', None):
            self.stdout.write("No thumbnail backend is enabled")
            return
        backend = get_backend()
        for image in get_image_files():
            if self._thumbnail_exists(image):
                continue
            self.stdout.write("Creating thumbnail for %s" % image)
            try:
                backend.create_thumbnail(image)
            except Exception as e:
                # Keep going on individual failures; report and move on.
                self.stdout.write("Couldn't create thumbnail for %s: %s" % (image, e))
        self.stdout.write("Finished")

    def _thumbnail_exists(self, image_path):
        # A thumbnail exists when its file is present on disk.
        return os.path.isfile(
            self._to_absolute_path(get_thumb_filename(image_path)))

    @staticmethod
    def _to_absolute_path(image_path):
        # Resolve a storage-relative path against MEDIA_ROOT.
        return os.path.join(settings.MEDIA_ROOT, image_path)
| bsd-3-clause |
aldian/tensorflow | tensorflow/contrib/data/python/kernel_tests/scan_dataset_op_test.py | 15 | 5707 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ScanDatasetTest(test.TestCase):
  """Tests for tf.contrib.data's scan() transformation."""

  def _count(self, start, step):
    # Infinite dataset emitting start, start+step, ... by threading the
    # counter through the scan state.
    return dataset_ops.Dataset.from_tensors(0).repeat(None).apply(
        scan_ops.scan(start, lambda state, _: (state + step, state)))

  def testCount(self):
    start = array_ops.placeholder(dtypes.int32, shape=[])
    step = array_ops.placeholder(dtypes.int32, shape=[])
    take = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = self._count(start, step).take(take).make_initializable_iterator()
    next_element = iterator.get_next()

    with self.test_session() as sess:
      # Cover increasing, zero-length, offset, strided and decreasing counts.
      for start_val, step_val, take_val in [(0, 1, 10), (0, 1, 0), (10, 1, 10),
                                            (10, 2, 10), (10, -1, 10),
                                            (10, -2, 10)]:
        sess.run(iterator.initializer,
                 feed_dict={start: start_val, step: step_val, take: take_val})
        for expected, _ in zip(
            itertools.count(start_val, step_val), range(take_val)):
          self.assertEqual(expected, sess.run(next_element))
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)

  def testFibonacci(self):
    # State is the pair (F(n), F(n+1)); output is F(n+1).
    iterator = dataset_ops.Dataset.from_tensors(1).repeat(None).apply(
        scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1]))
    ).make_one_shot_iterator()
    next_element = iterator.get_next()
    with self.test_session() as sess:
      self.assertEqual(1, sess.run(next_element))
      self.assertEqual(1, sess.run(next_element))
      self.assertEqual(2, sess.run(next_element))
      self.assertEqual(3, sess.run(next_element))
      self.assertEqual(5, sess.run(next_element))
      self.assertEqual(8, sess.run(next_element))

  def testChangingStateShape(self):
    # Test the fixed-point shape invariant calculations: start with
    # initial values with known shapes, and use a scan function that
    # changes the size of the state on each element.
    def _scan_fn(state, input_value):
      # Statically known rank, but dynamic length.
      ret_longer_vector = array_ops.concat([state[0], state[0]], 0)
      # Statically unknown rank.
      ret_larger_rank = array_ops.expand_dims(state[1], 0)
      return (ret_longer_vector, ret_larger_rank), (state, input_value)

    dataset = dataset_ops.Dataset.from_tensors(0).repeat(5).apply(
        scan_ops.scan(([0], 1), _scan_fn))
    self.assertEqual([None], dataset.output_shapes[0][0].as_list())
    self.assertIs(None, dataset.output_shapes[0][1].ndims)
    self.assertEqual([], dataset.output_shapes[1].as_list())

    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.test_session() as sess:
      for i in range(5):
        # State doubles in length / gains a dimension on every step.
        (longer_vector_val, larger_rank_val), _ = sess.run(next_element)
        self.assertAllEqual([0] * (2**i), longer_vector_val)
        self.assertAllEqual(np.array(1, ndmin=i), larger_rank_val)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testIncorrectStateType(self):
    # Returning a state whose dtype differs from the initial state is an error.
    def _scan_fn(state, _):
      return constant_op.constant(1, dtype=dtypes.int64), state

    dataset = dataset_ops.Dataset.range(10)
    with self.assertRaisesRegexp(
        TypeError,
        "The element types for the new state must match the initial state."):
      dataset.apply(
          scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))

  def testIncorrectReturnType(self):
    # The scan function must return a (new_state, output) pair.
    def _scan_fn(unused_state, unused_input_value):
      return constant_op.constant(1, dtype=dtypes.int64)

    dataset = dataset_ops.Dataset.range(10)
    with self.assertRaisesRegexp(
        TypeError,
        "The scan function must return a pair comprising the new state and the "
        "output value."):
      dataset.apply(
          scan_ops.scan(constant_op.constant(1, dtype=dtypes.int32), _scan_fn))
class ScanDatasetSerialzationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  # NOTE(review): class name misspells "Serialization"; kept as-is so
  # test discovery and any external references stay stable.

  def _build_dataset(self, num_elements):
    # Fibonacci scan dataset producing num_elements values.
    return dataset_ops.Dataset.from_tensors(1).repeat(num_elements).apply(
        scan_ops.scan([0, 1], lambda a, _: ([a[1], a[0] + a[1]], a[1])))

  def testScanCore(self):
    num_output = 5
    self.run_core_tests(lambda: self._build_dataset(num_output),
                        lambda: self._build_dataset(2), num_output)

if __name__ == "__main__":
  test.main()
| apache-2.0 |
denisshockwave/image_processing_ocr_server | venv/lib/python2.7/site-packages/wtforms/csrf/session.py | 193 | 3056 | """
A provided CSRF implementation which puts CSRF data in a session.
This can be used fairly comfortably with many `request.session` type
objects, including the Werkzeug/Flask session store, Django sessions, and
potentially other similar objects which use a dict-like API for storing
session keys.
The basic concept is a randomly generated value is stored in the user's
session, and an hmac-sha1 of it (along with an optional expiration time,
for extra security) is used as the value of the csrf_token. If this token
validates with the hmac of the random value + expiration time, and the
expiration time is not passed, the CSRF validation will pass.
"""
from __future__ import unicode_literals
import hmac
import os
from hashlib import sha1
from datetime import datetime, timedelta
from ..validators import ValidationError
from .core import CSRF
__all__ = ('SessionCSRF', )
class SessionCSRF(CSRF):
    """Session-backed CSRF implementation.

    A random secret is stored in the user's session; the token handed to
    the form is ``"<expires>##<hmac>"``, where the hmac-sha1 covers the
    session secret concatenated with the expiration timestamp.
    """
    TIME_FORMAT = '%Y%m%d%H%M%S'

    def setup_form(self, form):
        # Keep a reference to the form's Meta so generate/validate can
        # read csrf_secret, csrf_context and csrf_time_limit.
        self.form_meta = form.meta
        return super(SessionCSRF, self).setup_form(form)

    def generate_csrf_token(self, csrf_token_field):
        """Return a fresh token string, creating the session secret if needed."""
        meta = self.form_meta
        if meta.csrf_secret is None:
            raise Exception('must set `csrf_secret` on class Meta for SessionCSRF to work')
        if meta.csrf_context is None:
            raise TypeError('Must provide a session-like object as csrf context')

        session = self.session

        if 'csrf' not in session:
            # Lazily create the per-session random secret.
            session['csrf'] = sha1(os.urandom(64)).hexdigest()

        if self.time_limit:
            expires = (self.now() + self.time_limit).strftime(self.TIME_FORMAT)
            csrf_build = '%s%s' % (session['csrf'], expires)
        else:
            expires = ''
            csrf_build = session['csrf']

        hmac_csrf = hmac.new(meta.csrf_secret, csrf_build.encode('utf8'), digestmod=sha1)
        return '%s##%s' % (expires, hmac_csrf.hexdigest())

    def validate_csrf_token(self, form, field):
        """Validate the submitted token's hmac and (optional) expiry.

        :raises ValidationError: if the token is missing, malformed,
            fails the hmac check, or has expired.
        """
        meta = self.form_meta
        if not field.data or '##' not in field.data:
            raise ValidationError(field.gettext('CSRF token missing'))

        expires, hmac_csrf = field.data.split('##', 1)

        check_val = (self.session['csrf'] + expires).encode('utf8')

        hmac_compare = hmac.new(meta.csrf_secret, check_val, digestmod=sha1)
        # Security fix: compare digests in constant time so an attacker
        # cannot recover the expected hmac byte-by-byte via timing.
        if not hmac.compare_digest(hmac_compare.hexdigest(), hmac_csrf):
            raise ValidationError(field.gettext('CSRF failed'))

        if self.time_limit:
            # TIME_FORMAT is fixed-width, so lexicographic comparison of
            # the formatted timestamps orders them chronologically.
            now_formatted = self.now().strftime(self.TIME_FORMAT)
            if now_formatted > expires:
                raise ValidationError(field.gettext('CSRF token expired'))

    def now(self):
        """
        Get the current time. Used for test mocking/overriding mainly.
        """
        return datetime.now()

    @property
    def time_limit(self):
        # Token lifetime; a falsy value disables expiry entirely.
        return getattr(self.form_meta, 'csrf_time_limit', timedelta(minutes=30))

    @property
    def session(self):
        # Accept either a session-like object directly, or any object
        # exposing a `.session` attribute (e.g. a request).
        return getattr(self.form_meta.csrf_context, 'session', self.form_meta.csrf_context)
| gpl-3.0 |
blockstack/blockstack-server | integration_tests/blockstack_integration_tests/scenarios/name_pre_reg_up_atlas_mesh.py | 1 | 6210 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import virtualchain
import json
import time
import blockstack
import blockstack_zones
import virtualchain
import os
"""
TEST ENV BLOCKSTACK_ATLAS_NUM_NEIGHBORS 10
"""
# Pre-funded test wallets used by scenario()/check() below.
wallets = [
    testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
    testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
    testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
    testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
    testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 ),
    testlib.Wallet( "5KaSTdRgMfHLxSKsiWhF83tdhEj2hqugxdBNPUAw5NU8DMyBJji", 100000000000 )
]

# Consensus hash expected by the test harness.
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
# Set by scenario(); read by check() to decide pass/fail.
synchronized = False
# Last zonefile hash announced on-chain; set by scenario().
value_hash = None
def scenario( wallets, **kw ):
    """Register 11 names in a fresh 'test' namespace, publish empty
    zonefiles for 10 of them, then boot an 8-node Atlas network seeded
    with this node and wait for all peers to replicate the zonefiles.
    Returns True iff the network converged."""
    global synchronized, value_hash

    import blockstack_integration_tests.atlas_network as atlas_network

    # Create the 'test' namespace: preorder, reveal, ready.
    testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr )
    testlib.next_block( **kw )

    testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr )
    testlib.next_block( **kw )

    # register 10 names
    for i in xrange(0, 10):
        res = testlib.blockstack_name_preorder( "foo_{}.test".format(i), wallets[2].privkey, wallets[3].addr )
        if 'error' in res:
            print json.dumps(res)
            return False

    testlib.next_block( **kw )

    for i in xrange(0, 10):
        res = testlib.blockstack_name_register( "foo_{}.test".format(i), wallets[2].privkey, wallets[3].addr )
        if 'error' in res:
            print json.dumps(res)
            return False

    testlib.next_block( **kw )

    # make 10 empty zonefiles and propagate them
    for i in xrange(0, 10):
        empty_zonefile_str = testlib.make_empty_zonefile( "foo_{}.test".format(i), wallets[3].addr)
        value_hash = blockstack.lib.storage.get_zonefile_data_hash(empty_zonefile_str)
        res = testlib.blockstack_name_update( "foo_{}.test".format(i), value_hash, wallets[3].privkey )
        if 'error' in res:
            print json.dumps(res)
            return False

        testlib.next_block( **kw )

        res = testlib.blockstack_put_zonefile(empty_zonefile_str)
        if not res:
            return False

    # start up an Atlas test network with 9 nodes: the main one doing the test, and 8 subordinate ones that treat it as a seed peer.
    # the network will ensure each node can reach each other node.
    atlas_nodes = [17000, 17001, 17002, 17003, 17004, 17005, 17006, 17007]
    atlas_topology = {}
    for node_port in atlas_nodes:
        # every subordinate peer lists the main node (port 16264) as seed
        atlas_topology[node_port] = [16264]

    network_des = atlas_network.atlas_network_build( testlib.working_dir(**kw), atlas_nodes, atlas_topology, {}, os.path.join( testlib.working_dir(**kw), "atlas_network" ))
    atlas_network.atlas_network_start( network_des )
    print "Waiting 60 seconds for the altas peers to catch up"
    time.sleep(60.0)

    # wait at most 60 seconds for atlas network to converge
    synchronized = False
    for i in xrange(0, 60):
        atlas_network.atlas_print_network_state( network_des )
        if atlas_network.atlas_network_is_synchronized( network_des, testlib.last_block( **kw ) - 1, 1 ):
            print "Synchronized!"
            synchronized = True
            break
        else:
            time.sleep(1.0)

    # shut down
    atlas_network.atlas_network_stop( network_des )
    return synchronized
def check( state_engine ):
    """Post-scenario verification: the namespace must be ready and all
    ten foo_N.test names registered, owned by wallets[3], and updated
    with a zonefile hash. Returns True on success."""
    global synchronized

    # scenario() must have seen the Atlas network converge
    if not synchronized:
        print "not synchronized"
        return False

    # not revealed, but ready
    ns = state_engine.get_namespace_reveal( "test" )
    if ns is not None:
        print "namespace not ready"
        return False

    ns = state_engine.get_namespace( "test" )
    if ns is None:
        print "no namespace"
        return False

    if ns['namespace_id'] != 'test':
        print "wrong namespace"
        return False

    for i in xrange(0, 10):
        name = 'foo_{}.test'.format(i)

        # not preordered
        preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
        if preorder is not None:
            print "still have preorder"
            return False

        # registered
        name_rec = state_engine.get_name( name )
        if name_rec is None:
            print "name does not exist"
            return False

        # owned
        if name_rec['address'] != wallets[3].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[3].addr):
            print "name has wrong owner"
            return False

        # updated
        if name_rec['value_hash'] is None:
            print "wrong value hash: %s" % name_rec['value_hash']
            return False

    return True
| gpl-3.0 |
i-rabot/tractogithub | tracformatter/trac/core.py | 1 | 8449 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2011 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2004-2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
__all__ = ['Component', 'ExtensionPoint', 'implements', 'Interface',
           'TracError']


def N_(string):
    """No-op translation marker, inlined here to avoid importing from
    `trac.util`.
    """
    # Returns its argument unchanged; gettext extraction tools only need
    # to see the call site to collect the string for translation.
    return string
class TracError(Exception):
    """Exception base class for errors in Trac."""

    title = N_('Trac Error')

    def __init__(self, message, title=None, show_traceback=False):
        """If message is a genshi.builder.tag object, everything up to
        the first <p> will be displayed in the red box, and everything
        after will be displayed below the red box. If title is given,
        it will be displayed as the large header above the error
        message.
        """
        # Imported here to keep trac.core import-light (avoids a cycle
        # with trac.util at module import time).
        from trac.util.translation import gettext
        Exception.__init__(self, message)
        self._message = message
        self.title = title or gettext(self.title)
        self.show_traceback = show_traceback

    # NOTE(review): `message` shadows BaseException.message; presumably a
    # property is used here to avoid Python 2.6's deprecation warning on
    # direct assignment — confirm against Trac history.
    message = property(lambda self: self._message,
                       lambda self, v: setattr(self, '_message', v))

    def __unicode__(self):
        return unicode(self.message)
# Interfaces carry no behavior of their own; they serve as registry keys
# linking ExtensionPoint declarations to implementing components.
class Interface(object):
    """Marker base class for extension point interfaces."""
class ExtensionPoint(property):
    """Marker class for extension points in components."""

    def __init__(self, interface):
        """Create the extension point.

        :param interface: the `Interface` subclass that defines the
                          protocol for the extension point
        """
        # Implemented as a property: attribute access on a component
        # instance enumerates the implementing components.
        property.__init__(self, self.extensions)
        self.interface = interface
        self.__doc__ = ("List of components that implement `~%s.%s`" %
                        (self.interface.__module__, self.interface.__name__))

    def extensions(self, component):
        """Return a list of components that declare to implement the
        extension point interface.
        """
        classes = ComponentMeta._registry.get(self.interface, ())
        components = [component.compmgr[cls] for cls in classes]
        # Drop disabled components (manager lookup returned None).
        return [c for c in components if c]

    def __repr__(self):
        """Return a textual representation of the extension point."""
        return '<ExtensionPoint %s>' % self.interface.__name__
class ComponentMeta(type):
    """Meta class for components.

    Takes care of component and extension point registration.
    """
    # All registered concrete component classes.
    _components = []
    # Maps each Interface to the list of component classes implementing it.
    _registry = {}

    def __new__(mcs, name, bases, d):
        """Create the component class."""
        new_class = type.__new__(mcs, name, bases, d)
        if name == 'Component':
            # Don't put the Component base class in the registry
            return new_class

        if d.get('abstract'):
            # Don't put abstract component classes in the registry
            return new_class

        ComponentMeta._components.append(new_class)
        registry = ComponentMeta._registry
        # Walk the MRO so interfaces declared on base classes are also
        # registered for this concrete subclass.
        for cls in new_class.__mro__:
            for interface in cls.__dict__.get('_implements', ()):
                classes = registry.setdefault(interface, [])
                if new_class not in classes:
                    classes.append(new_class)
        return new_class

    def __call__(cls, *args, **kwargs):
        """Return an existing instance of the component if it has
        already been activated, otherwise create a new instance.
        """
        # If this component is also the component manager, just invoke that
        if issubclass(cls, ComponentManager):
            self = cls.__new__(cls)
            self.compmgr = self
            self.__init__(*args, **kwargs)
            return self

        # The normal case where the component is not also the component manager
        compmgr = args[0]
        self = compmgr.components.get(cls)
        # Note that this check is racy, we intentionally don't use a
        # lock in order to keep things simple and avoid the risk of
        # deadlocks, as the impact of having temporarily two (or more)
        # instances for a given `cls` is negligible.
        if self is None:
            self = cls.__new__(cls)
            self.compmgr = compmgr
            compmgr.component_activated(self)
            self.__init__()
            # Only register the instance once it is fully initialized (#9418)
            compmgr.components[cls] = self
        return self
class Component(object):
    """Base class for components.

    Every component can declare what extension points it provides, as
    well as what extension points of other components it extends.
    """
    __metaclass__ = ComponentMeta

    @staticmethod
    def implements(*interfaces):
        """Can be used in the class definition of `Component`
        subclasses to declare the extension points that are extended.
        """
        import sys

        # Inspect the caller's frame: implements() is meant to be called
        # directly inside a class body, whose local namespace becomes the
        # class dict that ComponentMeta.__new__ later reads.
        frame = sys._getframe(1)
        locals_ = frame.f_locals

        # Some sanity checks
        assert locals_ is not frame.f_globals and '__module__' in locals_, \
               'implements() can only be used in a class definition'

        # Accumulate declared interfaces under `_implements` for the
        # metaclass to pick up at class-creation time.
        locals_.setdefault('_implements', []).extend(interfaces)

# Module-level alias so class bodies can simply call implements(...).
implements = Component.implements
class ComponentManager(object):
    """The component manager keeps a pool of active components."""

    def __init__(self):
        """Initialize the component manager."""
        self.components = {}
        self.enabled = {}
        if isinstance(self, Component):
            # A manager that is itself a component is its own singleton.
            self.components[self.__class__] = self

    def __contains__(self, cls):
        """Return whether the given class is in the list of active
        components."""
        return cls in self.components

    def __getitem__(self, cls):
        """Activate the component instance for the given class, or
        return the existing instance if the component has already been
        activated.

        Returns None if the component class is disabled.
        """
        if not self.is_enabled(cls):
            return None
        component = self.components.get(cls)
        if not component:
            if cls not in ComponentMeta._components:
                raise TracError('Component "%s" not registered' % cls.__name__)
            try:
                # Instantiation goes through ComponentMeta.__call__, which
                # caches the instance in self.components.
                component = cls(self)
            except TypeError, e:
                raise TracError('Unable to instantiate component %r (%s)' %
                                (cls, e))
        return component

    def is_enabled(self, cls):
        """Return whether the given component class is enabled."""
        if cls not in self.enabled:
            # Cache the (possibly expensive) is_component_enabled() answer.
            self.enabled[cls] = self.is_component_enabled(cls)
        return self.enabled[cls]

    def disable_component(self, component):
        """Force a component to be disabled.

        :param component: can be a class or an instance.
        """
        if not isinstance(component, type):
            component = component.__class__
        self.enabled[component] = False
        self.components[component] = None

    def component_activated(self, component):
        """Can be overridden by sub-classes so that special
        initialization for components can be provided.
        """

    def is_component_enabled(self, cls):
        """Can be overridden by sub-classes to veto the activation of
        a component.

        If this method returns `False`, the component was disabled
        explicitly. If it returns `None`, the component was neither
        enabled nor disabled explicitly. In both cases, the component
        with the given class will not be available.
        """
        return True
| bsd-3-clause |
boompieman/iim_project | project_python2/lib/python2.7/site-packages/nltk/toolbox.py | 3 | 18532 | # coding: utf-8
# Natural Language Toolkit: Toolbox Reader
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Greg Aumann <greg_aumann@sil.org>
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
Module for reading, writing and manipulating
Toolbox databases and settings files.
"""
from __future__ import print_function
import os, re, codecs
from xml.etree.ElementTree import ElementTree, TreeBuilder, Element, SubElement
from nltk.compat import StringIO, u, PY3
from nltk.data import PathPointer, ZipFilePathPointer, find
class StandardFormat(object):
    """
    Class for reading and processing standard format marker files and strings.
    """
    def __init__(self, filename=None, encoding=None):
        self._encoding = encoding
        if filename is not None:
            self.open(filename)

    def open(self, sfm_file):
        """
        Open a standard format marker file for sequential reading.

        :param sfm_file: name of the standard format marker input file
        :type sfm_file: str
        """
        if isinstance(sfm_file, PathPointer):
            # [xx] We don't use 'rU' mode here -- do we need to?
            # (PathPointer.open doesn't take a mode option)
            self._file = sfm_file.open(self._encoding)
        else:
            self._file = codecs.open(sfm_file, 'rU', self._encoding)

    def open_string(self, s):
        """
        Open a standard format marker string for sequential reading.

        :param s: string to parse as a standard format marker input file
        :type s: str
        """
        self._file = StringIO(s)

    def raw_fields(self):
        """
        Return an iterator that returns the next field in a (marker, value)
        tuple. Linebreaks and trailing white space are preserved except
        for the final newline in each field.

        :rtype: iter(tuple(str, str))
        """
        join_string = '\n'
        # Group 1 is the backslash marker (if the line starts one),
        # group 2 the rest of the line.
        line_regexp = r'^%s(?:\\(\S+)\s*)?(.*)$'
        # discard a BOM in the first line
        first_line_pat = re.compile(line_regexp % '(?:\xef\xbb\xbf)?')
        line_pat = re.compile(line_regexp % '')
        # need to get first line outside the loop for correct handling
        # of the first marker if it spans multiple lines
        file_iter = iter(self._file)
        line = next(file_iter)
        mobj = re.match(first_line_pat, line)
        mkr, line_value = mobj.groups()
        value_lines = [line_value,]
        self.line_num = 0
        for line in file_iter:
            self.line_num += 1
            mobj = re.match(line_pat, line)
            line_mkr, line_value = mobj.groups()
            if line_mkr:
                # A new marker starts here: emit the completed field.
                yield (mkr, join_string.join(value_lines))
                mkr = line_mkr
                value_lines = [line_value,]
            else:
                # Continuation line of the current field.
                value_lines.append(line_value)
        self.line_num += 1
        yield (mkr, join_string.join(value_lines))

    def fields(self, strip=True, unwrap=True, encoding=None, errors='strict', unicode_fields=None):
        """
        Return an iterator that returns the next field in a ``(marker, value)``
        tuple, where ``marker`` and ``value`` are unicode strings if an ``encoding``
        was specified in the ``fields()`` method. Otherwise they are non-unicode strings.

        :param strip: strip trailing whitespace from the last line of each field
        :type strip: bool
        :param unwrap: Convert newlines in a field to spaces.
        :type unwrap: bool
        :param encoding: Name of an encoding to use. If it is specified then
            the ``fields()`` method returns unicode strings rather than non
            unicode strings.
        :type encoding: str or None
        :param errors: Error handling scheme for codec. Same as the ``decode()``
            builtin string method.
        :type errors: str
        :param unicode_fields: Set of marker names whose values are UTF-8 encoded.
            Ignored if encoding is None. If the whole file is UTF-8 encoded set
            ``encoding='utf8'`` and leave ``unicode_fields`` with its default
            value of None.
        :type unicode_fields: sequence
        :rtype: iter(tuple(str, str))
        """
        if encoding is None and unicode_fields is not None:
            raise ValueError('unicode_fields is set but not encoding.')
        unwrap_pat = re.compile(r'\n+')
        for mkr, val in self.raw_fields():
            if encoding and not PY3: # kludge - already decoded in PY3?
                # Fields flagged as unicode_fields are always UTF-8,
                # regardless of the file-level encoding.
                if unicode_fields is not None and mkr in unicode_fields:
                    val = val.decode('utf8', errors)
                else:
                    val = val.decode(encoding, errors)
                mkr = mkr.decode(encoding, errors)
            if unwrap:
                val = unwrap_pat.sub(' ', val)
            if strip:
                val = val.rstrip()
            yield (mkr, val)

    def close(self):
        """Close a previously opened standard format marker file or string."""
        self._file.close()
        try:
            # line_num is only set while iterating raw_fields().
            del self.line_num
        except AttributeError:
            pass
class ToolboxData(StandardFormat):
    """Reader that turns a Toolbox database into an ElementTree structure."""

    def parse(self, grammar=None, **kwargs):
        # With a chunk grammar, build a hierarchical parse of each record;
        # otherwise produce the flat header/record structure.
        if grammar:
            return self._chunk_parse(grammar=grammar, **kwargs)
        else:
            return self._record_parse(**kwargs)

    def _record_parse(self, key=None, **kwargs):
        """
        Returns an element tree structure corresponding to a toolbox data file with
        all markers at the same level.

        Thus the following Toolbox database::

            \_sh v3.0 400 Rotokas Dictionary
            \_DateStampHasFourDigitYear

            \lx kaa
            \ps V.A
            \ge gag
            \gp nek i pas

            \lx kaa
            \ps V.B
            \ge strangle
            \gp pasim nek

        after parsing will end up with the same structure (ignoring the extra
        whitespace) as the following XML fragment after being parsed by
        ElementTree::

            <toolbox_data>
                <header>
                    <_sh>v3.0 400 Rotokas Dictionary</_sh>
                    <_DateStampHasFourDigitYear/>
                </header>

                <record>
                    <lx>kaa</lx>
                    <ps>V.A</ps>
                    <ge>gag</ge>
                    <gp>nek i pas</gp>
                </record>

                <record>
                    <lx>kaa</lx>
                    <ps>V.B</ps>
                    <ge>strangle</ge>
                    <gp>pasim nek</gp>
                </record>
            </toolbox_data>

        :param key: Name of key marker at the start of each record. If set to
            None (the default value) the first marker that doesn't begin with
            an underscore is assumed to be the key.
        :type key: str
        :param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
        :type kwargs: dict
        :rtype: ElementTree._ElementInterface
        :return: contents of toolbox data divided into header and records
        """
        builder = TreeBuilder()
        builder.start('toolbox_data', {})
        builder.start('header', {})
        in_records = False
        for mkr, value in self.fields(**kwargs):
            if key is None and not in_records and mkr[0] != '_':
                # First non-underscore marker becomes the record key.
                key = mkr
            if mkr == key:
                if in_records:
                    builder.end('record')
                else:
                    # First record: the header section is complete.
                    builder.end('header')
                    in_records = True
                builder.start('record', {})
            builder.start(mkr, {})
            builder.data(value)
            builder.end(mkr)
        if in_records:
            builder.end('record')
        else:
            builder.end('header')
        builder.end('toolbox_data')
        return builder.close()

    def _tree2etree(self, parent):
        from nltk.tree import Tree

        # Convert an nltk Tree with (text, tag) leaves into an ElementTree
        # element whose tag is the tree's node label.
        root = Element(parent.label())
        for child in parent:
            if isinstance(child, Tree):
                root.append(self._tree2etree(child))
            else:
                text, tag = child
                e = SubElement(root, tag)
                e.text = text
        return root

    def _chunk_parse(self, grammar=None, root_label='record', trace=0, **kwargs):
        """
        Returns an element tree structure corresponding to a toolbox data file
        parsed according to the chunk grammar.

        :type grammar: str
        :param grammar: Contains the chunking rules used to parse the
            database. See ``chunk.RegExp`` for documentation.
        :type root_label: str
        :param root_label: The node value that should be used for the
            top node of the chunk structure.
        :type trace: int
        :param trace: The level of tracing that should be used when
            parsing a text. ``0`` will generate no tracing output;
            ``1`` will generate normal tracing output; and ``2`` or
            higher will generate verbose tracing output.
        :type kwargs: dict
        :param kwargs: Keyword arguments passed to ``toolbox.StandardFormat.fields()``
        :rtype: ElementTree._ElementInterface
        """
        from nltk import chunk
        from nltk.tree import Tree

        cp = chunk.RegexpParser(grammar, root_label=root_label, trace=trace)
        db = self.parse(**kwargs)
        tb_etree = Element('toolbox_data')
        header = db.find('header')
        tb_etree.append(header)
        for record in db.findall('record'):
            # Chunk each record's (text, tag) fields, then convert the
            # resulting tree back to ElementTree form.
            parsed = cp.parse([(elem.text, elem.tag) for elem in record])
            tb_etree.append(self._tree2etree(parsed))
        return tb_etree
# Matches any non-whitespace character: used to decide whether a field
# has a printable value (marker + space + value) or not (bare marker).
_is_value = re.compile(r"\S")

def to_sfm_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """
    Return a string with a standard format representation of the toolbox
    data in tree (tree can be a toolbox database or a single record).

    :param tree: flat representation of toolbox data (whole database or single record)
    :type tree: ElementTree._ElementInterface
    :param encoding: Name of an encoding to use.
    :type encoding: str
    :param errors: Error handling scheme for codec. Same as the ``encode()``
        builtin string method.
    :type errors: str
    :param unicode_fields:
    :type unicode_fields: dict(str) or set(str)
    :rtype: str
    """
    if tree.tag == 'record':
        # Wrap a single record so both shapes share one code path.
        root = Element('toolbox_data')
        root.append(tree)
        tree = root

    if tree.tag != 'toolbox_data':
        raise ValueError("not a toolbox_data element structure")
    if encoding is None and unicode_fields is not None:
        raise ValueError("if encoding is not specified then neither should unicode_fields")
    l = []
    for rec in tree:
        # Blank line between records; the leading one is dropped below.
        l.append('\n')
        for field in rec:
            mkr = field.tag
            # Robustness fix: a childless element may have text None
            # (e.g. a marker with no value); treat it as the empty string
            # instead of crashing in re.search below.
            value = field.text or ''
            if encoding is not None:
                if unicode_fields is not None and mkr in unicode_fields:
                    cur_encoding = 'utf8'
                else:
                    cur_encoding = encoding
                if re.search(_is_value, value):
                    l.append((u("\\%s %s\n") % (mkr, value)).encode(cur_encoding, errors))
                else:
                    l.append((u("\\%s%s\n") % (mkr, value)).encode(cur_encoding, errors))
            else:
                if re.search(_is_value, value):
                    l.append("\\%s %s\n" % (mkr, value))
                else:
                    l.append("\\%s%s\n" % (mkr, value))
    # Skip the separator inserted before the first record.
    return ''.join(l[1:])
class ToolboxSettings(StandardFormat):
    """This class is the base class for settings files."""

    def __init__(self):
        super(ToolboxSettings, self).__init__()

    def parse(self, encoding=None, errors='strict', **kwargs):
        """
        Return the contents of toolbox settings file with a nested structure.

        :param encoding: encoding used by settings file
        :type encoding: str
        :param errors: Error handling scheme for codec. Same as ``decode()`` builtin method.
        :type errors: str
        :param kwargs: Keyword arguments passed to ``StandardFormat.fields()``
        :type kwargs: dict
        :rtype: ElementTree._ElementInterface
        """
        builder = TreeBuilder()
        for marker, value in self.fields(encoding=encoding, errors=errors, **kwargs):
            # A leading '+' on the marker opens a nested block, a leading
            # '-' closes one; any other marker is a self-contained field.
            first = marker[0]
            if first == '+':
                builder.start(marker[1:], {})
                builder.data(value)
            elif first == '-':
                builder.end(marker[1:])
            else:
                builder.start(marker, {})
                builder.data(value)
                builder.end(marker)
        return builder.close()
def to_settings_string(tree, encoding=None, errors='strict', unicode_fields=None):
    """Serialize a settings element tree back into standard-format markup."""
    parts = []
    _to_settings_string(tree.getroot(), parts,
                        encoding=encoding, errors=errors,
                        unicode_fields=unicode_fields)
    return ''.join(parts)


def _to_settings_string(node, acc, **kwargs):
    """Recursively append the standard-format lines for *node* to *acc*."""
    tag = node.tag
    text = node.text
    if len(node):
        # Interior node: wrap the children in a "\+tag" ... "\-tag" block.
        acc.append('\\+%s %s\n' % (tag, text) if text else '\\+%s\n' % tag)
        for child in node:
            _to_settings_string(child, acc, **kwargs)
        acc.append('\\-%s\n' % tag)
    else:
        # Leaf node: one field line, with the value when there is one.
        acc.append('\\%s %s\n' % (tag, text) if text else '\\%s\n' % tag)
def remove_blanks(elem):
    """
    Remove all elements and subelements with no text and no child elements.

    :param elem: toolbox data in an elementtree structure
    :type elem: ElementTree._ElementInterface
    """
    kept = []
    for child in elem:
        # Prune bottom-up so a child that becomes empty is also dropped.
        remove_blanks(child)
        if child.text or len(child):
            kept.append(child)
    elem[:] = kept
def add_default_fields(elem, default_fields):
    """
    Add blank elements and subelements specified in default_fields.

    :param elem: toolbox data in an elementtree structure
    :type elem: ElementTree._ElementInterface
    :param default_fields: fields to add to each type of element and subelement
    :type default_fields: dict(tuple)
    """
    # Fill in this element's own missing defaults first, then recurse into
    # every child (including any subelement just added).
    for name in default_fields.get(elem.tag, ()):
        if elem.find(name) is None:
            SubElement(elem, name)
    for sub in elem:
        add_default_fields(sub, default_fields)
def sort_fields(elem, field_orders):
    """
    Sort the elements and subelements in order specified in field_orders.

    :param elem: toolbox data in an elementtree structure
    :type elem: ElementTree._ElementInterface
    :param field_orders: order of fields for each type of element and subelement
    :type field_orders: dict(tuple)
    """
    # Convert each ordered tuple of subfield tags into a tag -> rank map.
    order_dicts = {
        field: {sub: rank for rank, sub in enumerate(order)}
        for field, order in field_orders.items()
    }
    _sort_fields(elem, order_dicts)


def _sort_fields(elem, orders_dicts):
    """sort the children of elem"""
    order = orders_dicts.get(elem.tag)
    if order is not None:
        # Stable sort: tags without an assigned rank sink to the end (1e9),
        # and the original index breaks ties.
        keyed = [((order.get(child.tag, 1e9), i), child)
                 for i, child in enumerate(elem)]
        keyed.sort()
        elem[:] = [child for _, child in keyed]
    for child in elem:
        if len(child):
            _sort_fields(child, orders_dicts)
def add_blank_lines(tree, blanks_before, blanks_between):
    """
    Add blank lines before all elements and subelements specified in
    blanks_before, and between consecutive same-tag elements specified in
    blanks_between.

    A "blank line" is produced by appending a newline to the text of the
    last (deepest, document-order) element of the preceding sibling.

    :param tree: toolbox data in an elementtree structure
    :type tree: ElementTree._ElementInterface
    :param blanks_before: elements and subelements to add blank lines before
    :type blanks_before: dict(tuple)
    :param blanks_between: elements and subelements to add blank lines between
    :type blanks_between: dict(tuple)
    """
    try:
        before = blanks_before[tree.tag]
        between = blanks_between[tree.tag]
    except KeyError:
        # No blank-line rules at this level; just recurse into subtrees.
        for elem in tree:
            if len(elem):
                add_blank_lines(elem, blanks_before, blanks_between)
    else:
        last_elem = None
        for elem in tree:
            tag = elem.tag
            # Nothing precedes the first child, so no blank line can be
            # inserted before it.  (The original code crashed here with an
            # AttributeError when the first child's tag was in `between`.)
            if last_elem is not None:
                if last_elem.tag != tag:
                    # Transition to a new field type: blank line *before* it.
                    if tag in before:
                        e = list(last_elem.iter())[-1]
                        e.text = (e.text or "") + "\n"
                else:
                    # Repeat of the same field type: blank line *between*.
                    if tag in between:
                        e = list(last_elem.iter())[-1]
                        e.text = (e.text or "") + "\n"
            if len(elem):
                add_blank_lines(elem, blanks_before, blanks_between)
            last_elem = elem
def demo():
    """Demonstrate parsing a toolbox lexicon and round-tripping a settings file."""
    from itertools import islice

    # zip_path = find('corpora/toolbox.zip')
    # lexicon = ToolboxData(ZipFilePathPointer(zip_path, 'toolbox/rotokas.dic')).parse()
    file_path = find('corpora/toolbox/rotokas.dic')
    lexicon = ToolboxData(file_path).parse()
    print('first field in fourth record:')
    print(lexicon[3][0].tag)
    print(lexicon[3][0].text)

    print('\nfields in sequential order:')
    for field in islice(lexicon.find('record'), 10):
        print(field.tag, field.text)

    print('\nlx fields:')
    for field in islice(lexicon.findall('record/lx'), 10):
        print(field.text)

    # Settings files share the standard format; parse one unwrapped and
    # serialize it back to standard-format text.
    settings = ToolboxSettings()
    file_path = find('corpora/toolbox/MDF/MDF_AltH.typ')
    settings.open(file_path)
    # settings.open(ZipFilePathPointer(zip_path, entry='toolbox/MDF/MDF_AltH.typ'))
    tree = settings.parse(unwrap=False, encoding='cp1252')
    print(tree.find('expset/expMDF/rtfPageSetup/paperSize').text)
    settings_tree = ElementTree(tree)
    print(to_settings_string(settings_tree).encode('utf8'))


if __name__ == '__main__':
    demo()
| gpl-3.0 |
tokuhirom/ycmd | cpp/ycm/tests/gmock/scripts/gmock_doctor.py | 89 | 24090 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Converts compiler's errors in code using Google Mock to plain English."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import re
import sys
# Version and contact address reported by the tool.
_VERSION = '1.0.3'

_EMAIL = 'googlemock@googlegroups.com'

# Google Mock names that live in the ``testing`` namespace.  Used to decide
# whether an undeclared identifier in a compiler error is likely a forgotten
# ``using testing::X;`` declaration.
_COMMON_GMOCK_SYMBOLS = [
    # Matchers
    '_',
    'A',
    'AddressSatisfies',
    'AllOf',
    'An',
    'AnyOf',
    'ContainerEq',
    'Contains',
    'ContainsRegex',
    'DoubleEq',
    'ElementsAre',
    'ElementsAreArray',
    'EndsWith',
    'Eq',
    'Field',
    'FloatEq',
    'Ge',
    'Gt',
    'HasSubstr',
    'IsInitializedProto',
    'Le',
    'Lt',
    'MatcherCast',
    'Matches',
    'MatchesRegex',
    'NanSensitiveDoubleEq',
    'NanSensitiveFloatEq',
    'Ne',
    'Not',
    'NotNull',
    'Pointee',
    'Property',
    'Ref',
    'ResultOf',
    'SafeMatcherCast',
    'StartsWith',
    'StrCaseEq',
    'StrCaseNe',
    'StrEq',
    'StrNe',
    'Truly',
    'TypedEq',
    'Value',

    # Actions
    'Assign',
    'ByRef',
    'DeleteArg',
    'DoAll',
    'DoDefault',
    'IgnoreResult',
    'Invoke',
    'InvokeArgument',
    'InvokeWithoutArgs',
    'Return',
    'ReturnNew',
    'ReturnNull',
    'ReturnRef',
    'SaveArg',
    'SetArgReferee',
    'SetArgPointee',
    'SetArgumentPointee',
    'SetArrayArgument',
    'SetErrnoAndReturn',
    'Throw',
    'WithArg',
    'WithArgs',
    'WithoutArgs',

    # Cardinalities
    'AnyNumber',
    'AtLeast',
    'AtMost',
    'Between',
    'Exactly',

    # Sequences
    'InSequence',
    'Sequence',

    # Misc
    'DefaultValue',
    'Mock',
    ]

# Regex for matching source file path and line number in the compiler's errors.
_GCC_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(\d+:)?\s+'
_CLANG_FILE_LINE_RE = r'(?P<file>.*):(?P<line>\d+):(?P<column>\d+):\s+'
# Like the clang pattern, but refuses to match locations inside gmock's own
# headers so that diagnoses point at the user's code.
_CLANG_NON_GMOCK_FILE_LINE_RE = (
    r'(?P<file>.*[/\\^](?!gmock-)[^/\\]+):(?P<line>\d+):(?P<column>\d+):\s+')
def _FindAllMatches(regex, s):
"""Generates all matches of regex in string s."""
r = re.compile(regex)
return r.finditer(s)
def _GenericDiagnoser(short_name, long_name, diagnoses, msg):
"""Diagnoses the given disease by pattern matching.
Can provide different diagnoses for different patterns.
Args:
short_name: Short name of the disease.
long_name: Long name of the disease.
diagnoses: A list of pairs (regex, pattern for formatting the diagnosis
for matching regex).
msg: Compiler's error messages.
Yields:
Tuples of the form
(short name of disease, long name of disease, diagnosis).
"""
for regex, diagnosis in diagnoses:
if re.search(regex, msg):
diagnosis = '%(file)s:%(line)s:' + diagnosis
for m in _FindAllMatches(regex, msg):
yield (short_name, long_name, diagnosis % m.groupdict())
def _NeedToReturnReferenceDiagnoser(msg):
  """Diagnoses the NRR disease, given the error messages by the compiler."""

  # GCC, older Clang, and Clang 11+ each phrase this error differently, so a
  # separate pattern is needed for each; only the middle one can recover the
  # concrete return type.
  gcc_regex = (r'In member function \'testing::internal::ReturnAction<R>.*\n'
               + _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: creating array with negative size')
  clang_regex = (r'error:.*array.*negative.*\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of function template specialization '
                 r'\'testing::internal::ReturnAction<(?P<type>.*)>'
                 r'::operator Action<.*>\' requested here')
  clang11_re = (r'use_ReturnRef_instead_of_Return_to_return_a_reference.*'
                r'(.*\n)*?' + _CLANG_NON_GMOCK_FILE_LINE_RE)
  diagnosis = """
You are using a Return() action in a function that returns a reference to
%(type)s. Please use ReturnRef() instead."""
  return _GenericDiagnoser('NRR', 'Need to Return Reference',
                           [(clang_regex, diagnosis),
                            (clang11_re, diagnosis % {'type': 'a type'}),
                            (gcc_regex, diagnosis % {'type': 'a type'})],
                           msg)
def _NeedToReturnSomethingDiagnoser(msg):
  """Diagnoses the NRS disease, given the error messages by the compiler."""

  # GCC reports either a void-value error or control falling off the end of a
  # non-void function; Clang reports a failed return-object initialization
  # (two variants, with and without the 'Result' alias).
  gcc_regex = (_GCC_FILE_LINE_RE + r'(instantiated from here\n.'
               r'*gmock.*actions\.h.*error: void value not ignored)'
               r'|(error: control reaches end of non-void function)')
  clang_regex1 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'Result\' \(aka \'(?P<return_type>.*)\'\) '
                  r'with an rvalue of type \'void\'')
  clang_regex2 = (_CLANG_FILE_LINE_RE +
                  r'error: cannot initialize return object '
                  r'of type \'(?P<return_type>.*)\' '
                  r'with an rvalue of type \'void\'')
  diagnosis = """
You are using an action that returns void, but it needs to return
%(return_type)s. Please tell it *what* to return. Perhaps you can use
the pattern DoAll(some_action, Return(some_value))?"""
  return _GenericDiagnoser(
      'NRS',
      'Need to Return Something',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _NeedToReturnNothingDiagnoser(msg):
  """Diagnoses the NRN disease, given the error messages by the compiler."""

  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gmock-actions\.h.*error: instantiation of '
               r'\'testing::internal::ReturnAction<R>::Impl<F>::value_\' '
               r'as type \'void\'')
  # clang_regex1 matches a plain Return() action; clang_regex2 matches an
  # action composed through DoBothAction (i.e. inside a DoAll()).
  clang_regex1 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::ReturnAction<(?P<return_type>.*)>'
                  r'::operator Action<void \(.*\)>\' requested here')
  clang_regex2 = (r'error: field has incomplete type '
                  r'\'Result\' \(aka \'void\'\)(\r)?\n'
                  r'(.*\n)*?' +
                  _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                  r'of function template specialization '
                  r'\'testing::internal::DoBothAction<.*>'
                  r'::operator Action<(?P<return_type>.*) \(.*\)>\' '
                  r'requested here')
  diagnosis = """
You are using an action that returns %(return_type)s, but it needs to return
void. Please use a void-returning action instead.
All actions but the last in DoAll(...) must return void. Perhaps you need
to re-arrange the order of actions in a DoAll(), if you are using one?"""
  return _GenericDiagnoser(
      'NRN',
      'Need to Return Nothing',
      [(gcc_regex, diagnosis % {'return_type': '*something*'}),
       (clang_regex1, diagnosis),
       (clang_regex2, diagnosis)],
      msg)
def _IncompleteByReferenceArgumentDiagnoser(msg):
  """Diagnoses the IBRA disease, given the error messages by the compiler."""

  # Both compilers fail inside gtest's universal printer when it applies
  # sizeof to a forward-declared argument type.
  gcc_regex = (_GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*gtest-printers\.h.*error: invalid application of '
               r'\'sizeof\' to incomplete type \'(?P<type>.*)\'')
  clang_regex = (r'.*gtest-printers\.h.*error: invalid application of '
                 r'\'sizeof\' to an incomplete type '
                 r'\'(?P<type>.*)( const)?\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'note: in instantiation of member function '
                 r'\'testing::internal2::TypeWithoutFormatter<.*>::'
                 r'PrintValue\' requested here')
  diagnosis = """
In order to mock this function, Google Mock needs to see the definition
of type "%(type)s" - declaration alone is not enough. Either #include
the header that defines it, or change the argument to be passed
by pointer."""
  return _GenericDiagnoser('IBRA', 'Incomplete By-Reference Argument Type',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionMatcherDiagnoser(msg):
  """Diagnoses the OFM disease, given the error messages by the compiler."""

  # Overload resolution for Truly() fails when given an unresolved overload
  # set; the user must disambiguate with a static_cast.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Truly\(<unresolved overloaded function type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function for '
                 r'call to \'Truly')
  diagnosis = """
The argument you gave to Truly() is an overloaded function. Please tell
your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool Foo(int n);
you should write
Truly(static_cast<bool (*)(int n)>(Foo))"""
  return _GenericDiagnoser('OFM', 'Overloaded Function Matcher',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedFunctionActionDiagnoser(msg):
  """Diagnoses the OFA disease, given the error messages by the compiler."""

  # An overloaded free function passed to Invoke() leaves the FunctionImpl
  # template parameter undeducible.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for call to '
               r'\'Invoke\(<unresolved overloaded function type>')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching '
                 r'function for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-\w+-actions\.h:\d+:\d+:\s+'
                 r'note: candidate template ignored:\s+'
                 r'couldn\'t infer template argument \'FunctionImpl\'')
  diagnosis = """
Function you are passing to Invoke is overloaded. Please tell your compiler
which overloaded version you want to use.
For example, if you want to use the version whose signature is
bool MyFunction(int n, double x);
you should write something like
Invoke(static_cast<bool (*)(int n, double x)>(MyFunction))"""
  return _GenericDiagnoser('OFA', 'Overloaded Function Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _OverloadedMethodActionDiagnoser(msg):
  """Diagnoses the OMA disease, given the error messages by the compiler."""

  # Same as OFA, but for the two-argument Invoke(object, method) form with an
  # overloaded member function pointer.
  gcc_regex = (_GCC_FILE_LINE_RE + r'error: no matching function for '
               r'call to \'Invoke\(.+, <unresolved overloaded function '
               r'type>\)')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: no matching function '
                 r'for call to \'Invoke\'\r?\n'
                 r'(.*\n)*?'
                 r'.*\bgmock-\w+-actions\.h:\d+:\d+: '
                 r'note: candidate function template not viable: '
                 r'requires .*, but 2 (arguments )?were provided')
  diagnosis = """
The second argument you gave to Invoke() is an overloaded method. Please
tell your compiler which overloaded version you want to use.
For example, if you want to use the version whose signature is
class Foo {
...
bool Bar(int n, double x);
};
you should write something like
Invoke(foo, static_cast<bool (Foo::*)(int n, double x)>(&Foo::Bar))"""
  return _GenericDiagnoser('OMA', 'Overloaded Method Action',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
def _MockObjectPointerDiagnoser(msg):
  """Diagnoses the MOP disease, given the error messages by the compiler."""

  gcc_regex = (_GCC_FILE_LINE_RE + r'error: request for member '
               r'\'gmock_(?P<method>.+)\' in \'(?P<mock_object>.+)\', '
               r'which is of non-class type \'(.*::)*(?P<class_name>.+)\*\'')
  clang_regex = (_CLANG_FILE_LINE_RE + r'error: member reference type '
                 r'\'(?P<class_name>.*?) *\' is a pointer; '
                 r'maybe you meant to use \'->\'\?')
  diagnosis = """
The first argument to ON_CALL() and EXPECT_CALL() must be a mock *object*,
not a *pointer* to it. Please write '*(%(mock_object)s)' instead of
'%(mock_object)s' as your first argument.
For example, given the mock class:
class %(class_name)s : public ... {
...
MOCK_METHOD0(%(method)s, ...);
};
and the following mock instance:
%(class_name)s* mock_ptr = ...
you should use the EXPECT_CALL like this:
EXPECT_CALL(*mock_ptr, %(method)s(...));"""
  # Clang's message only reveals the class name, so generic placeholders are
  # substituted for the object and method names.
  return _GenericDiagnoser(
      'MOP',
      'Mock Object Pointer',
      [(gcc_regex, diagnosis),
       (clang_regex, diagnosis % {'mock_object': 'mock_object',
                                  'method': 'method',
                                  'class_name': '%(class_name)s'})],
      msg)
def _NeedToUseSymbolDiagnoser(msg):
  """Diagnoses the NUS disease, given the error messages by the compiler."""

  gcc_regex = (_GCC_FILE_LINE_RE + r'error: \'(?P<symbol>.+)\' '
               r'(was not declared in this scope|has not been declared)')
  clang_regex = (_CLANG_FILE_LINE_RE +
                 r'error: (use of undeclared identifier|unknown type name|'
                 r'no template named) \'(?P<symbol>[^\']+)\'')
  diagnosis = """
'%(symbol)s' is defined by Google Mock in the testing namespace.
Did you forget to write
using testing::%(symbol)s;
?"""
  # Unlike most diagnosers this one cannot use _GenericDiagnoser, because
  # matches must be filtered to identifiers Google Mock actually defines.
  for m in (list(_FindAllMatches(gcc_regex, msg)) +
            list(_FindAllMatches(clang_regex, msg))):
    symbol = m.groupdict()['symbol']
    if symbol in _COMMON_GMOCK_SYMBOLS:
      yield ('NUS', 'Need to Use Symbol', diagnosis % m.groupdict())
def _NeedToUseReturnNullDiagnoser(msg):
  """Diagnoses the NRNULL disease, given the error messages by the compiler."""

  # Return(NULL) instantiates ReturnAction<int> (or <long>), whose implicit
  # cast to the target pointer type then fails inside gmock-actions.h.
  gcc_regex = ('instantiated from \'testing::internal::ReturnAction<R>'
               '::operator testing::Action<Func>\(\) const.*\n' +
               _GCC_FILE_LINE_RE + r'instantiated from here\n'
               r'.*error: no matching function for call to \'ImplicitCast_\('
               r'(:?long )?int&\)')
  clang_regex = (r'\bgmock-actions.h:.* error: no matching function for '
                 r'call to \'ImplicitCast_\'\r?\n'
                 r'(.*\n)*?' +
                 _CLANG_NON_GMOCK_FILE_LINE_RE + r'note: in instantiation '
                 r'of function template specialization '
                 r'\'testing::internal::ReturnAction<(int|long)>::operator '
                 r'Action<(?P<type>.*)\(\)>\' requested here')
  diagnosis = """
You are probably calling Return(NULL) and the compiler isn't sure how to turn
NULL into %(type)s. Use ReturnNull() instead.
Note: the line number may be off; please fix all instances of Return(NULL)."""
  return _GenericDiagnoser(
      'NRNULL', 'Need to use ReturnNull',
      [(clang_regex, diagnosis),
       (gcc_regex, diagnosis % {'type': 'the right type'})],
      msg)
def _TypeInTemplatedBaseDiagnoser(msg):
  """Diagnoses the TTB disease, given the error messages by the compiler."""

  # This version works when the type is used as the mock function's return
  # type.
  gcc_4_3_1_regex_type_in_retval = (
      r'In member function \'int .*\n' + _GCC_FILE_LINE_RE +
      r'error: a function call cannot appear in a constant-expression')
  gcc_4_4_0_regex_type_in_retval = (
      r'error: a function call cannot appear in a constant-expression'
      + _GCC_FILE_LINE_RE + r'error: template argument 1 is invalid\n')
  # This version works when the type is used as the mock function's sole
  # parameter type.
  gcc_regex_type_of_sole_param = (
      _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n')
  # This version works when the type is used as a parameter of a mock
  # function that has multiple parameters.
  gcc_regex_type_of_a_param = (
      r'error: expected `;\' before \'::\' token\n'
      + _GCC_FILE_LINE_RE +
      r'error: \'(?P<type>.+)\' was not declared in this scope\n'
      r'.*error: template argument 1 is invalid\n'
      r'.*error: \'.+\' was not declared in this scope')
  clang_regex_type_of_retval_or_sole_param = (
      _CLANG_FILE_LINE_RE +
      r'error: use of undeclared identifier \'(?P<type>.*)\'\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):\d+: error: '
      r'non-friend class member \'Result\' cannot have a qualified name'
      )
  clang_regex_type_of_a_param = (
      _CLANG_FILE_LINE_RE +
      r'error: C\+\+ requires a type specifier for all declarations\n'
      r'(.*\n)*?'
      r'(?P=file):(?P=line):(?P=column): error: '
      r'C\+\+ requires a type specifier for all declarations'
      )
  # Clang sometimes folds this case into a generic unknown-type-name error;
  # matches are filtered below so they don't clash with the NUS diagnoser.
  clang_regex_unknown_type = (
      _CLANG_FILE_LINE_RE +
      r'error: unknown type name \'(?P<type>[^\']+)\''
      )
  diagnosis = """
In a mock class template, types or typedefs defined in the base class
template are *not* automatically visible. This is how C++ works. Before
you can use a type or typedef named %(type)s defined in base class Base<T>, you
need to make it visible. One way to do it is:
typedef typename Base<T>::%(type)s %(type)s;"""
  for diag in _GenericDiagnoser(
      'TTB', 'Type in Template Base',
      [(gcc_4_3_1_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_4_4_0_regex_type_in_retval, diagnosis % {'type': 'Foo'}),
       (gcc_regex_type_of_sole_param, diagnosis),
       (gcc_regex_type_of_a_param, diagnosis),
       (clang_regex_type_of_retval_or_sole_param, diagnosis),
       (clang_regex_type_of_a_param, diagnosis % {'type': 'Foo'})],
      msg):
    yield diag
  # Avoid overlap with the NUS pattern.
  for m in _FindAllMatches(clang_regex_unknown_type, msg):
    type_ = m.groupdict()['type']
    if type_ not in _COMMON_GMOCK_SYMBOLS:
      yield ('TTB', 'Type in Template Base', diagnosis % m.groupdict())
def _WrongMockMethodMacroDiagnoser(msg):
  """Diagnoses the WMM disease, given the error messages by the compiler."""

  gcc_regex = (_GCC_FILE_LINE_RE +
               r'.*this_method_does_not_take_(?P<wrong_args>\d+)_argument.*\n'
               r'.*\n'
               r'.*candidates are.*FunctionMocker<[^>]+A(?P<args>\d+)\)>')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error:.*array.*negative.*r?\n'
                 r'(.*\n)*?'
                 r'(?P=file):(?P=line):(?P=column): error: too few arguments '
                 r'to function call, expected (?P<args>\d+), '
                 r'have (?P<wrong_args>\d+)')
  clang11_re = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                r'.*this_method_does_not_take_'
                r'(?P<wrong_args>\d+)_argument.*')
  diagnosis = """
You are using MOCK_METHOD%(wrong_args)s to define a mock method that has
%(args)s arguments. Use MOCK_METHOD%(args)s (or MOCK_CONST_METHOD%(args)s,
MOCK_METHOD%(args)s_T, MOCK_CONST_METHOD%(args)s_T as appropriate) instead."""
  # The Clang 11+ message no longer reports the argument counts, so generic
  # 'm' and 'n' placeholders are substituted.
  return _GenericDiagnoser('WMM', 'Wrong MOCK_METHODn Macro',
                           [(gcc_regex, diagnosis),
                            (clang11_re, diagnosis % {'wrong_args': 'm',
                                                      'args': 'n'}),
                            (clang_regex, diagnosis)],
                           msg)
def _WrongParenPositionDiagnoser(msg):
  """Diagnoses the WPP disease, given the error messages by the compiler."""

  # A clause like .WillOnce() written *inside* the EXPECT_CALL parentheses
  # becomes a member lookup on MockSpec, which fails.
  gcc_regex = (_GCC_FILE_LINE_RE +
               r'error:.*testing::internal::MockSpec<.* has no member named \''
               r'(?P<method>\w+)\'')
  clang_regex = (_CLANG_NON_GMOCK_FILE_LINE_RE +
                 r'error: no member named \'(?P<method>\w+)\' in '
                 r'\'testing::internal::MockSpec<.*>\'')
  diagnosis = """
The closing parenthesis of ON_CALL or EXPECT_CALL should be *before*
".%(method)s". For example, you should write:
EXPECT_CALL(my_mock, Foo(_)).%(method)s(...);
instead of:
EXPECT_CALL(my_mock, Foo(_).%(method)s(...));"""
  return _GenericDiagnoser('WPP', 'Wrong Parenthesis Position',
                           [(gcc_regex, diagnosis),
                            (clang_regex, diagnosis)],
                           msg)
# All diagnosers, in alphabetical order; Diagnose() runs every one of them
# against the compiler output and collects the unique results.
_DIAGNOSERS = [
    _IncompleteByReferenceArgumentDiagnoser,
    _MockObjectPointerDiagnoser,
    _NeedToReturnNothingDiagnoser,
    _NeedToReturnReferenceDiagnoser,
    _NeedToReturnSomethingDiagnoser,
    _NeedToUseReturnNullDiagnoser,
    _NeedToUseSymbolDiagnoser,
    _OverloadedFunctionActionDiagnoser,
    _OverloadedFunctionMatcherDiagnoser,
    _OverloadedMethodActionDiagnoser,
    _TypeInTemplatedBaseDiagnoser,
    _WrongMockMethodMacroDiagnoser,
    _WrongParenPositionDiagnoser,
    ]
def Diagnose(msg):
  """Generates all possible diagnoses given the compiler error message.

  Args:
    msg: Compiler's error messages (may contain ANSI color codes and
        UTF-8 curly quotes, which are normalized away first).

  Returns:
    A list of unique diagnosis strings, in discovery order.
  """

  msg = re.sub(r'\x1b\[[^m]*m', '', msg)  # Strips all color formatting.
  # Assuming the string is using the UTF-8 encoding, replaces the left and
  # the right single quote characters with apostrophes.
  msg = re.sub(r'(\xe2\x80\x98|\xe2\x80\x99)', "'", msg)

  diagnoses = []
  for diagnoser in _DIAGNOSERS:
    for diag in diagnoser(msg):
      diagnosis = '[%s - %s]\n%s' % diag
      # Several diagnosers can report the same problem; keep the first.
      if diagnosis not in diagnoses:
        diagnoses.append(diagnosis)
  return diagnoses
def main():
  """Reads compiler errors from stdin and prints the diagnoses to stdout."""
  print ('Google Mock Doctor v%s - '
         'diagnoses problems in code using Google Mock.' % _VERSION)

  if sys.stdin.isatty():
    print ('Please copy and paste the compiler errors here. Press c-D when '
           'you are done:')
  else:
    # Input is being piped in; no interactive prompt needed.
    print 'Waiting for compiler errors on stdin . . .'

  msg = sys.stdin.read().strip()
  diagnoses = Diagnose(msg)
  count = len(diagnoses)
  if not count:
    # Nothing matched: echo the input back so the user can report it.
    print ("""
Your compiler complained:
8<------------------------------------------------------------
%s
------------------------------------------------------------>8
Uh-oh, I'm not smart enough to figure out what the problem is. :-(
However...
If you send your source code and the compiler's error messages to
%s, you can be helped and I can get smarter --
win-win for us!""" % (msg, _EMAIL))
  else:
    print '------------------------------------------------------------'
    print 'Your code appears to have the following',
    if count > 1:
      print '%s diseases:' % (count,)
    else:
      print 'disease:'
    i = 0
    for d in diagnoses:
      i += 1
      if count > 1:
        print '\n#%s:' % (i,)
      print d
    print ("""
How did I do? If you think I'm wrong or unhelpful, please send your
source code and the compiler's error messages to %s.
Then you can be helped and I can get smarter -- I promise I won't be upset!""" %
           _EMAIL)


if __name__ == '__main__':
  main()
| gpl-3.0 |
nickleefly/youtube-dl | youtube_dl/extractor/regiotv.py | 99 | 2261 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
sanitized_Request,
xpath_text,
xpath_with_ns,
)
class RegioTVIE(InfoExtractor):
    """Extractor for videos hosted on regio-tv.de.

    The site exposes video metadata through a SOAP endpoint at v.telvi.de,
    keyed by a per-video token scraped from the watch page.
    """
    _VALID_URL = r'https?://(?:www\.)?regio-tv\.de/video/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.regio-tv.de/video/395808.html',
        'info_dict': {
            'id': '395808',
            'ext': 'mp4',
            'title': 'Wir in Ludwigsburg',
            'description': 'Mit unseren zuckersüßen Adventskindern, außerdem besuchen wir die Abendsterne!',
        }
    }, {
        'url': 'http://www.regio-tv.de/video/395808',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        # The SOAP service needs a per-video key embedded in the page's
        # player configuration.
        key = self._search_regex(
            r'key\s*:\s*(["\'])(?P<key>.+?)\1', webpage, 'key', group='key')
        title = self._og_search_title(webpage)

        SOAP_TEMPLATE = '<?xml version="1.0" encoding="utf-8"?><soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"><soap:Body><{0} xmlns="http://v.telvi.de/"><key xsi:type="xsd:string">{1}</key></{0}></soap:Body></soap:Envelope>'

        request = sanitized_Request(
            'http://v.telvi.de/',
            SOAP_TEMPLATE.format('GetHTML5VideoData', key).encode('utf-8'))
        video_data = self._download_xml(request, video_id, 'Downloading video XML')

        NS_MAP = {
            'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'soap': 'http://schemas.xmlsoap.org/soap/envelope/',
        }

        # The direct video URL is mandatory; thumbnail and description are
        # best-effort.
        video_url = xpath_text(
            video_data, xpath_with_ns('.//video', NS_MAP), 'video url', fatal=True)
        thumbnail = xpath_text(
            video_data, xpath_with_ns('.//image', NS_MAP), 'thumbnail')
        description = self._og_search_description(
            webpage) or self._html_search_meta('description', webpage)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| unlicense |
PalRist/SmartMirror | OLD/modules/tools/WeatherFunctions.py | 1 | 1212 | import requests
import json
import feedparser
import datetime as dt
# class Weather():
weather_api_token = '05e9697d6d051b6c5073f673544b5418'  # create account at https://darksky.net/dev/
weather_latg = 'nb'  # see https://darksky.net/dev/docs/forecast for full list of latguage parameters values
weather_unit = 'si'  # see https://darksky.net/dev/docs/forecast for full list of unit parameters values
# NOTE(review): evaluated once at import time, so a long-running process keeps
# a stale date -- confirm whether this should be recomputed per request.
ThisDate = dt.datetime.now().replace(microsecond=0).isoformat()
# Degrees (in weather_unit units); presumably used by callers elsewhere -- TODO confirm.
TempThreshold = 1
def minWeatherAtLocation(latitude, longitude):
    """Return the coldest hourly temperature recorded so far today at the
    given location, according to the Dark Sky API.

    :param latitude: latitude in decimal degrees
    :param longitude: longitude in decimal degrees
    :returns: the minimum hourly temperature (in ``weather_unit`` units,
        i.e. Celsius for 'si') from midnight through the current hour
    """
    # NOTE(review): Dark Sky spells the language parameter 'lang'; 'latg'
    # here is likely a typo carried through the module -- confirm.
    weather_req_url = (
        "https://api.darksky.net/forecast/%s/%s,%s,%s"
        "?exclude=currently,flags&latg=%s&units=%s" % (
            weather_api_token, latitude, longitude, ThisDate,
            weather_latg, weather_unit))
    r = requests.get(weather_req_url)
    weather_obj = json.loads(r.text)

    # Only consider hours that have already happened today.  Taking min()
    # over the actual readings fixes the original sentinel bug, where a day
    # whose coldest hour was above the hard-coded 100 would report 100.
    this_hour = dt.datetime.now().hour
    hourly = weather_obj['hourly']['data']
    return min(float(entry['temperature'])
               for entry in hourly[:this_hour + 1])
woodymit/millstone_accidental_source | genome_designer/main/xhr_handlers.py | 1 | 45292 | """
Methods that handle Ajax requests from the frontend.
This module was created in response to views.py getting quite big, and so a
reasonable separation point is to separate page actions from Ajax actions.
"""
import copy
import csv
import json
import os
from StringIO import StringIO
import tempfile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.servers.basehttp import FileWrapper
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponse
from django.http import HttpResponseBadRequest
from django.http import StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.views.decorators.http import require_GET
from django.views.decorators.http import require_POST
#from debug.debug_util import FakeException
from main.adapters import adapt_model_to_frontend
from main.adapters import adapt_experiment_samples_to_frontend
from main.exceptions import ValidationException
from main.model_views import adapt_gene_list_to_frontend
from main.model_views import get_all_fields
from main.model_views import adapt_variant_to_frontend
from main.models import AlignmentGroup
from main.models import Chromosome
from main.models import Dataset
from main.models import ExperimentSample
from main.models import ExperimentSampleToAlignment
from main.models import Project
from main.models import ReferenceGenome
from main.models import SavedVariantFilterQuery
from main.models import VariantCallerCommonData
from main.models import VariantAlternate
from main.models import VariantEvidence
from main.models import VariantSet
from main.models import S3File
from genome_finish import assembly
from utils.combine_reference_genomes import combine_list_allformats
from utils.data_export_util import export_melted_variant_view
from utils.import_util import add_dataset_to_entity
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import create_samples_from_row_data
from utils.import_util import create_sample_models_for_eventual_upload
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_reference_genome_from_ncbi
from utils.import_util import import_samples_from_targets_file
from utils.import_util import import_variant_set_from_vcf
from utils.optmage_util import ReplicationOriginParams
from utils.optmage_util import print_mage_oligos
from utils.reference_genome_maker_util import generate_new_reference_genome
from variants.common import determine_visible_field_names
from variants.filter_key_map_constants import MAP_KEY__ALTERNATE
from variants.filter_key_map_constants import MAP_KEY__COMMON_DATA
from variants.filter_key_map_constants import MAP_KEY__EVIDENCE
from variants.gene_query import lookup_genes
from variants.materialized_variant_filter import lookup_variants
from variants.materialized_view_manager import MeltedVariantMaterializedViewManager
from variants.variant_sets import update_variant_in_set_memberships
from variants.variant_sets import update_variant_in_set_memberships__all_matching_filter
if settings.S3_ENABLED:
from utils.import_util import parse_targets_file, import_reference_genome_from_s3, import_samples_from_s3
from s3 import s3_get_string
@login_required
def project_delete(request, project_uid):
    """Delete the requesting user's Project identified by project_uid.

    404s if the project does not exist or is not owned by the user.
    Returns JSON telling the client to redirect to the home page.
    """
    owned_project = get_object_or_404(
            Project, owner=request.user.get_profile(), uid=project_uid)
    owned_project.delete()
    payload = json.dumps({'redirect': '/'})
    return HttpResponse(payload, content_type='application/json')
@login_required
@require_POST
def create_ref_genome_from_browser_upload(request):
    """Handle request to create ReferenceGenome from a browser-uploaded file.

    Returns JSON with an 'error' key; empty string on success.
    """
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    uploaded_file = request.FILES['refGenomeFile']
    # Save uploaded ReferenceGenome to temp file, passing the original filename
    # as the suffix for easier debug.
    if not os.path.exists(settings.TEMP_FILE_ROOT):
        os.mkdir(settings.TEMP_FILE_ROOT)
    _, temp_file_location = tempfile.mkstemp(
            suffix='_' + uploaded_file.name,
            dir=settings.TEMP_FILE_ROOT)
    # Write in binary chunks. The previous code slurped the whole upload
    # into memory with .read() (genomes can be large) and re-looked-up
    # request.FILES instead of reusing uploaded_file.
    with open(temp_file_location, 'wb') as temp_file_fh:
        for chunk in uploaded_file.chunks():
            temp_file_fh.write(chunk)
    error_string = ''
    try:
        import_reference_genome_from_local_file(
                project,
                request.POST['refGenomeLabel'],
                temp_file_location,
                request.POST['importFileFormat'],
                move=True)
    except Exception as e:
        error_string = str(e)
    result = {
        'error': error_string,
    }
    return HttpResponse(json.dumps(result), content_type='application/json')
@login_required
@require_POST
def create_ref_genome_from_server_location(request):
    """Create a ReferenceGenome from a file path already on the server.

    Returns JSON with an 'error' key; empty string on success.
    """
    owning_project = get_object_or_404(
            Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    error_message = ''
    try:
        import_reference_genome_from_local_file(
                owning_project,
                request.POST['refGenomeLabel'],
                request.POST['refGenomeFileLocation'],
                request.POST['importFileFormat'])
    except Exception as e:
        error_message = str(e)
    return HttpResponse(
            json.dumps({'error': error_message}),
            content_type='application/json')
@login_required
@require_POST
def create_ref_genome_from_ncbi(request):
    """Create a ReferenceGenome by fetching an accession from NCBI.

    Returns JSON with an 'error' key; empty string on success.
    """
    owning_project = get_object_or_404(
            Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    error_message = ''
    try:
        import_reference_genome_from_ncbi(
                owning_project,
                request.POST['refGenomeLabel'],
                request.POST['refGenomeAccession'],
                request.POST['importFileFormat'])
    except Exception as e:
        error_message = str(e)
    return HttpResponse(
            json.dumps({'error': error_message}),
            content_type='application/json')
@login_required
@require_POST
def ref_genomes_delete(request):
    """Delete the ReferenceGenomes named in the JSON request body.

    404s unless every requested uid exists and belongs to the
    requesting user.
    """
    payload = json.loads(request.body)
    uid_list = payload.get('refGenomeUidList', [])
    if not uid_list:
        raise Http404
    # Ownership check: the queryset only matches genomes inside this
    # user's projects, so a count mismatch means a foreign/unknown uid.
    owned_genomes = ReferenceGenome.objects.filter(
            project__owner=request.user.get_profile(),
            uid__in=uid_list)
    if len(owned_genomes) != len(uid_list):
        raise Http404
    owned_genomes.delete()
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_POST
def ref_genomes_concatenate(request):
    """Concatenates ReferenceGenomes into a new combined genome.

    Expects POST 'data' as JSON with keys 'refGenomeUidList' and
    'newGenomeLabel'. Returns an empty JSON object on success.
    """
    request_data = json.loads(request.POST['data'])
    ref_genome_uid_list = request_data['refGenomeUidList']
    if len(ref_genome_uid_list) == 0:
        raise Http404
    new_genome_label = request_data['newGenomeLabel']
    if len(new_genome_label) == 0:
        raise Http404
    # First make sure all the genomes belong to this user.
    ref_genomes_to_concatenate = ReferenceGenome.objects.filter(
            project__owner=request.user.get_profile(),
            uid__in=ref_genome_uid_list)
    if not len(ref_genomes_to_concatenate) == len(ref_genome_uid_list):
        raise Http404
    # Validation successful, concatenate. The return value was previously
    # bound to an unused local; the combined genome is persisted by the
    # call itself.
    project = ref_genomes_to_concatenate[0].project
    combine_list_allformats(
            ref_genomes_to_concatenate, new_genome_label, project)
    # Return success response.
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_GET
def ref_genomes_download(request):
    """Streams the requested fasta/genbank file as an attachment.

    GET params:
        file_format: 'fasta' or 'genbank' (anything else 404s).
        reference_genome_uid: uid of the ReferenceGenome.
    """
    file_format = request.GET['file_format']
    reference_genome = get_object_or_404(ReferenceGenome,
            uid=request.GET['reference_genome_uid'])
    if file_format == 'fasta':
        file_path = reference_genome.dataset_set.get(
                type=Dataset.TYPE.REFERENCE_GENOME_FASTA).get_absolute_location()
        file_name = '.'.join([reference_genome.label, 'fa'])
    elif file_format == 'genbank':
        file_path = reference_genome.dataset_set.get(
                type=Dataset.TYPE.REFERENCE_GENOME_GENBANK).get_absolute_location()
        file_name = '.'.join([reference_genome.label, 'gb'])
    else:
        raise Http404
    # open() in binary mode replaces the removed-in-Py3 file() builtin and
    # guarantees the streamed byte count matches os.path.getsize() (text
    # mode could translate newlines and desync Content-Length).
    wrapper = FileWrapper(open(file_path, 'rb'))
    response = StreamingHttpResponse(wrapper, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename="{0}"'.format(
            file_name)
    response['Content-Length'] = os.path.getsize(file_path)
    return response
@login_required
@require_POST
def variant_sets_delete(request):
    """Deletes a list of variant sets.

    404s if the uid list is missing/empty or includes sets not owned by
    the requesting user.
    """
    request_data = json.loads(request.body)
    # Default to [] so a request without the key 404s cleanly rather than
    # raising TypeError on len(None). Matches ref_genomes_delete and
    # samples_delete.
    variant_set_uid_list = request_data.get('variantSetUidList', [])
    if len(variant_set_uid_list) == 0:
        raise Http404
    # First make sure all the sets belong to this user.
    variant_sets_to_delete = VariantSet.objects.filter(
            reference_genome__project__owner=request.user.get_profile(),
            uid__in=variant_set_uid_list)
    if not len(variant_sets_to_delete) == len(variant_set_uid_list):
        raise Http404
    # Validation successful, delete.
    variant_sets_to_delete.delete()
    # Return success response.
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_POST
def save_variant_filter(request):
    """Persist a variant filter query string for the requesting user.

    Returns JSON describing the saved filter so the client can render it.
    """
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    filter_text = request.POST.get('filterText', '')
    if not filter_text:
        raise Http404("Nothing to save.")
    # get_or_create makes saving the same text twice a no-op.
    saved_query, _ = SavedVariantFilterQuery.objects.get_or_create(
            owner=project.owner,
            text=filter_text)
    response_payload = {
        'savedFilter': {
            'uid': saved_query.uid,
            'text': saved_query.text
        }
    }
    return HttpResponse(
            json.dumps(response_payload), content_type='application/json')
@login_required
@require_POST
def delete_variant_filter(request):
    """Delete a SavedVariantFilterQuery owned by the requesting user.

    Returns 400 if no uid was provided, 404 if no matching filter exists.
    """
    # Read params / validate.
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    filter_uid = request.POST.get('uid')
    if not filter_uid:
        return HttpResponseBadRequest()
    # 404 on an unknown uid rather than letting DoesNotExist bubble up
    # as a 500.
    saved_filter = get_object_or_404(SavedVariantFilterQuery,
            owner=project.owner, uid=filter_uid)
    saved_filter.delete()
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_POST
def upload_single_sample(request):
    """Handle browser upload of one sample (fastq1, optional fastq2).

    Saves the uploaded read files to temp storage and creates the sample
    models. Returns JSON with an 'error' key on failure.
    """
    # Read params / validate.
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST['projectUid'])
    if not 'fastq1' in request.FILES:
        raise Http404
    sample_label = request.POST.get('sampleLabel', None)
    if not sample_label:
        raise Http404
    # Save uploaded Samples to temp location. (The TEMP_FILE_ROOT
    # existence check was previously duplicated.)
    if not os.path.exists(settings.TEMP_FILE_ROOT):
        os.mkdir(settings.TEMP_FILE_ROOT)
    fastq1_uploaded_file = request.FILES['fastq1']
    _, fastq1_temp_file_location = tempfile.mkstemp(
            suffix='_' + fastq1_uploaded_file.name,
            dir=settings.TEMP_FILE_ROOT)
    with open(fastq1_temp_file_location, 'w') as temp_file_fh:
        temp_file_fh.write(fastq1_uploaded_file.read())
    # Maybe handle fastq2. Bind the temp location up front: previously it
    # was only assigned inside the branch below, so every unpaired upload
    # hit a NameError when building data_source_list.
    fastq2_temp_file_location = None
    if 'fastq2' in request.FILES:
        fastq2_uploaded_file = request.FILES['fastq2']
        _, fastq2_temp_file_location = tempfile.mkstemp(
                suffix='_' + fastq2_uploaded_file.name,
                dir=settings.TEMP_FILE_ROOT)
        with open(fastq2_temp_file_location, 'w') as temp_file_fh:
            temp_file_fh.write(fastq2_uploaded_file.read())
    result = {}
    # Create the data structure that the util expects and create samples.
    try:
        data_source_list = [{
            'Sample_Name': sample_label,
            'Read_1_Path': fastq1_temp_file_location,
            # None for unpaired samples. NOTE(review): assumes
            # create_samples_from_row_data treats a None Read_2_Path as
            # "no second read" -- confirm against the util.
            'Read_2_Path': fastq2_temp_file_location
        }]
        create_samples_from_row_data(project, data_source_list, move=False)
    except Exception as e:
        result['error'] = str(e)
    return HttpResponse(json.dumps(result), content_type='application/json')
@login_required
@require_POST
def create_samples_from_server_location(request):
    """Create samples from a targets file referencing server-side reads.

    Returns an empty JSON object on success, or {'error': ...} on failure.
    """
    owning_project = get_object_or_404(
            Project, owner=request.user.get_profile(),
            uid=request.POST.get('projectUid', ''))
    try:
        import_samples_from_targets_file(
                owning_project, request.FILES['targetsFile'])
    except Exception as e:
        error_payload = json.dumps({'error': str(e)})
        return HttpResponse(error_payload, content_type='application/json')
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_GET
def get_samples_awaiting_upload(request):
    """Return filenames of sample Datasets still awaiting upload
    for the given project.
    """
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.GET.get('projectUid', ''))
    pending_datasets = Dataset.objects.filter(
            experimentsample__project=project,
            status=Dataset.STATUS.AWAITING_UPLOAD)
    filename_list = []
    for dataset in pending_datasets:
        filename_list.append(os.path.split(dataset.filesystem_location)[1])
    response_payload = json.dumps({'sampleFilenameList': filename_list})
    return HttpResponse(response_payload, content_type='application/json')
@login_required
@require_POST
def samples_upload_through_browser_template(request):
    """Register samples for eventual browser upload from a template file.

    Returns an empty JSON object on success, or {'error': ...} if the file
    is missing from the request or fails validation.
    """
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST.get('projectUid', ''))
    # Narrowed from a bare `except:` which would also swallow
    # KeyboardInterrupt/SystemExit. A missing file raises
    # MultiValueDictKeyError, a KeyError subclass.
    try:
        template_file = request.FILES['file']
    except KeyError:
        result = {
            'error': 'Problem receiving file in request.'
        }
        return HttpResponse(json.dumps(result),
                content_type='application/json')
    try:
        create_sample_models_for_eventual_upload(project, template_file)
    except ValidationException as e:
        result = {
            'error': str(e)
        }
        return HttpResponse(json.dumps(result),
                content_type='application/json')
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_POST
def samples_upload_through_browser_sample_data(request):
    """Receive the actual read data for a previously-registered sample.

    The uploaded filename must match exactly one AWAITING Dataset in the
    project; the file is copied into place and the Dataset marked READY.
    """
    project = get_object_or_404(Project, owner=request.user.get_profile(),
            uid=request.POST.get('projectUid', ''))
    # Grab the file from the request.
    uploaded_file = request.FILES['file']
    # Find the Dataset that matches the filename, validating at the same time.
    experiment_sample_datasets_in_project = Dataset.objects.filter(
            experimentsample__project=project)
    datasets_matching_project_and_filename = []
    for ds in experiment_sample_datasets_in_project:
        expected_filename = os.path.split(ds.filesystem_location)[1]
        if expected_filename == uploaded_file.name:
            datasets_matching_project_and_filename.append(ds)
    if len(datasets_matching_project_and_filename) == 0:
        result = {
            'error': 'UPLOAD ERROR: '
                    'Unexpected filename. Are you sure it\'s correct?'
        }
        return HttpResponse(json.dumps(result),
                content_type='application/json')
    # If this occurs, this is a bug. The upload should prevent Datasets with
    # the same filename for a particular project.
    assert len(datasets_matching_project_and_filename) == 1, (
            "Each Dataset must have a unique name.")
    # Identify the copy destination.
    dataset = datasets_matching_project_and_filename[0]
    copy_dest = dataset.get_absolute_location()
    # Copy the file in chunks. Opened in binary mode: read data is often
    # gzipped fastq, and text mode could corrupt it on some platforms.
    # TODO: Probably need error handling around a partial write.
    with open(copy_dest, 'wb') as dest_fh:
        for chunk in uploaded_file.chunks():
            dest_fh.write(chunk)
    # Update the status.
    dataset.status = Dataset.STATUS.READY
    dataset.save(update_fields=['status'])
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_POST
def samples_delete(request):
    """Deletes ExperimentSamples that are not part of an AlignmentGroup.

    404s on an empty/foreign uid list; refuses (with an error payload)
    to delete samples that still have alignments.
    """
    payload = json.loads(request.body)
    sample_uid_list = payload.get('sampleUidList', [])
    if not sample_uid_list:
        raise Http404
    # Ownership check: count mismatch means a foreign or unknown uid.
    samples_to_delete = ExperimentSample.objects.filter(
            project__owner=request.user.get_profile(),
            uid__in=sample_uid_list)
    if len(samples_to_delete) != len(sample_uid_list):
        raise Http404
    # Samples still referenced by an alignment must not be deleted.
    blocked_samples = [
            sample for sample in samples_to_delete
            if ExperimentSampleToAlignment.objects.filter(
                    experiment_sample=sample).count() > 0]
    if blocked_samples:
        affected_labels = ', '.join(
                [sample.label for sample in blocked_samples])
        error_string = (
                '%s associated with an alignment. You must delete '
                'all related alignments for a sample before deleting it.' % (
                        affected_labels))
        error_payload = json.dumps({'error': error_string})
        return HttpResponse(error_payload, content_type='application/json')
    # Validation successful; perform the delete and report success.
    samples_to_delete.delete()
    return HttpResponse(json.dumps({}), content_type='application/json')
# Key in the GET params containing the string for filtering the variants.
VARIANT_LIST_REQUEST_KEY__FILTER_STRING = 'variantFilterString'
# GET param keys identifying the project / reference genome to query.
VARIANT_LIST_REQUEST_KEY__PROJECT_UID = 'projectUid'
VARIANT_LIST_REQUEST_KEY__REF_GENOME_UID = 'refGenomeUid'
# Keys used in the JSON response assembled by get_variant_list().
VARIANT_LIST_RESPONSE_KEY__LIST = 'variant_list_json'
VARIANT_LIST_RESPONSE_KEY__TOTAL = 'num_total_variants'
VARIANT_LIST_RESPONSE_KEY__SET_LIST = 'variant_set_list_json'
VARIANT_LIST_RESPONSE_KEY__KEY_MAP = 'variant_key_filter_map_json'
# Present in the response only when the query failed; holds the message.
VARIANT_LIST_RESPONSE_KEY__ERROR = 'error'
# Uncomment this and @profile statement to profile. This is the entry point
# to a monster SQL call so leaving this debugging code here commented out is
# useful.
#from debug.profiler import profile
#@profile('profile.log')
@login_required
@require_GET
def get_variant_list(request):
    """Returns a list of Variants, filtered by any filter parameters contained
    in the request.

    GET params:
        refGenomeUid / projectUid: identify the ReferenceGenome.
        alignmentGroupUid: optional; restricts results to one alignment.
        variantFilterString, melt, iSortCol_0, sSortDir_0, iDisplayStart,
        iDisplayLength, visibleKeyNames: query / sort / pagination controls
        (the iSortCol_0-style names follow the DataTables convention --
        TODO confirm the client).

    Returns JSON with the adapted variant list, total count, the genome's
    VariantSets, and the key map with visible fields marked; on a malformed
    query the JSON instead carries an 'error' key.
    """
    # Parse the GET params.
    ref_genome_uid = request.GET.get('refGenomeUid')
    project_uid = request.GET.get('projectUid')
    maybe_alignment_group_uid = request.GET.get('alignmentGroupUid', None)
    # Get models and verify permissions.
    reference_genome = get_object_or_404(ReferenceGenome,
            project__uid=project_uid, uid=ref_genome_uid)
    if maybe_alignment_group_uid:
        alignment_group = get_object_or_404(AlignmentGroup,
                reference_genome=reference_genome,
                uid=maybe_alignment_group_uid)
    else:
        alignment_group = None
    # Dictionary to hold all query specific parameters.
    query_args = {}
    # Get inputs to perform the query for Variants data.
    # TODO: Combine with saved filter string.
    query_args['filter_string'] = request.GET.get(
            VARIANT_LIST_REQUEST_KEY__FILTER_STRING, '')
    # Determine whether melted or cast view.
    query_args['is_melted'] = request.GET.get('melt', 0) == '1'
    # Get optional column to sort by.
    # TODO shouldn't cast a client parameter to int outside of try-catch.
    query_args['sortCol'] = int(request.GET.get('iSortCol_0', 0))
    query_args['sort_by_direction'] = request.GET.get('sSortDir_0', 'asc')
    # Want all results listed, so set count_only to false.
    query_args['count_only'] = False
    # Pagination.
    query_args['pagination_start'] = int(request.GET.get('iDisplayStart', 0))
    query_args['pagination_len'] = int(request.GET.get('iDisplayLength', 100))
    # Any exception from here should be caused by a malformed query from the
    # user and the data should return an error string, rather than throw a 500.
    # Of course, it is possible that we have our bugs right now so devs should
    # be wary of this big try-except.
    try:
        # NOTE: VARIANT_LIST_REQUEST_KEY__VISIBLE_KEYS is defined further
        # down in this module; that is fine since it is only read here at
        # request time.
        field_select_keys = json.loads(request.GET.get(
                VARIANT_LIST_REQUEST_KEY__VISIBLE_KEYS, json.dumps([])))
        query_args['visible_key_names'] = determine_visible_field_names(
                field_select_keys, query_args['filter_string'],
                reference_genome)
        if query_args['sortCol']: # 1 indexed; 0 means no sort column
            all_fields = get_all_fields(
                    reference_genome, query_args['visible_key_names'],
                    melted=query_args['is_melted'])
            # Get rid of hidden fields for sorting consideration.
            all_fields = [field for field in all_fields
                    if not ('hide' in field and field['hide'])]
            if query_args['sortCol'] <= len(all_fields):
                query_args['sort_by_column'] = \
                        all_fields[query_args['sortCol'] - 1]['field']
            else:
                query_args['sort_by_column'] = ''
        else:
            query_args['sort_by_column'] = ''
        # Get the list of Variants (or melted representation) to display.
        lookup_variant_result = lookup_variants(query_args, reference_genome,
                alignment_group=alignment_group)
        variant_list = lookup_variant_result.result_list
        num_total_variants = lookup_variant_result.num_total_variants
        # Adapt the Variants to display for the frontend.
        variant_list_json = adapt_variant_to_frontend(variant_list,
                reference_genome, query_args['visible_key_names'],
                melted=query_args['is_melted'])
        # Get all VariantSets that exist for this ReferenceGenome.
        variant_set_list = VariantSet.objects.filter(
                reference_genome=reference_genome)
        # Query the keys valid for ReferenceGenome, and mark the ones that
        # will be displayed so that the checkmarks in the visible field select
        # are pre-filled in case the user wishes to change these.
        # Deep copy so the marking does not mutate the model's own map.
        variant_key_map_with_active_fields_marked = copy.deepcopy(
                reference_genome.variant_key_map)
        _mark_active_keys_in_variant_key_map(
                variant_key_map_with_active_fields_marked,
                query_args['visible_key_names'])
        # Package up the response.
        response_data = {
            VARIANT_LIST_RESPONSE_KEY__LIST: variant_list_json,
            VARIANT_LIST_RESPONSE_KEY__TOTAL: num_total_variants,
            VARIANT_LIST_RESPONSE_KEY__SET_LIST: adapt_model_to_frontend(VariantSet,
                    obj_list=variant_set_list),
            VARIANT_LIST_RESPONSE_KEY__KEY_MAP: json.dumps(
                    variant_key_map_with_active_fields_marked)
        }
    # Toggle which of the following exceptions is commented for debugging.
    # except FakeException as e:
    except Exception as e:
        # TODO: More readable error reporting.
        exception_as_string = str(type(e)) + ' ' + str(e)
        response_data = {
            VARIANT_LIST_RESPONSE_KEY__ERROR: exception_as_string
        }
    return HttpResponse(json.dumps(response_data),
            content_type='application/json')
# GET param key carrying the JSON list of field names to display.
# NOTE(review): defined after get_variant_list(), which references it;
# this works because the name is only resolved at request time.
VARIANT_LIST_REQUEST_KEY__VISIBLE_KEYS = 'visibleKeyNames'
def _mark_active_keys_in_variant_key_map(variant_key_map, visible_key_names):
    """Mutates variant_key_map, setting 'checked' on every entry whose key
    appears in visible_key_names.
    """
    def _check_visible_keys(model_class, submap):
        """Flag visible keys in one sub-map. model_class is currently
        unused; it is kept in the signature for the commented-out
        per-model default behavior below."""
        for key_name in visible_key_names:
            if key_name in submap:
                submap[key_name]['checked'] = True
        # TODO: Do we want to bring back this old default?
        # default_keys = [el['field'] for el in model_class.get_field_order()]
        # for key in default_keys:
        #     if key in submap:
        #         submap[key]['checked'] = True
    _check_visible_keys(VariantCallerCommonData,
            variant_key_map[MAP_KEY__COMMON_DATA])
    _check_visible_keys(VariantAlternate,
            variant_key_map[MAP_KEY__ALTERNATE])
    _check_visible_keys(VariantEvidence,
            variant_key_map[MAP_KEY__EVIDENCE])
@login_required
@require_POST
def modify_variant_in_set_membership(request):
    """Action that handles modifying the membership of a Variant in a
    VariantSet.

    JSON body keys:
        refGenomeUid, variantSetAction, variantSetUid: required.
        isAllMatchingFilterSelected: if truthy, applies the action to all
            variants matching filterString (variantUidList not required).
        variantUidList: required otherwise; explicit variants to modify.
        filterString, isMelted: only used in the all-matching-filter case.

    Returns the JSON result of the membership-update util.
    """
    request_data = json.loads(request.body)
    # Make sure the required keys are present.
    # Validation.
    REQUIRED_KEYS = [
            'refGenomeUid',
            'variantSetAction',
            'variantSetUid']
    if not all(key in request_data for key in REQUIRED_KEYS):
        return HttpResponseBadRequest("Invalid request. Missing keys.")
    add_all_matching_filter = False
    if ('isAllMatchingFilterSelected' in request_data and
            request_data['isAllMatchingFilterSelected']):
        add_all_matching_filter = True
    else:
        # Without the "all matching" flag, an explicit uid list is required.
        if not 'variantUidList' in request_data:
            return HttpResponseBadRequest("Invalid request. Missing keys.")
    # Get the project and verify that the requesting user has the
    # right permissions.
    reference_genome = get_object_or_404(ReferenceGenome,
            project__owner=request.user.get_profile(),
            uid=request_data.get('refGenomeUid'))
    # Perform the update.
    if add_all_matching_filter:
        update_memberships_result = (
                update_variant_in_set_memberships__all_matching_filter(
                        reference_genome,
                        request_data.get('variantSetAction'),
                        request_data.get('variantSetUid'),
                        request_data.get('filterString'),
                        request_data.get('isMelted')))
    else:
        update_memberships_result = update_variant_in_set_memberships(
                reference_genome,
                request_data.get('variantUidList'),
                request_data.get('variantSetAction'),
                request_data.get('variantSetUid'))
    return HttpResponse(json.dumps(update_memberships_result))
@login_required
@require_GET
def get_variant_set_list(request):
    """Return VariantSets for a reference genome or a whole project.

    Accepts either 'refGenomeUid' or 'projectUid' in the GET params;
    404s if neither is present (previously the function fell off the end
    and returned None, which Django turns into a 500).
    """
    if 'refGenomeUid' in request.GET:
        # Parse the GET params.
        ref_genome_uid = request.GET.get('refGenomeUid')
        reference_genome = get_object_or_404(ReferenceGenome,
                project__owner=request.user.get_profile(),
                uid=ref_genome_uid)
        # Grab the VariantSet data.
        variant_set_list = VariantSet.objects.filter(
                reference_genome=reference_genome)
        response_data = {
            'variant_set_list_json': adapt_model_to_frontend(VariantSet,
                    obj_list=variant_set_list)
        }
        return HttpResponse(json.dumps(response_data),
                content_type='application/json')
    elif 'projectUid' in request.GET:
        project_uid = request.GET.get('projectUid')
        # Lookup the model and verify the owner is the user
        project = get_object_or_404(Project,
                owner=request.user.get_profile(),
                uid=project_uid)
        response_data = adapt_model_to_frontend(VariantSet,
                {'reference_genome__project':project})
        return HttpResponse(response_data,
                content_type='application/json')
    else:
        raise Http404
@login_required
@require_GET
def get_samples(request):
    """Return the project's ExperimentSamples adapted for the frontend."""
    # Lookup verifies the requesting user owns the project.
    project = get_object_or_404(
            Project,
            owner=request.user.get_profile(),
            uid=request.GET.get('projectUid'))
    frontend_json = adapt_experiment_samples_to_frontend(
            {'project': project})
    return HttpResponse(frontend_json, content_type='application/json')
@login_required
def get_gene_list(request):
    """Returns the Gene view data, showing Genes and aggregated counts."""
    alignment_group = get_object_or_404(
            AlignmentGroup,
            reference_genome__project__owner=request.user.get_profile(),
            uid=request.GET.get('alignmentGroupUid'))
    gene_views = lookup_genes(alignment_group)
    payload = json.dumps({
        'geneList': adapt_gene_list_to_frontend(gene_views, alignment_group)
    })
    return HttpResponse(payload, content_type='application/json')
@login_required
@require_GET
def refresh_materialized_variant_table(request):
    """Rebuild the materialized variant view for the ReferenceGenome
    whose uid is given in the GET params.
    """
    reference_genome = get_object_or_404(
            ReferenceGenome,
            project__owner=request.user.get_profile(),
            uid=request.GET.get('refGenomeUid'))
    # NOTE: Call create() for now. It may be possible to make this
    # quicker by calling refresh().
    view_manager = MeltedVariantMaterializedViewManager(reference_genome)
    view_manager.create()
    return HttpResponse('ok')
@require_GET
@login_required
def export_variants_as_csv(request):
    """Stream the reference genome's (optionally filtered) variants
    as a .csv attachment.
    """
    reference_genome = get_object_or_404(
            ReferenceGenome,
            project__owner=request.user.get_profile(),
            uid=request.GET.get('ref_genome_uid'))
    filter_string = request.GET.get('filter_string', '')
    csv_stream = export_melted_variant_view(reference_genome, filter_string)
    response = StreamingHttpResponse(csv_stream, content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="variants.csv"'
    return response
@login_required
@require_GET
def get_alignment_groups(request):
    """Get list of AlignmentGroups for the provided ReferenceGenome uid.

    If the request has a refGenomeUid, only return alignments for that
    individual reference genome.
    If the request has a projectUid, return all alignments for that
    project. 404s if neither param is present (previously fell off the
    end, returning None -> 500).
    TODO(gleb): Clarify in comments why we have these two cases.
    """
    if 'refGenomeUid' in request.GET:
        # Parse the GET params.
        ref_genome_uid = request.GET.get('refGenomeUid')
        # Lookup the model and verify the owner is the user
        reference_genome = get_object_or_404(ReferenceGenome,
                project__owner=request.user.get_profile(),
                uid=ref_genome_uid)
        alignment_group_list = AlignmentGroup.objects.filter(
                reference_genome=reference_genome).order_by('label')
        response_data = [{
            'label': ag.label,
            'uid': ag.uid
        } for ag in alignment_group_list]
        return HttpResponse(json.dumps(response_data),
                content_type='application/json')
    elif 'projectUid' in request.GET:
        # Parse the GET params.
        project_uid = request.GET.get('projectUid')
        # Lookup the model and verify the owner is the user
        project = get_object_or_404(Project,
                owner=request.user.get_profile(),
                uid=project_uid)
        alignment_group_list = AlignmentGroup.objects.filter(
                reference_genome__project=project).order_by('-start_time')
        response_data = adapt_model_to_frontend(AlignmentGroup,
                obj_list=alignment_group_list)
        # Add bit to indicate whether any AlignmentGroups are running.
        # NOTE: We do this wonky json.loads(), modify, json.dumps() because
        # adapt_model_to_frontend() has the suboptimal interface of returning
        # a json packaged object. It would be better to change this, but would
        # require making this change safely everywhere else, but since we are
        # lacking test coverage I'm not going to do that right now.
        response_data_dict = json.loads(response_data)
        response_data_dict['clientShouldRefresh'] = _are_any_alignments_running(
                alignment_group_list)
        response_data = json.dumps(response_data_dict)
        return HttpResponse(response_data,
                content_type='application/json')
    else:
        raise Http404
def _are_any_alignments_running(alignment_group_list):
"""Determines whether any alignments in the list are running.
"""
for ag in alignment_group_list:
if ag.status in AlignmentGroup.PIPELINE_IS_RUNNING_STATUSES:
return True
return False
@login_required
@require_POST
def alignment_groups_delete(request):
    """Delete the AlignmentGroups named in the JSON request body.

    404s on an empty uid list or if any uid is unknown / not owned by
    the requesting user.
    """
    payload = json.loads(request.body)
    uid_list = payload.get('uidList', [])
    if not uid_list:
        raise Http404
    # Ownership check: count mismatch means a foreign or unknown uid.
    owned_groups = AlignmentGroup.objects.filter(
            reference_genome__project__owner=request.user.get_profile(),
            uid__in=uid_list)
    if len(owned_groups) != len(uid_list):
        raise Http404
    owned_groups.delete()
    return HttpResponse(json.dumps({}), content_type='application/json')
@login_required
@require_GET
def is_materialized_view_valid(request):
    """Report whether the materialized variant view is valid for the
    given ReferenceGenome.
    """
    reference_genome = get_object_or_404(
            ReferenceGenome,
            project__owner=request.user.get_profile(),
            uid=request.GET.get('refGenomeUid'))
    payload = json.dumps({
        'isValid': reference_genome.is_materialized_variant_view_valid
    })
    return HttpResponse(payload, content_type='application/json')
@login_required
@require_GET
def get_ref_genomes(request):
    """Get list of ReferenceGenomes for the provided project uid.

    GET params:
        projectUid: project whose genomes to return.
        showDeNovo: '0' hides genomes generated by de novo assembly.
    """
    # Parse the GET params.
    project_uid = request.GET.get('projectUid')
    # Lookup the model and verify the owner is the user
    project = get_object_or_404(Project,
            owner=request.user.get_profile(),
            uid=project_uid)
    filters = {'project' : project}
    # If hiding de_novo_assemblies, generate a list of uids from non-assemblies
    # from the metadata json field of a ReferenceGenome to use as a filter
    # when adapting the model to the front end
    show_de_novo = int(request.GET.get('showDeNovo', 1))
    if not show_de_novo:
        # Only scan this project's genomes. The old code iterated
        # ReferenceGenome.objects.all() -- every genome in the database --
        # which was wasted work; the final result is the same because the
        # uid__in filter is combined with the project filter anyway.
        uid_list = []
        for rg in ReferenceGenome.objects.filter(project=project):
            if not rg.metadata.get('is_from_de_novo_assembly', False):
                uid_list.append(rg.uid)
        filters['uid__in'] = uid_list
    response_data = adapt_model_to_frontend(ReferenceGenome, filters)
    return HttpResponse(response_data,
            content_type='application/json')
@login_required
@require_GET
def get_single_ref_genome(request):
    """Return the Chromosomes of one ReferenceGenome, adapted for the
    frontend.
    """
    genome_uid = request.GET.get('referenceGenomeUid')
    chromosome_json = adapt_model_to_frontend(
            Chromosome, {'reference_genome__uid': genome_uid})
    return HttpResponse(chromosome_json, content_type='application/json')
@login_required
@require_POST
def create_variant_set(request):
    """Create a VariantSet, either empty or populated from an uploaded vcf.

    POST params: refGenomeUid, variantSetName, createSetType
    ('from-file' or 'empty'). Returns JSON from the chosen helper.
    """
    # Get the params.
    ref_genome_uid = request.POST.get('refGenomeUid', '')
    variant_set_name = request.POST.get('variantSetName', '')
    create_set_type = request.POST.get('createSetType', '')
    # Basic validation. Explicit checks instead of assert: asserts are
    # stripped under `python -O`, which would have disabled validation
    # entirely. Messages match the old AssertionError strings.
    if create_set_type not in ('from-file', 'empty'):
        return HttpResponseBadRequest('')
    if ref_genome_uid == '':
        return HttpResponseBadRequest("Must provide Reference Genome")
    if variant_set_name == '':
        return HttpResponseBadRequest("Must provide Variant Set name")
    # Model lookup / validation.
    ref_genome = get_object_or_404(ReferenceGenome,
            project__owner=request.user.get_profile(),
            uid=ref_genome_uid)
    # Create new variant set, depending on type of form submitted.
    if create_set_type == 'from-file':
        result = _create_variant_set_from_file(request, ref_genome,
                variant_set_name)
    else:
        result = _create_variant_set_empty(ref_genome, variant_set_name)
    return HttpResponse(json.dumps(result), content_type='application/json')
def _create_variant_set_from_file(request, ref_genome, variant_set_name):
    """Creates a variant set from uploaded vcf file.

    Returns:
        Dictionary with keys:
        * error: Either empty string or description of error that occurred
    """
    error_string = ''
    # Stash the upload on disk so the importer can read it by path.
    path = default_storage.save('tmp/tmp_varset.vcf',
            ContentFile(request.FILES['vcfFile'].read()))
    variant_set_file = os.path.join(settings.MEDIA_ROOT, path)
    try:
        # Return value was previously bound to an unused local.
        import_variant_set_from_vcf(ref_genome, variant_set_name,
                variant_set_file)
    except Exception as e:
        error_string = 'Import error: ' + str(e)
    finally:
        # Always clean up the temp vcf, even on import failure.
        os.remove(variant_set_file)
    result = {
        'error': error_string,
    }
    return result
def _create_variant_set_empty(ref_genome, variant_set_name):
    """Creates an empty variant set.

    A VariantSet with the given name can't exist already.

    Returns:
        Dictionary with keys:
        * error: Either empty string or description of error that occurred
        * variantSetUid: uid of the new VariantSet (only on success)
    """
    exists_set_with_same_name = bool(VariantSet.objects.filter(
            reference_genome=ref_genome,
            label=variant_set_name).count())
    if exists_set_with_same_name:
        # Previously this branch fell through to reference
        # empty_variant_set, which was never bound here, so duplicate
        # names raised NameError instead of returning the error message.
        return {
            'error': 'Variant set %s exists' % variant_set_name,
        }
    empty_variant_set = VariantSet.objects.create(
            reference_genome=ref_genome,
            label=variant_set_name)
    return {
        'error': '',
        'variantSetUid': empty_variant_set.uid
    }
@login_required
def print_mage_oligos_for_variant_set(request):
    """Stream a csv of MAGE oligos for the requested VariantSet."""
    variant_set = get_object_or_404(VariantSet,
            reference_genome__project__owner=request.user.get_profile(),
            uid=request.GET.get('variantSetUid'))
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="oligos.csv"'
    # Replication origin/terminus coordinates come straight from the
    # query string. (Renamed from the typo'd "repliation_origin_params".)
    replication_origin_params = ReplicationOriginParams(
            request.GET.get('repOriginStart'),
            request.GET.get('repOriginEnd'),
            request.GET.get('repTerminusStart'),
            request.GET.get('repTerminusEnd'))
    print_mage_oligos(variant_set, response, 'o_',
            replication_origin_params,
            experiment_dir=request.GET.get('experimentDir'))
    return response
@login_required
@require_POST
def generate_new_ref_genome_for_variant_set(request):
    """Generate a new ReferenceGenome by applying a VariantSet's variants.

    On success returns a redirect to the new genome's view page; on
    validation failure returns {'error': <message>}.
    """
    variant_set_uid = request.POST.get('variantSetUid')
    variant_set = get_object_or_404(VariantSet,
            reference_genome__project__owner=request.user.get_profile(),
            uid=variant_set_uid)
    new_ref_genome_label = request.POST.get('refGenomeLabel')
    ref_genome_maker_params = {
        'label': new_ref_genome_label
    }
    # try/except/else instead of the old error_string flag: a
    # ValidationException whose str() was empty used to leave error_string
    # falsy, sending us down the success path with new_ref_genome unbound
    # (NameError -> 500).
    try:
        new_ref_genome = generate_new_reference_genome(
                variant_set, ref_genome_maker_params)
    except ValidationException as e:
        result = {
            'error': str(e)
        }
    else:
        assert new_ref_genome
        result = {
            'redirect': reverse(
                    'main.views.reference_genome_view',
                    args=(new_ref_genome.project.uid, new_ref_genome.uid)),
        }
    return HttpResponse(json.dumps(result), content_type='application/json')
@require_GET
@login_required
def generate_contigs(request):
    """
    Generates and begins download of a fasta file of contigs assembled from
    unmapped and split reads of the passed ExperimentSampleToAlignment.

    GET params:
        experiment_sample_uid: uid of the ExperimentSampleToAlignment.
            Must belong to the requesting user.

    Returns:
        StreamingHttpResponse serving the contigs fasta as an attachment.
    """
    # Retrieve ExperimentSampleToAlignment, scoped by owner.
    experiment_sample_uid = request.GET.get('experiment_sample_uid')
    experiment_sample_to_alignment = get_object_or_404(
            ExperimentSampleToAlignment,
            alignment_group__reference_genome__project__owner=
                    request.user.get_profile(),
            uid=experiment_sample_uid)

    contig_files = assembly.generate_contigs(experiment_sample_to_alignment)

    # Select only element in list.
    contig_file = contig_files[0]

    # Start download of contigs fasta file. Use open() rather than the
    # legacy file() builtin (removed in Python 3); the handle stays open
    # for the duration of the streamed response — presumably closed when
    # the response is finalized (TODO: confirm Django closes it).
    wrapper = FileWrapper(open(contig_file))
    response = StreamingHttpResponse(wrapper, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename="contigs.fa"'
    response['Content-Length'] = os.path.getsize(contig_file)
    return response
if settings.S3_ENABLED:
    # These views are only defined when S3 support is configured.

    @login_required
    def import_reference_genome_s3(request, project_uid):
        """Import a reference genome from a previously-uploaded S3 file."""
        if request.method == 'POST':
            project = get_object_or_404(Project,
                    owner=request.user.get_profile(),
                    uid=project_uid)
            s3file_id = request.POST['s3file_id']
            s3file = S3File.objects.get(pk=s3file_id)
            import_reference_genome_from_s3(
                    project,
                    request.POST['refGenomeLabel'],
                    s3file,
                    request.POST['importFileFormat'])
        return HttpResponse("", content_type='text/plain')

    @login_required
    @require_POST
    def parse_targets_file_s3(request, project_uid):
        """Parse a targets csv stored on S3.

        Returns JSON with the parsed rows and the sample filenames they
        reference, or an 'error' string when parsing fails.
        """
        project = get_object_or_404(Project, owner=request.user.get_profile(),
                uid=project_uid)
        s3file_id = request.POST['s3file_id']
        s3file = S3File.objects.get(pk=s3file_id)
        csv_data = s3_get_string(s3file.key)
        csv_io = StringIO(csv_data)
        sample_filenames = []
        try:
            valid_rows = parse_targets_file(csv_io, remove_directory_path=True)
            # Collect every field that names a sample file path.
            for field_name, field_value in valid_rows.iteritems():
                if 'Path' in field_name:
                    sample_filenames.append(field_value)
        except AssertionError as e:
            # Validation failures carry a user-facing message.
            return HttpResponse(json.dumps({
                    'error': str(e)
                }), content_type='application/json')
        except Exception:
            # Report any other parse failure back to the client. Narrowed
            # from a bare 'except:' so SystemExit/KeyboardInterrupt are not
            # swallowed.
            import traceback
            return HttpResponse(json.dumps({
                    'error': traceback.format_exc()
                }), content_type='application/json')
        if len(list(set(sample_filenames))) != len(sample_filenames):
            return HttpResponse(json.dumps({
                    'error': "Targets file contains sample files with same names."
                }), content_type='application/json')
        return HttpResponse(json.dumps({
                'targets_file_rows': valid_rows,
                'sample_filenames': sample_filenames
            }), content_type='application/json')

    @login_required
    @require_POST
    def process_sample_files_s3(request, project_uid):
        """Kick off import of sample files previously uploaded to S3."""
        project = get_object_or_404(Project, owner=request.user.get_profile(),
                uid=project_uid)
        data = json.loads(request.raw_post_data)
        s3files = []
        for f in data['sample_files'].values():
            s3files.append(S3File.objects.get(pk=int(f['sid'])))
        import_samples_from_s3(project, data['targets_file_rows'], s3files)
        return HttpResponse(json.dumps({
                'targets_file_rows': data['targets_file_rows'],
                'sample_files': data['sample_files']
            }), content_type='application/json')
| mit |
charbeljc/OCB | addons/resource/faces/timescale.py | 263 | 3899 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import openerp.tools as tools
import datetime
import sys
class TimeScale(object):
    """Maps dates of a data calendar onto a padded "chart" calendar.

    The chart calendar stretches each day's non-working time so that all
    days have equal total length (working time fills 2/3 of a day), giving
    a uniform axis for chart rendering.

    NOTE: Python 2 code — relies on sys.maxint and on map/filter/zip
    returning lists.
    """

    def __init__(self, calendar):
        self.data_calendar = calendar
        self._create_chart_calendar()
        # "now" expressed in chart-calendar numeric coordinates.
        self.now = self.to_num(self.data_calendar.now)

    def to_datetime(self, xval):
        # xval is a chart-calendar working-date value.
        return xval.to_datetime()

    def to_num(self, date):
        # Convert a date to the chart calendar's numeric representation.
        return self.chart_calendar.WorkingDate(date)

    def is_free_slot(self, value):
        # A chart slot is "free" (padding) when the data calendar maps its
        # start time to a different start time than the chart calendar does.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1 != dt2

    def is_free_day(self, value):
        # Like is_free_slot, but only counts as free when the whole
        # calendar day differs.
        dt1 = self.chart_calendar.to_starttime(value)
        dt2 = self.data_calendar.to_starttime\
              (self.data_calendar.from_datetime(dt1))
        return dt1.date() != dt2.date()

    def _create_chart_calendar(self):
        """Build the padded chart calendar from the data calendar."""
        dcal = self.data_calendar
        ccal = self.chart_calendar = pcal.Calendar()
        ccal.minimum_time_unit = 1
        #pad worktime slots of calendar (all days should be equally long)
        # slot_sum: total minutes covered by a list of (start, end) slots.
        slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
        day_sum = lambda day: slot_sum(dcal.get_working_times(day))
        # Longest working day (in minutes) across the week.
        max_work_time = max(map(day_sum, range(7)))
        #working_time should have 2/3
        sum_time = 3 * max_work_time / 2
        #now create timeslots for ccal
        def create_time_slots(day):
            src_slots = dcal.get_working_times(day)
            # Bracket the day's slots with day boundaries, then pair up
            # consecutive edges into candidate slots.
            slots = [0, src_slots, 24*60]
            slots = tuple(tools.flatten(slots))
            slots = zip(slots[:-1], slots[1:])
            #balance non working slots
            work_time = slot_sum(src_slots)
            non_work_time = sum_time - work_time
            # Distribute the remaining padding over the non-working slots,
            # shortest first so no slot is allocated more than its length.
            non_slots = filter(lambda s: s not in src_slots, slots)
            non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
            non_slots.sort()
            slots = []
            i = 0
            for l, s in non_slots:
                # Spread what's left evenly over the remaining slots.
                delta = non_work_time / (len(non_slots) - i)
                delta = min(l, delta)
                non_work_time -= delta
                slots.append((s[0], s[0] + delta))
                i += 1
            slots.extend(src_slots)
            slots.sort()
            return slots

        min_delta = sys.maxint
        for i in range(7):
            slots = create_time_slots(i)
            ccal.working_times[i] = slots
            min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
        ccal._recalc_working_time()
        # Cache the smallest slot and per-day/per-week totals for rendering.
        self.slot_delta = min_delta
        self.day_delta = sum_time
        self.week_delta = ccal.week_time
# Module-level singleton scale built from the package's default calendar.
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
DanLipsitt/flask-sockjs | flask_sockjs/__init__.py | 2 | 1632 | # -*- coding: utf-8 -*-
from observable import Observable
from flask import Blueprint, url_for
from twisted.web.resource import Resource
from txsockjs.utils import broadcast
from txsockjs.factory import SockJSResource
from .endpoint import Endpoint, EndpointProtocol, EndpointFactory
class SockJS(Observable):
    """Flask/Twisted glue exposing SockJS endpoints under a URL prefix."""

    def __init__(self, twisted=None, resource=None, url_prefix='ws',
                 url_helper='ws_url_for'):
        self.app = None
        self.twisted = None
        # Bug fix: the default used to be the mutable default argument
        # Resource(), a single instance shared by every SockJS object
        # created without an explicit resource. Build a fresh Resource per
        # instance instead.
        self.resource = resource if resource is not None else Resource()
        self.url_prefix = url_prefix
        self.url_helper = url_helper
        if twisted is not None:
            self.init_twisted(twisted)

    def init_twisted(self, twisted):
        """Attach the SockJS resource tree to the Twisted site (idempotent)."""
        if self.twisted is None:
            self.twisted = twisted
            twisted.add_resource(self.url_prefix, self.resource)
            twisted.on('run', self.init_app)

    def init_app(self, app):
        """Register the URL-helper blueprint on the Flask app (idempotent)."""
        if self.app is None:
            self.app = app
            blueprint = Blueprint('sock_js', __name__)
            blueprint.route("/")(self.ws_url_for)
            app.register_blueprint(blueprint, url_prefix='/' + self.url_prefix)
            # Make the helper available inside Jinja templates too.
            app.jinja_env.globals[self.url_helper] = self.ws_url_for

    def ws_url_for(self, endpoint=''):
        """Return the absolute websocket URL for *endpoint*."""
        url = url_for('sock_js.ws_url_for', _external=True)
        return url + endpoint

    def createEndpoint(self, name, options=None):
        """Create an Endpoint, mount it on the resource tree, and return it."""
        endpoint = Endpoint(name)
        if options is None:
            options = {'encoding': 'utf-8'}
        self.resource.putChild(name, SockJSResource(
                EndpointFactory.forProtocol(EndpointProtocol, endpoint),
                options))
        return endpoint
| mit |
robbiet480/home-assistant | homeassistant/components/pushbullet/sensor.py | 16 | 3912 | """Pushbullet platform for sensor component."""
import logging
import threading
from pushbullet import InvalidKeyError, Listener, PushBullet
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"application_name": ["Application name"],
"body": ["Body"],
"notification_id": ["Notification ID"],
"notification_tag": ["Notification tag"],
"package_name": ["Package name"],
"receiver_email": ["Receiver email"],
"sender_email": ["Sender email"],
"source_device_iden": ["Sender device ID"],
"title": ["Title"],
"type": ["Type"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["title", "body"]): vol.All(
cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Pushbullet Sensor platform."""
    try:
        pushbullet = PushBullet(config.get(CONF_API_KEY))
    except InvalidKeyError:
        _LOGGER.error("Wrong API key for Pushbullet supplied")
        return False

    # One provider per account; each monitored condition gets its own sensor.
    provider = PushBulletNotificationProvider(pushbullet)
    sensors = [
        PushBulletNotificationSensor(provider, sensor_type)
        for sensor_type in config[CONF_MONITORED_CONDITIONS]
    ]
    add_entities(sensors)
class PushBulletNotificationSensor(Entity):
    """Representation of a Pushbullet Sensor."""

    def __init__(self, pb, element):
        """Initialize the Pushbullet sensor."""
        self.pushbullet = pb
        self._element = element
        self._state = None
        self._state_attributes = None

    def update(self):
        """Fetch the latest data from the sensor.

        Copies the monitored element into self._state and the full push
        payload into self._state_attributes.
        """
        payload = self.pushbullet.data
        try:
            new_state = payload[self._element]
        except (KeyError, TypeError):
            # No push received yet, or this push lacks the monitored field.
            return
        self._state = new_state
        self._state_attributes = payload

    @property
    def name(self):
        """Return the name of the sensor."""
        return "Pushbullet " + self._element

    @property
    def state(self):
        """Return the current state of the sensor."""
        return self._state

    @property
    def device_state_attributes(self):
        """Return all known attributes of the sensor."""
        return self._state_attributes
class PushBulletNotificationProvider:
    """Provider for an account, leading to one or more sensors."""

    def __init__(self, pb):
        """Start to retrieve pushes from the given Pushbullet instance."""
        self.pushbullet = pb
        # Most recent push payload; None until the first push arrives.
        self._data = None
        self.listener = None
        # Daemon thread so a hung listener can't block interpreter shutdown.
        self.thread = threading.Thread(target=self.retrieve_pushes)
        self.thread.daemon = True
        self.thread.start()

    def on_push(self, data):
        """Update the current data.

        Currently only monitors pushes but might be extended to monitor
        different kinds of Pushbullet events.
        """
        if data["type"] == "push":
            self._data = data["push"]

    @property
    def data(self):
        """Return the current data stored in the provider."""
        return self._data

    def retrieve_pushes(self):
        """Retrieve_pushes.

        Spawn a new Listener and links it to self.on_push.
        """
        # Runs on the daemon thread started in __init__ and blocks for the
        # lifetime of the websocket connection.
        self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
        _LOGGER.debug("Getting pushes")
        try:
            self.listener.run_forever()
        finally:
            self.listener.close()
| apache-2.0 |
MattRijk/django-ecomsite | lib/python2.7/site-packages/django/http/utils.py | 134 | 1501 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
    """
    Ensures that we always use an absolute URI in any location header in the
    response. This is required by RFC 2616, section 14.30.

    Code constructing response objects is free to insert relative paths, as
    this function converts them to absolute paths.
    """
    has_location = 'Location' in response
    if has_location and request.get_host():
        absolute_uri = request.build_absolute_uri(response['Location'])
        response['Location'] = absolute_uri
    return response
def conditional_content_removal(request, response):
    """
    Removes the content of responses for HEAD requests, 1xx, 204 and 304
    responses. Ensures compliance with RFC 2616, section 4.3.
    """
    def _clear_body(resp):
        # Streaming responses keep their payload in streaming_content.
        if resp.streaming:
            resp.streaming_content = []
        else:
            resp.content = b''

    status = response.status_code
    if 100 <= status < 200 or status in (204, 304):
        _clear_body(response)
        response['Content-Length'] = '0'
    if request.method == 'HEAD':
        _clear_body(response)
    return response
| cc0-1.0 |
chengdh/openerp-ktv | openerp/addons/l10n_es/__openerp__.py | 8 | 2396 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008-2010 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Spanish - Accounting (PGCE 2008)",
"version" : "3.0",
"author" : "Spanish Localization Team",
'website' : 'https://launchpad.net/openerp-spain',
"category" : "Localization/Account Charts",
"description": """
Spanish Charts of Accounts (PGCE 2008).
=======================================
* Defines the following chart of account templates:
* Spanish General Chart of Accounts 2008.
* Spanish General Chart of Accounts 2008 for small and medium companies.
* Defines templates for sale and purchase VAT.
* Defines tax code templates.
Note: You should install the l10n_ES_account_balance_report module
for yearly account reporting (balance, profit & losses).
""",
"license" : "GPL-3",
"depends" : ["account", "base_vat", "base_iban"],
"init_xml" : [
"account_chart.xml",
"taxes_data.xml",
"fiscal_templates.xml",
"account_chart_pymes.xml",
"taxes_data_pymes.xml",
"fiscal_templates_pymes.xml",
"l10n_es_wizard.xml"
],
"demo_xml" : [],
"update_xml" : [
],
"auto_install": False,
"installable": True,
"certificate" : "00408828172062583229",
'images': ['images/config_chart_l10n_es.jpeg','images/l10n_es_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
joongh/robotframework | utest/result/test_configurer.py | 6 | 11532 | from itertools import chain
import unittest
from robot.utils.asserts import assert_equal, assert_raises_with_msg, assert_true
from robot.errors import DataError
from robot.result import Keyword, TestCase, TestSuite
from robot.result.configurer import SuiteConfigurer
SETUP = Keyword.SETUP_TYPE
TEARDOWN = Keyword.TEARDOWN_TYPE
FOR_LOOP = Keyword.FOR_LOOP_TYPE
FOR_ITEM = Keyword.FOR_ITEM_TYPE
class TestSuiteAttributes(unittest.TestCase):
    """Tests for configuring suite name, documentation and metadata."""

    def setUp(self):
        self.suite = TestSuite(name='Suite', metadata={'A A': '1', 'bb': '1'})
        self.suite.tests.create(name='Make suite non-empty')

    def test_name_and_doc(self):
        configurer = SuiteConfigurer(name='New Name', doc='New Doc')
        self.suite.visit(configurer)
        assert_equal(self.suite.name, 'New Name')
        assert_equal(self.suite.doc, 'New Doc')

    def test_metadata(self):
        # New keys are added; existing keys are overridden.
        configurer = SuiteConfigurer(metadata={'bb': '2', 'C': '2'})
        self.suite.visit(configurer)
        assert_equal(self.suite.metadata, {'A A': '1', 'bb': '2', 'C': '2'})

    def test_metadata_is_normalized(self):
        # Keys differing only in case/space/underscore collapse together.
        configurer = SuiteConfigurer(metadata={'aa': '2', 'B_B': '2'})
        self.suite.visit(configurer)
        assert_equal(self.suite.metadata, {'A A': '2', 'bb': '2'})
class TestTestAttributes(unittest.TestCase):
    """Tests for adding and removing test tags via SuiteConfigurer."""

    def setUp(self):
        self.suite = TestSuite()
        self.suite.tests = [TestCase()]
        self.suite.suites = [TestSuite()]
        self.suite.suites[0].tests = [TestCase(tags=['tag'])]

    def _set_tags(self, tags):
        self.suite.visit(SuiteConfigurer(set_tags=tags))

    def _assert_tags(self, expected_top, expected_sub):
        assert_equal(list(self.suite.tests[0].tags), expected_top)
        assert_equal(list(self.suite.suites[0].tests[0].tags), expected_sub)

    def test_set_tags(self):
        self._set_tags(['new'])
        self._assert_tags(['new'], ['new', 'tag'])

    def test_tags_are_normalized(self):
        # Empty and 'NONE' tags are dropped; spacing is ignored on match.
        self._set_tags(['TAG', '', 't a g', 'NONE'])
        self._assert_tags(['TAG'], ['tag'])

    def test_remove_negative_tags(self):
        self._set_tags(['n', '-TAG'])
        self._assert_tags(['n'], ['n'])

    def test_remove_negative_tags_using_pattern(self):
        self._set_tags(['-t*', '-nomatch'])
        self._assert_tags([], [])
class TestFiltering(unittest.TestCase):
    """Tests for include/exclude filtering by tags and by names."""

    def setUp(self):
        self.suite = TestSuite(name='root')
        self.suite.tests = [TestCase(name='n0'), TestCase(name='n1', tags=['t1']),
                            TestCase(name='n2', tags=['t1', 't2'])]
        self.suite.suites.create(name='sub').tests.create(name='n1', tags=['t1'])

    def test_include(self):
        # Empty and non-matching selectors are ignored; '?' is a wildcard.
        self.suite.visit(SuiteConfigurer(include_tags=['t1', 'none', '', '?2']))
        assert_equal([t.name for t in self.suite.tests], ['n1', 'n2'])
        assert_equal([t.name for t in self.suite.suites[0].tests], ['n1'])

    def test_exclude(self):
        # 'AND' combines tags; a sub-suite left empty is dropped entirely.
        self.suite.visit(SuiteConfigurer(exclude_tags=['t1', '?1ANDt2']))
        assert_equal([t.name for t in self.suite.tests], ['n0'])
        assert_equal(list(self.suite.suites), [])

    def test_include_by_names(self):
        self.suite.visit(SuiteConfigurer(include_suites=['s?b', 'xxx'],
                                         include_tests=['', '*1', 'xxx']))
        assert_equal(list(self.suite.tests), [])
        assert_equal([t.name for t in self.suite.suites[0].tests], ['n1'])

    def test_no_matching_tests_with_one_selector_each(self):
        # Error message uses singular wording when one selector of each kind.
        configurer = SuiteConfigurer(include_tags='i', exclude_tags='e',
                                     include_suites='s', include_tests='t')
        assert_raises_with_msg(DataError,
                               "Suite 'root' contains no tests with tag 'i', "
                               "without tag 'e' and named 't' in suite 's'.",
                               self.suite.visit, configurer)

    def test_no_matching_tests_with_multiple_selectors(self):
        # Error message switches to plural wording and 'or' lists.
        configurer = SuiteConfigurer(include_tags=['i1', 'i2'],
                                     exclude_tags=['e1', 'e2'],
                                     include_suites=['s1', 's2', 's3'],
                                     include_tests=['t1', 't2'])
        assert_raises_with_msg(DataError,
                               "Suite 'root' contains no tests "
                               "with tags 'i1' or 'i2', "
                               "without tags 'e1' or 'e2' and "
                               "named 't1' or 't2' "
                               "in suites 's1', 's2' or 's3'.",
                               self.suite.visit, configurer)

    def test_empty_suite(self):
        suite = TestSuite(name='x')
        # empty_suite_ok=True lets an empty suite pass silently.
        suite.visit(SuiteConfigurer(empty_suite_ok=True))
        assert_raises_with_msg(DataError,
                               "Suite 'x' contains no tests.",
                               suite.visit, SuiteConfigurer())
class TestRemoveKeywords(unittest.TestCase):
    """Tests for the remove_keywords option: ALL, PASSED, FOR and combos.

    Keywords/messages are stripped from result data unless they carry
    information a reader still needs (failures, warnings).
    """

    def test_remove_all_removes_all(self):
        suite = self._suite_with_setup_and_teardown_and_test_with_keywords()
        self._remove('ALL', suite)
        for keyword in chain(suite.keywords, suite.tests[0].keywords):
            self._should_contain_no_messages_or_keywords(keyword)

    def test_remove_passed_removes_from_passed_test(self):
        suite = TestSuite()
        test = suite.tests.create(status='PASS')
        test.keywords.create(status='PASS').messages.create(message='keyword message')
        test.keywords.create(status='PASS').keywords.create(status='PASS')
        self._remove_passed(suite)
        for keyword in test.keywords:
            self._should_contain_no_messages_or_keywords(keyword)

    def test_remove_passed_removes_setup_and_teardown_from_passed_suite(self):
        suite = TestSuite()
        suite.tests.create(status='PASS')
        suite.keywords.create(status='PASS', type=SETUP).keywords.create()
        suite.keywords.create(status='PASS', type=TEARDOWN).messages.create(message='message')
        self._remove_passed(suite)
        for keyword in suite.keywords:
            self._should_contain_no_messages_or_keywords(keyword)

    def test_remove_passed_does_not_remove_when_test_failed(self):
        # Everything in a failed test is kept, even its passing keywords.
        suite = TestSuite()
        test = suite.tests.create(status='FAIL')
        test.keywords.create(status='PASS').keywords.create()
        test.keywords.create(status='PASS').messages.create(message='message')
        failed_keyword = test.keywords.create(status='FAIL')
        failed_keyword.messages.create('mess')
        failed_keyword.keywords.create()
        self._remove_passed(suite)
        assert_equal(len(test.keywords[0].keywords), 1)
        assert_equal(len(test.keywords[1].messages), 1)
        assert_equal(len(test.keywords[2].messages), 1)
        assert_equal(len(test.keywords[2].keywords), 1)

    def test_remove_passed_does_not_remove_when_test_contains_warning(self):
        suite = TestSuite()
        test = self._test_with_warning(suite)
        self._remove_passed(suite)
        assert_equal(len(test.keywords[0].keywords), 1)
        assert_equal(len(test.keywords[1].messages), 1)

    def _test_with_warning(self, suite):
        # Helper: passing test whose second keyword logs a WARN message.
        test = suite.tests.create(status='PASS')
        test.keywords.create(status='PASS').keywords.create()
        test.keywords.create(status='PASS').messages.create(message='danger!',
                                                            level='WARN')
        return test

    def test_remove_passed_does_not_remove_setup_and_teardown_from_failed_suite(self):
        suite = TestSuite()
        suite.keywords.create(type=SETUP).messages.create(message='some')
        suite.keywords.create(type=TEARDOWN).keywords.create()
        suite.tests.create(status='FAIL')
        self._remove_passed(suite)
        assert_equal(len(suite.keywords.setup.messages), 1)
        assert_equal(len(suite.keywords.teardown.keywords), 1)

    # NOTE: 'does_now' in this method name looks like a typo for 'does_not';
    # kept as-is since renaming is a code change.
    def test_remove_passed_does_now_remove_setup_and_teardown_from_suite_with_noncritical_failure(self):
        suite = TestSuite()
        suite.set_criticality([], ['non'])
        suite.keywords.create(type=SETUP).messages.create(message='some')
        suite.keywords.create(type=TEARDOWN).keywords.create()
        suite.tests.create(status='FAIL', tags='non')
        # Suite passes overall because the only failure is non-critical.
        assert_equal(suite.status, 'PASS')
        self._remove_passed(suite)
        assert_equal(len(suite.keywords.setup.messages), 1)
        assert_equal(len(suite.keywords.teardown.keywords), 1)

    def test_remove_for_removes_passed_items_except_last(self):
        suite, forloop = self.suite_with_forloop()
        last = forloop.keywords[-1]
        self._remove_for_loop(suite)
        assert_equal(len(forloop.keywords), 1)
        assert_true(forloop.keywords[-1] is last)

    def suite_with_forloop(self):
        # Helper: passing test containing a FOR loop with 100 passing items.
        suite = TestSuite()
        test = suite.tests.create(status='PASS')
        forloop = test.keywords.create(status='PASS', type=FOR_LOOP)
        for i in range(100):
            forloop.keywords.create(status='PASS',
                                    type=FOR_ITEM).messages.create(
                message='something')
        return suite, forloop

    def test_remove_for_removes_passing_items_when_there_are_failures(self):
        suite, forloop = self.suite_with_forloop()
        failed = forloop.keywords.create(status='FAIL')
        self._remove_for_loop(suite)
        assert_equal(len(forloop.keywords), 1)
        assert_true(forloop.keywords[-1] is failed)

    def test_remove_for_does_not_remove_for_loop_items_with_warnings(self):
        suite, forloop = self.suite_with_forloop()
        forloop.keywords[2].messages.create(message='danger!', level='WARN')
        warn = forloop.keywords[2]
        last = forloop.keywords[-1]
        self._remove_for_loop(suite)
        assert_equal(len(forloop.keywords), 2)
        assert_equal(list(forloop.keywords), [warn, last])

    def test_remove_based_on_multiple_condition(self):
        # 'passed' and 'for' conditions can be applied together.
        suite = TestSuite()
        t1 = suite.tests.create(status='PASS')
        t1.keywords.create().messages.create()
        t2 = suite.tests.create(status='FAIL')
        t2.keywords.create().messages.create()
        t2.keywords.create(type=FOR_LOOP)
        for i in range(10):
            t2.keywords[1].keywords.create(type=FOR_ITEM, status='PASS')
        self._remove(['passed', 'for'], suite)
        assert_equal(len(t1.keywords[0].messages), 0)
        assert_equal(len(t2.keywords[0].messages), 1)
        assert_equal(len(t2.keywords[1].keywords), 1)

    def _suite_with_setup_and_teardown_and_test_with_keywords(self):
        suite = TestSuite()
        suite.keywords.create(type=SETUP).messages.create('setup message')
        suite.keywords.create(type=TEARDOWN).messages.create('teardown message')
        test = suite.tests.create()
        test.keywords.create().keywords.create()
        test.keywords.create().messages.create('kw with message')
        return suite

    def _should_contain_no_messages_or_keywords(self, keyword):
        assert_equal(len(keyword.messages), 0)
        assert_equal(len(keyword.keywords), 0)

    def _remove(self, option, item):
        item.visit(SuiteConfigurer(remove_keywords=option))

    def _remove_passed(self, item):
        self._remove('PASSED', item)

    def _remove_for_loop(self, item):
        self._remove('FOR', item)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
kevinschoonover/cs2001-cpl | homework/2017-fs-1b-hw04-ksyh3/test/test_view_inator.py | 1 | 3578 | """Tests for view_inator route."""
import json
import random
from http import HTTPStatus
from urllib.parse import urlparse
from utils import from_datetime
def test_login_required(app):
    """Redirect to login if we're not logged in."""
    # An anonymous request bounces to the login page...
    response = app.get("/view/uuid-goes-here/")
    assert response.status_code == HTTPStatus.FOUND
    assert urlparse(response.location).path == "/login/"

    # ...and following the redirect shows a flash message.
    response = app.get("/view/uuid-goes-here/", follow_redirects=True)
    assert b"You must be logged in to access that page." in response.data
def test_submit_invalid_methods(app):
    """Form submission doesn't work for invalid HTTP methods."""
    # Log in
    with app.session_transaction() as sess:
        sess["username"] = "heinz"

    # HEAD and OPTIONS are implemented by Flask. No need to test those.
    # DELETE, PUT, and POST must all be rejected.
    for request_method in (app.delete, app.put, app.post):
        response = request_method("/view/beep/")
        assert response.status_code == HTTPStatus.METHOD_NOT_ALLOWED
def test_load_page(app, inator_data, data_path):
    """Page loads if the user is logged in."""
    # Log in
    with app.session_transaction() as sess:
        sess["username"] = "heinz"

    # Save some random inators to the data file
    with open(data_path, "w") as data_file:
        json.dump({"inators": inator_data}, data_file, default=from_datetime)

    # Choose an inator at random and request its detail page.
    inator = random.choice(list(inator_data.values()))
    response = app.get("/view/{}/".format(inator["ident"]))
    assert response.status_code == HTTPStatus.OK

    page = response.data
    assert "Details for {}".format(inator["name"]).encode("ascii") in page

    # Every detail of the inator should appear on the page.
    expected_details = (
        inator["ident"],
        inator["name"],
        inator["location"],
        inator["description"],
        inator["added"].strftime("%Y-%m-%d %H:%M:%S"),
        inator["condition"].name,
    )
    for detail in expected_details:
        assert detail.encode("ascii") in page
def test_load_invalid(app, inator_data, data_path):
    """Redirected if the URL has an invalid UUID."""
    # Log in
    with app.session_transaction() as sess:
        sess["username"] = "heinz"

    # Save some random inators to the data file
    with open(data_path, "w") as data_file:
        json.dump({"inators": inator_data}, data_file, default=from_datetime)

    # An unknown identifier redirects home...
    response = app.get("/view/bleep-bloop-no-way-you-guys/")
    assert response.status_code == HTTPStatus.FOUND
    assert urlparse(response.location).path == "/"

    # ...and flashes an explanatory message.
    response = app.get("/view/bleep-bloop/", follow_redirects=True)
    assert b"No such inator with identifier bleep-bloop." in response.data
def test_load_invalid_no_data(app, inator_data, data_path):
    """Redirected if there's no data in the data file."""
    # Log in
    with app.session_transaction() as sess:
        sess["username"] = "heinz"

    # This time the data file is deliberately left empty.
    # The unknown identifier still redirects home...
    response = app.get("/view/bleep-bloop-no-way-you-guys/")
    assert response.status_code == HTTPStatus.FOUND
    assert urlparse(response.location).path == "/"

    # ...and still flashes the not-found message.
    response = app.get("/view/bleep-bloop/", follow_redirects=True)
    assert b"No such inator with identifier bleep-bloop." in response.data
| gpl-3.0 |
jerryge/zulip | tools/deprecated/finbot/money.py | 114 | 7730 | #!/usr/bin/python
import datetime
import monthdelta
def parse_date(date_str):
    """Parse a YYYY-MM-DD string into a naive datetime."""
    fmt = "%Y-%m-%d"
    return datetime.datetime.strptime(date_str, fmt)
def unparse_date(date_obj):
    """Format a datetime back into YYYY-MM-DD form."""
    return "{:%Y-%m-%d}".format(date_obj)
class Company(object):
    """A named collection of cash flows with cash-position reporting.

    NOTE: Python 2 module (uses print statements).
    """

    def __init__(self, name):
        self.name = name
        # CashFlow objects contributing to this company's cash position.
        self.flows = []
        # When True, per-flow deltas are printed while reporting.
        self.verbose = False

    def __str__(self):
        return self.name

    def add_flow(self, flow):
        """Register a CashFlow object with this company."""
        self.flows.append(flow)

    def cash_at_date_internal(self, start_date, end_date):
        """Sum all flows' deltas between two datetimes, rounded to cents."""
        cash = 0
        for flow in self.flows:
            delta = flow.cashflow(start_date, end_date, (end_date - start_date).days)
            cash += delta
            if self.verbose:
                print flow.name, round(delta, 2)
        return round(cash, 2)

    def cash_at_date(self, start, end):
        """Like cash_at_date_internal, but takes YYYY-MM-DD strings."""
        start_date = parse_date(start)
        end_date = parse_date(end)
        return self.cash_at_date_internal(start_date, end_date)

    def cash_monthly_summary(self, start, end):
        """Print the cumulative cash position month by month."""
        start_date = parse_date(start)
        cur_date = parse_date(start)
        end_date = parse_date(end)
        while cur_date <= end_date:
            print cur_date, self.cash_at_date_internal(start_date, cur_date)
            cur_date += monthdelta.MonthDelta(1)
        if self.verbose:
            print
# CashFlow objects provide a single entry point, cashflow(start, end, days),
# which reports how much cash the source has contributed over a period.
class CashFlow(object):
    """Base class for all cash flow sources.

    Subclasses implement cashflow(start, end, days); costs return
    negative amounts.
    """

    def __init__(self, name):
        self.name = name
class FixedCost(CashFlow):
    """A one-time cost, charged in full regardless of the window."""

    def __init__(self, name, amount):
        super(FixedCost, self).__init__(name)
        # Stored negated: costs reduce cash.
        self.cost = -amount

    def cashflow(self, start, end, days):
        return self.cost
class ConstantCost(CashFlow):
    """A cost that accrues continuously at a fixed annual rate."""

    def __init__(self, name, amount):
        super(ConstantCost, self).__init__(name)
        # Annual rate, negated since costs reduce cash.
        self.rate = -amount

    def cashflow(self, start, end, days):
        # Pro-rate the annual amount over the elapsed days.
        return self.rate * days / 365.
class PeriodicCost(CashFlow):
    """A cost charged every `interval` days, beginning on `start`."""

    def __init__(self, name, amount, start, interval):
        super(PeriodicCost, self).__init__(name)
        self.amount = -amount
        self.start = parse_date(start)
        self.interval = interval

    def cashflow(self, start, end, days):
        # Charge once for every occurrence falling inside [start, end].
        total = 0
        occurrence = self.start
        while occurrence <= end:
            if occurrence >= start:
                total += self.amount
            occurrence += datetime.timedelta(days=self.interval)
        return total
class MonthlyCost(CashFlow):
    """A cost charged once a month, beginning on `start`."""

    def __init__(self, name, amount, start):
        super(MonthlyCost, self).__init__(name)
        self.amount = -amount
        self.start = parse_date(start)

    def cashflow(self, start, end, days):
        # Charge once for every monthly occurrence inside [start, end].
        total = 0
        occurrence = self.start
        while occurrence <= end:
            if occurrence >= start:
                total += self.amount
            occurrence += monthdelta.MonthDelta(1)
        return total
class TotalCost(CashFlow):
    """Aggregates several CashFlow objects into a single flow."""

    def __init__(self, name, *args):
        # Deliberately does not call CashFlow.__init__; sets name directly.
        self.name = name
        self.flows = args

    def cashflow(self, start, end, days):
        total = 0
        for flow in self.flows:
            total += flow.cashflow(start, end, days)
        return total
class SemiMonthlyCost(TotalCost):
    """A cost charged twice a month, on `start1` and `start2`.

    When start2 is omitted it defaults to 14 days after start1.
    """

    def __init__(self, name, amount, start1, start2=None):
        if start2 is None:
            start2 = unparse_date(parse_date(start1) + datetime.timedelta(days=14))
        super(SemiMonthlyCost, self).__init__(
            name,
            MonthlyCost(name, amount, start1),
            MonthlyCost(name, amount, start2),
        )
class SemiMonthlyWagesNoTax(SemiMonthlyCost):
    """Semi-monthly payroll for an annual wage, ignoring employer taxes."""

    def __init__(self, name, wage, start):
        per_paycheck = self.compute_wage(wage)
        super(SemiMonthlyWagesNoTax, self).__init__(name, per_paycheck, start)

    def compute_wage(self, wage):
        # 24 paychecks per year.
        return wage / 24.
class SemiMonthlyWages(SemiMonthlyWagesNoTax):
    """Semi-monthly payroll including employer-side taxes."""

    def __init__(self, name, wage, start):
        super(SemiMonthlyWages, self).__init__(name, wage, start)

    def compute_wage(self, wage):
        # Employer FICA: 6.2% Social Security (capped at $110,100 of wage)
        # plus 1.45% Medicare, plus a flat unemployment-tax estimate.
        fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
        unemp_tax = 450
        return (wage + fica_tax + unemp_tax) / 24.
class DelayedCost(CashFlow):
    """Wraps another cash-flow model so it only takes effect from `start`."""

    def __init__(self, start, base_model):
        super(DelayedCost, self).__init__("Delayed")
        self.base_model = base_model
        self.start = parse_date(start)

    def cashflow(self, start, end, days):
        # Clamp the window to begin no earlier than the delay date.
        effective_start = max(start, self.start)
        if effective_start > end:
            return 0
        return self.base_model.cashflow(effective_start, end,
                                        (end - effective_start).days)
class BiweeklyWagesNoTax(PeriodicCost):
    """Payroll every 14 days for an annual wage, without employer taxes."""

    def __init__(self, name, wage, start):
        super(BiweeklyWagesNoTax, self).__init__(name, self.compute_wage(wage), start, 14)

    def compute_wage(self, wage):
        # You would think this calculation would be (wage * 14 /
        # 365.24), but you'd be wrong -- companies paying biweekly
        # wages overpay by about 0.34% by doing the math this way
        return wage / 26.
class BiweeklyWages(BiweeklyWagesNoTax):
    """Biweekly payroll including employer-side taxes."""

    def compute_wage(self, wage):
        # Employer FICA: 6.2% Social Security up to the 110100 cap
        # (presumably 2012 wage base -- TODO confirm) + 1.45% Medicare.
        fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
        # Flat per-employee unemployment tax (assumed annual; verify).
        unemp_tax = 450
        # You would think this calculation would be (wage * 14 /
        # 365.24), but you'd be wrong -- companies paying biweekly
        # wages overpay by about 0.34% by doing the math this way
        return (wage + fica_tax + unemp_tax) / 26.

    def __init__(self, name, wage, start):
        super(BiweeklyWages, self).__init__(name, wage, start)
if __name__ == "__main__":
    # Tests (Python 2 script).  Pattern: add a flow, assert the resulting
    # balance, then add the opposite flow to cancel it before the next case.
    c = Company("Example Inc")
    c.add_flow(FixedCost("Initial Cash", -500000))
    c.add_flow(FixedCost("Incorporation", 500))
    assert(c.cash_at_date("2012-01-01", "2012-03-01") == 500000 - 500)
    c.add_flow(FixedCost("Incorporation", -500))
    c.add_flow(ConstantCost("Office", 50000))
    assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000 - round(50000*1/365., 2))
    c.add_flow(ConstantCost("Office", -50000))
    c.add_flow(PeriodicCost("Payroll", 4000, "2012-01-05", 14))
    assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000)
    assert(c.cash_at_date("2012-01-01", "2012-01-06") == 500000 - 4000)
    c.add_flow(PeriodicCost("Payroll", -4000, "2012-01-05", 14))
    # Delayed flows only start accruing at their delay date.
    c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
    assert(c.cash_at_date("2012-01-01", "2012-01-05") == 500000)
    assert(c.cash_at_date("2012-01-01", "2012-02-05") == 500000 - round(50000*4/365., 2))
    c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", -50000)))
    c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
    assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000)
    c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", -50000)))
    # Semi-monthly: charged on the 1st and (by default) the 15th.
    c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
    assert(c.cash_at_date("2012-01-01", "2012-01-01") == 500000 - 4000)
    assert(c.cash_at_date("2012-01-01", "2012-01-14") == 500000 - 4000)
    assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000 - 4000 * 2)
    assert(c.cash_at_date("2012-01-01", "2012-01-31") == 500000 - 4000 * 2)
    assert(c.cash_at_date("2012-01-01", "2012-02-01") == 500000 - 4000 * 3)
    assert(c.cash_at_date("2012-01-01", "2012-02-15") == 500000 - 4000 * 4)
    c.add_flow(SemiMonthlyCost("Payroll", -4000, "2012-01-01"))
    c.add_flow(SemiMonthlyWages("Payroll", 4000, "2012-01-01"))
    assert(c.cash_at_date("2012-01-01", "2012-02-15") == 499207.33)
    c.add_flow(SemiMonthlyWages("Payroll", -4000, "2012-01-01"))
    print c
    c.cash_monthly_summary("2012-01-01", "2012-07-01")
| apache-2.0 |
saurabh6790/med_new_app | stock/doctype/serial_no/serial_no.py | 20 | 12215 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, getdate, cstr, flt, add_days
import datetime
from webnotes import _, ValidationError
from controllers.stock_controller import StockController
# Fine-grained validation errors for Serial No handling, so callers and
# tests can catch specific failure modes.
class SerialNoCannotCreateDirectError(ValidationError): pass
# NOTE(review): doubled "Cannot" in the name -- kept as-is because external
# code may catch the exception by this exact name.
class SerialNoCannotCannotChangeError(ValidationError): pass
class SerialNoNotRequiredError(ValidationError): pass
class SerialNoRequiredError(ValidationError): pass
class SerialNoQtyError(ValidationError): pass
class SerialNoItemError(ValidationError): pass
class SerialNoWarehouseError(ValidationError): pass
class SerialNoStatusError(ValidationError): pass
class SerialNoNotExistsError(ValidationError): pass
class SerialNoDuplicateError(ValidationError): pass
class DocType(StockController):
    """Controller for the Serial No doctype.

    Keeps a serial number's warehouse, status, and purchase/delivery
    details in sync with its Stock Ledger Entries.
    """

    def __init__(self, doc, doclist=None):
        self.doc = doc
        self.doclist = doclist or []
        # True when driven by a stock ledger posting; relaxes the
        # warehouse-change check in validate_warehouse().
        self.via_stock_ledger = False

    def validate(self):
        # A serial no may not be created directly with a warehouse; only
        # stock transactions may place it in one.
        if self.doc.fields.get("__islocal") and self.doc.warehouse:
            webnotes.throw(_("New Serial No cannot have Warehouse. Warehouse must be \
                set by Stock Entry or Purchase Receipt"), SerialNoCannotCreateDirectError)
        self.validate_warranty_status()
        self.validate_amc_status()
        self.validate_warehouse()
        self.validate_item()
        self.on_stock_ledger_entry()

    def validate_amc_status(self):
        # maintenance_status must agree with amc_expiry_date relative to today.
        if (self.doc.maintenance_status == 'Out of AMC' and self.doc.amc_expiry_date and getdate(self.doc.amc_expiry_date) >= datetime.date.today()) or (self.doc.maintenance_status == 'Under AMC' and (not self.doc.amc_expiry_date or getdate(self.doc.amc_expiry_date) < datetime.date.today())):
            webnotes.throw(self.doc.name + ": " +
                _("AMC expiry date and maintenance status mismatched"))

    def validate_warranty_status(self):
        # Same cross-check as AMC, for the warranty fields.
        if (self.doc.maintenance_status == 'Out of Warranty' and self.doc.warranty_expiry_date and getdate(self.doc.warranty_expiry_date) >= datetime.date.today()) or (self.doc.maintenance_status == 'Under Warranty' and (not self.doc.warranty_expiry_date or getdate(self.doc.warranty_expiry_date) < datetime.date.today())):
            webnotes.throw(self.doc.name + ": " +
                _("Warranty expiry date and maintenance status mismatched"))

    def validate_warehouse(self):
        if not self.doc.fields.get("__islocal"):
            item_code, warehouse = webnotes.conn.get_value("Serial No",
                self.doc.name, ["item_code", "warehouse"])
            if item_code != self.doc.item_code:
                webnotes.throw(_("Item Code cannot be changed for Serial No."),
                    SerialNoCannotCannotChangeError)
            # Warehouse may only change through stock ledger postings.
            if not self.via_stock_ledger and warehouse != self.doc.warehouse:
                webnotes.throw(_("Warehouse cannot be changed for Serial No."),
                    SerialNoCannotCannotChangeError)

    def validate_item(self):
        """
        Validate whether serial no is required for this item
        """
        item = webnotes.doc("Item", self.doc.item_code)
        if item.has_serial_no!="Yes":
            webnotes.throw(_("Item must have 'Has Serial No' as 'Yes'") + ": " + self.doc.item_code)
        # Denormalize item details onto the serial no record.
        self.doc.item_group = item.item_group
        self.doc.description = item.description
        self.doc.item_name = item.item_name
        self.doc.brand = item.brand
        self.doc.warranty_period = item.warranty_period

    def set_status(self, last_sle):
        """Derive this serial no's status from its latest stock ledger entry."""
        if last_sle:
            if last_sle.voucher_type == "Stock Entry":
                document_type = webnotes.conn.get_value("Stock Entry", last_sle.voucher_no,
                    "purpose")
            else:
                document_type = last_sle.voucher_type
            if last_sle.actual_qty > 0:
                # Incoming movement.
                if document_type == "Sales Return":
                    self.doc.status = "Sales Returned"
                else:
                    self.doc.status = "Available"
            else:
                # Outgoing movement.
                if document_type == "Purchase Return":
                    self.doc.status = "Purchase Returned"
                elif last_sle.voucher_type in ("Delivery Note", "Sales Invoice"):
                    self.doc.status = "Delivered"
                else:
                    self.doc.status = "Not Available"

    def set_purchase_details(self, purchase_sle):
        """Populate (or clear, when None) purchase fields from the incoming entry."""
        if purchase_sle:
            self.doc.purchase_document_type = purchase_sle.voucher_type
            self.doc.purchase_document_no = purchase_sle.voucher_no
            self.doc.purchase_date = purchase_sle.posting_date
            self.doc.purchase_time = purchase_sle.posting_time
            self.doc.purchase_rate = purchase_sle.incoming_rate
            if purchase_sle.voucher_type == "Purchase Receipt":
                self.doc.supplier, self.doc.supplier_name = \
                    webnotes.conn.get_value("Purchase Receipt", purchase_sle.voucher_no,
                        ["supplier", "supplier_name"])
        else:
            for fieldname in ("purchase_document_type", "purchase_document_no",
                "purchase_date", "purchase_time", "purchase_rate", "supplier", "supplier_name"):
                self.doc.fields[fieldname] = None

    def set_sales_details(self, delivery_sle):
        """Populate (or clear, when None) delivery fields from the outgoing entry."""
        if delivery_sle:
            self.doc.delivery_document_type = delivery_sle.voucher_type
            self.doc.delivery_document_no = delivery_sle.voucher_no
            self.doc.delivery_date = delivery_sle.posting_date
            self.doc.delivery_time = delivery_sle.posting_time
            self.doc.customer, self.doc.customer_name = \
                webnotes.conn.get_value(delivery_sle.voucher_type, delivery_sle.voucher_no,
                    ["customer", "customer_name"])
            # Warranty runs from the delivery date.
            if self.doc.warranty_period:
                self.doc.warranty_expiry_date = add_days(cstr(delivery_sle.posting_date),
                    cint(self.doc.warranty_period))
        else:
            for fieldname in ("delivery_document_type", "delivery_document_no",
                "delivery_date", "delivery_time", "customer", "customer_name",
                "warranty_expiry_date"):
                self.doc.fields[fieldname] = None

    def get_last_sle(self):
        """Return a dict with last_sle, purchase_sle (latest incoming) and,
        when the serial no is out of stock, delivery_sle (latest outgoing)."""
        entries = {}
        sle_dict = self.get_stock_ledger_entries()
        if sle_dict:
            if sle_dict.get("incoming", []):
                entries["purchase_sle"] = sle_dict["incoming"][0]
            # More incoming than outgoing entries means the unit is in stock.
            if len(sle_dict.get("incoming", [])) - len(sle_dict.get("outgoing", [])) > 0:
                entries["last_sle"] = sle_dict["incoming"][0]
            else:
                entries["last_sle"] = sle_dict["outgoing"][0]
                entries["delivery_sle"] = sle_dict["outgoing"][0]
        return entries

    def get_stock_ledger_entries(self):
        """Bucket this serial no's ledger entries into incoming/outgoing lists,
        each ordered newest-first (index 0 is the latest)."""
        sle_dict = {}
        for sle in webnotes.conn.sql("""select * from `tabStock Ledger Entry`
            where serial_no like %s and item_code=%s and ifnull(is_cancelled, 'No')='No'
            order by posting_date desc, posting_time desc, name desc""",
            ("%%%s%%" % self.doc.name, self.doc.item_code), as_dict=1):
            # `like` can over-match on substrings; confirm exact membership.
            if self.doc.name.upper() in get_serial_nos(sle.serial_no):
                if sle.actual_qty > 0:
                    sle_dict.setdefault("incoming", []).append(sle)
                else:
                    sle_dict.setdefault("outgoing", []).append(sle)
        return sle_dict

    def on_trash(self):
        # Delivered serial nos and serial nos still in a warehouse are
        # protected from deletion.
        if self.doc.status == 'Delivered':
            webnotes.throw(_("Delivered Serial No ") + self.doc.name + _(" can not be deleted"))
        if self.doc.warehouse:
            webnotes.throw(_("Cannot delete Serial No in warehouse. \
                First remove from warehouse, then delete.") + ": " + self.doc.name)

    def before_rename(self, old, new, merge=False):
        if merge:
            webnotes.throw(_("Sorry, Serial Nos cannot be merged"))

    def after_rename(self, old, new, merge=False):
        """rename serial_no text fields"""
        # Rewrite the old serial no inside every Text `serial_no` field
        # across all doctypes that have one.
        for dt in webnotes.conn.sql("""select parent from tabDocField
            where fieldname='serial_no' and fieldtype='Text'"""):
            for item in webnotes.conn.sql("""select name, serial_no from `tab%s`
                where serial_no like '%%%s%%'""" % (dt[0], old)):
                serial_nos = map(lambda i: i==old and new or i, item[1].split('\n'))
                webnotes.conn.sql("""update `tab%s` set serial_no = %s
                    where name=%s""" % (dt[0], '%s', '%s'),
                    ('\n'.join(serial_nos), item[0]))

    def on_stock_ledger_entry(self):
        # Refresh status and purchase/delivery details whenever a stock
        # ledger posting drives this controller.
        if self.via_stock_ledger and not self.doc.fields.get("__islocal"):
            last_sle = self.get_last_sle()
            if last_sle:
                self.set_status(last_sle.get("last_sle"))
                self.set_purchase_details(last_sle.get("purchase_sle"))
                self.set_sales_details(last_sle.get("delivery_sle"))

    def on_communication(self):
        # Serial nos ignore linked communications.
        return
def process_serial_no(sle):
    """Validate and apply the serial-no effects of one Stock Ledger Entry."""
    item_det = get_item_details(sle.item_code)
    validate_serial_no(sle, item_det)
    update_serial_nos(sle, item_det)
def validate_serial_no(sle, item_det):
    """Validate the serial nos on a Stock Ledger Entry against Item settings.

    Raises a specific SerialNo*Error for each failure mode: serial nos
    given for a non-serialized item, fractional or mismatched qty, wrong
    item, duplicate receipt, wrong warehouse or status on issue, issuing a
    non-existent serial no, or a missing serial no for a serialized item
    that has no naming series.
    """
    if item_det.has_serial_no=="No":
        if sle.serial_no:
            webnotes.throw(_("Serial Number should be blank for Non Serialized Item" + ": "
                + sle.item_code), SerialNoNotRequiredError)
    else:
        if sle.serial_no:
            serial_nos = get_serial_nos(sle.serial_no)
            if cint(sle.actual_qty) != flt(sle.actual_qty):
                webnotes.throw(_("Serial No qty cannot be a fraction") + \
                    (": %s (%s)" % (sle.item_code, sle.actual_qty)))
            # One serial no per unit of stock moved.
            if len(serial_nos) and len(serial_nos) != abs(cint(sle.actual_qty)):
                webnotes.throw(_("Serial Nos do not match with qty") + \
                    (": %s (%s)" % (sle.item_code, sle.actual_qty)), SerialNoQtyError)
            for serial_no in serial_nos:
                if webnotes.conn.exists("Serial No", serial_no):
                    sr = webnotes.bean("Serial No", serial_no)
                    if sr.doc.item_code!=sle.item_code:
                        webnotes.throw(_("Serial No does not belong to Item") +
                            (": %s (%s)" % (sle.item_code, serial_no)), SerialNoItemError)
                    # Incoming entry: a serial no already sitting in a
                    # warehouse cannot be received a second time.
                    if sr.doc.warehouse and sle.actual_qty > 0:
                        webnotes.throw(_("Same Serial No") + ": " + sr.doc.name +
                            _(" can not be received twice"), SerialNoDuplicateError)
                    if sle.actual_qty < 0:
                        # Outgoing entry: must leave from its current warehouse...
                        if sr.doc.warehouse!=sle.warehouse:
                            webnotes.throw(_("Serial No") + ": " + serial_no +
                                _(" does not belong to Warehouse") + ": " + sle.warehouse,
                                SerialNoWarehouseError)
                        # ...and must be deliverable for sales vouchers.
                        if sle.voucher_type in ("Delivery Note", "Sales Invoice") \
                            and sr.doc.status != "Available":
                            webnotes.throw(_("Serial No status must be 'Available' to Deliver")
                                + ": " + serial_no, SerialNoStatusError)
                elif sle.actual_qty < 0:
                    # transfer out
                    webnotes.throw(_("Serial No must exist to transfer out.") + \
                        ": " + serial_no, SerialNoNotExistsError)
        elif sle.actual_qty < 0 or not item_det.serial_no_series:
            # Serial nos are mandatory on issue, and on receipt when no
            # naming series exists to auto-generate them.
            webnotes.throw(_("Serial Number Required for Serialized Item" + ": "
                + sle.item_code), SerialNoRequiredError)
def update_serial_nos(sle, item_det):
    """Auto-generate serial nos when needed, then sync each Serial No record.

    On receipt the serial no is placed in the entry's warehouse; on issue
    its warehouse is cleared.  Missing records are created for receipts.
    """
    if sle.is_cancelled == "No" and not sle.serial_no and sle.actual_qty > 0 and item_det.serial_no_series:
        from webnotes.model.doc import make_autoname
        serial_nos = []
        for i in xrange(cint(sle.actual_qty)):
            serial_nos.append(make_autoname(item_det.serial_no_series))
        webnotes.conn.set(sle, "serial_no", "\n".join(serial_nos))
    if sle.serial_no:
        serial_nos = get_serial_nos(sle.serial_no)
        for serial_no in serial_nos:
            if webnotes.conn.exists("Serial No", serial_no):
                sr = webnotes.bean("Serial No", serial_no)
                # Mark as ledger-driven so warehouse changes are allowed.
                sr.make_controller().via_stock_ledger = True
                sr.doc.warehouse = sle.warehouse if sle.actual_qty > 0 else None
                sr.save()
            elif sle.actual_qty > 0:
                make_serial_no(serial_no, sle)
def get_item_details(item_code):
    """Return the serial-no-related Item fields for `item_code` as a dict."""
    rows = webnotes.conn.sql("""select name, has_batch_no, docstatus,
        is_stock_item, has_serial_no, serial_no_series
        from tabItem where name=%s""", item_code, as_dict=True)
    return rows[0]
def get_serial_nos(serial_no):
    """Split a serial-no field (newline- or comma-separated) into a list of
    upper-cased, stripped, non-empty serial numbers."""
    normalized = cstr(serial_no).strip().upper().replace(',', '\n')
    return [piece.strip() for piece in normalized.split('\n') if piece.strip()]
def make_serial_no(serial_no, sle):
    """Create a Serial No record from a stock ledger entry; return its name."""
    sr = webnotes.new_bean("Serial No")
    sr.doc.serial_no = serial_no
    sr.doc.item_code = sle.item_code
    sr.make_controller().via_stock_ledger = True
    sr.insert()
    # Warehouse can only be set after insert (validate() rejects a new
    # serial no that already has a warehouse), hence the second save.
    sr.doc.warehouse = sle.warehouse
    sr.doc.status = "Available"
    sr.save()
    webnotes.msgprint(_("Serial No created") + ": " + sr.doc.name)
    return sr.doc.name
def update_serial_nos_after_submit(controller, parentfield):
    """Copy serial nos assigned by Stock Ledger Entries back onto the
    voucher's child rows (matched by voucher_detail_no)."""
    stock_ledger_entries = webnotes.conn.sql("""select voucher_detail_no, serial_no
        from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
        (controller.doc.doctype, controller.doc.name), as_dict=True)
    if not stock_ledger_entries:
        return
    # First matching ledger entry wins, as in the original linear scan.
    serial_no_by_detail = {}
    for sle in stock_ledger_entries:
        serial_no_by_detail.setdefault(sle.voucher_detail_no, sle.serial_no)
    for d in controller.doclist.get({"parentfield": parentfield}):
        serial_no = serial_no_by_detail.get(d.name)
        if d.serial_no != serial_no:
            d.serial_no = serial_no
            webnotes.conn.set_value(d.doctype, d.name, "serial_no", serial_no)
| agpl-3.0 |
rodrigc/buildbot | master/buildbot/test/unit/data/test_builds.py | 5 | 14436 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.data import builds
from buildbot.data import resultspec
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import endpoint
from buildbot.test.util import interfaces
from buildbot.test.util.misc import TestReactorMixin
from buildbot.util import epoch2datetime
class BuildEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the single-build endpoint (/builds/N, /builders/B/builds/N),
    including its stop/rebuild control actions."""

    endpointClass = builds.BuildEndpoint
    resourceTypeClass = builds.Build

    def setUp(self):
        self.setUpEndpoint()
        # Three builds (numbers 3-5, ids 13-15) on builder 77, all sharing
        # build request 82 and worker 13.
        self.db.insertTestData([
            fakedb.Builder(id=77, name='builder77'),
            fakedb.Master(id=88),
            fakedb.Worker(id=13, name='wrk'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=82, buildsetid=8822, builderid=77),
            fakedb.Build(id=13, builderid=77, masterid=88, workerid=13,
                         buildrequestid=82, number=3),
            fakedb.Build(id=14, builderid=77, masterid=88, workerid=13,
                         buildrequestid=82, number=4),
            fakedb.Build(id=15, builderid=77, masterid=88, workerid=13,
                         buildrequestid=82, number=5),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def test_get_existing(self):
        build = yield self.callGet(('builds', 14))
        self.validateData(build)
        self.assertEqual(build['number'], 4)

    @defer.inlineCallbacks
    def test_get_missing(self):
        # Unknown build ids resolve to None, not an error.
        build = yield self.callGet(('builds', 9999))
        self.assertEqual(build, None)

    @defer.inlineCallbacks
    def test_get_missing_builder_number(self):
        build = yield self.callGet(('builders', 999, 'builds', 4))
        self.assertEqual(build, None)

    @defer.inlineCallbacks
    def test_get_builder_missing_number(self):
        build = yield self.callGet(('builders', 77, 'builds', 44))
        self.assertEqual(build, None)

    @defer.inlineCallbacks
    def test_get_builder_number(self):
        # Lookup by (builderid, build number) path.
        build = yield self.callGet(('builders', 77, 'builds', 5))
        self.validateData(build)
        self.assertEqual(build['buildid'], 15)

    @defer.inlineCallbacks
    def test_get_buildername_number(self):
        # Lookup by (builder name, build number) path.
        build = yield self.callGet(('builders', 'builder77', 'builds', 5))
        self.validateData(build)
        self.assertEqual(build['buildid'], 15)

    @defer.inlineCallbacks
    def test_get_buildername_not_existing_number(self):
        build = yield self.callGet(('builders', 'builder77_nope', 'builds', 5))
        self.assertEqual(build, None)

    @defer.inlineCallbacks
    def test_properties_injection(self):
        # A 'property' filter causes the properties field to be populated.
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('property', 'eq', [False])])
        build = yield self.callGet(('builders', 77, 'builds', 5), resultSpec=resultSpec)
        self.validateData(build)
        self.assertIn('properties', build)

    @defer.inlineCallbacks
    def test_action_stop(self):
        # Without an explicit reason, 'no reason' is sent on the mq.
        yield self.callControl("stop", {}, ('builders', 77, 'builds', 5))
        self.master.mq.assertProductions(
            [(('control', 'builds', '15', 'stop'), {'reason': 'no reason'})])

    @defer.inlineCallbacks
    def test_action_stop_reason(self):
        yield self.callControl("stop", {'reason': 'because'}, ('builders', 77, 'builds', 5))
        self.master.mq.assertProductions(
            [(('control', 'builds', '15', 'stop'), {'reason': 'because'})])

    @defer.inlineCallbacks
    def test_action_rebuild(self):
        # rebuild delegates to updates.rebuildBuildrequest with the
        # build's own buildrequest.
        self.patch(self.master.data.updates, "rebuildBuildrequest",
                   mock.Mock(spec=self.master.data.updates.rebuildBuildrequest,
                             return_value=(1, [2])))
        r = yield self.callControl("rebuild", {}, ('builders', 77, 'builds', 5))
        self.assertEqual(r, (1, [2]))
        buildrequest = yield self.master.data.get(('buildrequests', 82))
        self.master.data.updates.rebuildBuildrequest.assert_called_with(
            buildrequest)
class BuildsEndpoint(endpoint.EndpointMixin, unittest.TestCase):
    """Tests for the build-collection endpoint and its various parent paths
    (builders, buildrequests, workers) and result-spec filters."""

    endpointClass = builds.BuildsEndpoint
    resourceTypeClass = builds.Build

    def setUp(self):
        self.setUpEndpoint()
        # Builds 3/4 (builder 77, worker 13, incomplete) and builds 5/6
        # (builders 78/79, worker 12, complete_at=1 i.e. complete).
        self.db.insertTestData([
            fakedb.Builder(id=77, name='builder77'),
            fakedb.Builder(id=78, name='builder78'),
            fakedb.Builder(id=79, name='builder79'),
            fakedb.Master(id=88),
            fakedb.Worker(id=13, name='wrk'),
            fakedb.Buildset(id=8822),
            fakedb.BuildRequest(id=82, buildsetid=8822),
            fakedb.Build(id=13, builderid=77, masterid=88, workerid=13,
                         buildrequestid=82, number=3),
            fakedb.Build(id=14, builderid=77, masterid=88, workerid=13,
                         buildrequestid=82, number=4),
            fakedb.Build(id=15, builderid=78, masterid=88, workerid=12,
                         buildrequestid=83, number=5, complete_at=1),
            fakedb.Build(id=16, builderid=79, masterid=88, workerid=12,
                         buildrequestid=84, number=6, complete_at=1),
        ])

    def tearDown(self):
        self.tearDownEndpoint()

    @defer.inlineCallbacks
    def test_get_all(self):
        builds = yield self.callGet(('builds',))
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]),
                         [3, 4, 5, 6])

    @defer.inlineCallbacks
    def test_get_builder(self):
        builds = yield self.callGet(('builders', 78, 'builds'))
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [5])

    @defer.inlineCallbacks
    def test_get_buildername(self):
        builds = yield self.callGet(('builders', 'builder78', 'builds'))
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [5])

    @defer.inlineCallbacks
    def test_get_buildername_not_existing(self):
        # An unknown builder name yields an empty collection, not an error.
        builds = yield self.callGet(('builders', 'builder78_nope', 'builds'))
        self.assertEqual(builds, [])

    @defer.inlineCallbacks
    def test_get_buildrequest(self):
        builds = yield self.callGet(('buildrequests', 82, 'builds'))
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_get_buildrequest_not_existing(self):
        builds = yield self.callGet(('buildrequests', 899, 'builds'))
        self.assertEqual(builds, [])

    @defer.inlineCallbacks
    def test_get_buildrequest_via_filter(self):
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('buildrequestid', 'eq', [82])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_get_buildrequest_via_filter_with_string(self):
        # String filter values are coerced to integers.
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('buildrequestid', 'eq', ['82'])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_get_worker(self):
        builds = yield self.callGet(('workers', 13, 'builds'))
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_get_complete(self):
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('complete', 'eq', [False])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_get_complete_at(self):
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('complete_at', 'eq', [None])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(build) for build in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])

    @defer.inlineCallbacks
    def test_properties_injection(self):
        # A 'property' filter populates the properties field on each build.
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('property', 'eq', [False])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        for b in builds:
            self.validateData(b)
            self.assertIn('properties', b)

    @defer.inlineCallbacks
    def test_get_filter_eq(self):
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('builderid', 'eq', [78, 79])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(b) for b in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [5, 6])

    @defer.inlineCallbacks
    def test_get_filter_ne(self):
        resultSpec = resultspec.OptimisedResultSpec(
            filters=[resultspec.Filter('builderid', 'ne', [78, 79])])
        builds = yield self.callGet(('builds',), resultSpec=resultSpec)
        [self.validateData(b) for b in builds]
        self.assertEqual(sorted([b['number'] for b in builds]), [3, 4])
class Build(interfaces.InterfaceTests, TestReactorMixin, unittest.TestCase):
    """Tests for the Build resource type's update methods: signatures,
    pass-through to the db layer, and mq event generation."""

    # Expected message body for a newly added build.
    new_build_event = {'builderid': 10,
                       'buildid': 100,
                       'buildrequestid': 13,
                       'workerid': 20,
                       'complete': False,
                       'complete_at': None,
                       'masterid': 824,
                       'number': 1,
                       'results': None,
                       'started_at': epoch2datetime(1),
                       'state_string': 'created',
                       'properties': {}}

    def setUp(self):
        self.setUpTestReactor()
        self.master = fakemaster.make_master(self, wantMq=True, wantDb=True,
                                             wantData=True)
        self.rtype = builds.Build(self.master)

    @defer.inlineCallbacks
    def do_test_callthrough(self, dbMethodName, method, exp_args=None,
                            exp_kwargs=None, *args, **kwargs):
        # Patch the named db method with a mock, invoke the data-layer
        # method, and verify both the returned value and the arguments
        # forwarded to the db layer.
        rv = (1, 2)
        m = mock.Mock(return_value=defer.succeed(rv))
        setattr(self.master.db.builds, dbMethodName, m)
        res = yield method(*args, **kwargs)
        self.assertIdentical(res, rv)
        m.assert_called_with(*(exp_args or args), **(exp_kwargs or kwargs))

    @defer.inlineCallbacks
    def do_test_event(self, method, exp_events=None,
                      *args, **kwargs):
        # Advance the fake clock so timestamps are deterministic, then
        # check the messages produced on the mq.
        self.reactor.advance(1)
        if exp_events is None:
            exp_events = []
        yield method(*args, **kwargs)
        self.master.mq.assertProductions(exp_events)

    def test_signature_addBuild(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.addBuild,  # fake
            self.rtype.addBuild)  # real
        def addBuild(self, builderid, buildrequestid, workerid):
            pass

    def test_addBuild(self):
        return self.do_test_callthrough('addBuild', self.rtype.addBuild,
            builderid=10, buildrequestid=13, workerid=20,
            exp_kwargs=dict(builderid=10, buildrequestid=13,
                            workerid=20, masterid=self.master.masterid,
                            state_string='created'))

    def test_addBuildEvent(self):
        @defer.inlineCallbacks
        def addBuild(*args, **kwargs):
            buildid, _ = yield self.rtype.addBuild(*args, **kwargs)
            yield self.rtype.generateNewBuildEvent(buildid)
            return None
        # A new build is announced on the builder, build, and worker routes.
        return self.do_test_event(addBuild,
            builderid=10, buildrequestid=13, workerid=20,
            exp_events=[(('builders', '10', 'builds', '1', 'new'),
                         self.new_build_event),
                        (('builds', '100', 'new'),
                         self.new_build_event),
                        (('workers', '20', 'builds', '100', 'new'),
                         self.new_build_event)])

    def test_signature_setBuildStateString(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.setBuildStateString,  # fake
            self.rtype.setBuildStateString)  # real
        def setBuildStateString(self, buildid, state_string):
            pass

    def test_setBuildStateString(self):
        return self.do_test_callthrough('setBuildStateString',
                                        self.rtype.setBuildStateString,
                                        buildid=10, state_string='a b')

    def test_signature_finishBuild(self):
        @self.assertArgSpecMatches(
            self.master.data.updates.finishBuild,  # fake
            self.rtype.finishBuild)  # real
        def finishBuild(self, buildid, results):
            pass

    def test_finishBuild(self):
        return self.do_test_callthrough('finishBuild', self.rtype.finishBuild,
                                        buildid=15, results=3)
| gpl-2.0 |
ttm/percolation | percolation/measures/text/overall.py | 1 | 3269 | import percolation as P
__doc__ = "functions for analysis of text by isolated functionalities \
or analysis and rendering roadmaps"
def measureAll(authors_messages, sectorialized_agents):
    """Overall text analysis routine, uses all resources.

    Runs raw-string, POS, wordnet and tf-idf analyses per author, per
    sector, and for the whole structure, and returns all locals as a dict
    (via ``locals()``), matching the legacy calling convention.

    Uses: P.measures.text.aux.textFromAuthors()
          P.text.aux.textFromSectors()
    Used by: P.renderLegacy.topologicalTextualCharacterization.Analysis()
    """
    authors_texts = P.measures.text.aux.textFromAuthors(authors_messages, sectorialized_agents)
    authors_measures = {}
    # Ensure the dels at the end succeed even when the inputs are empty
    # (otherwise the loop variables would be unbound -> NameError).
    author = sector = None
    # análise de cada mensagem e de cada autor
    for author in authors_texts:
        authors_measures[author] = {}
        texts = authors_texts[author]
        authors_measures[author]["raw_strings"] = P.measures.text.raw.analyseAll(texts)
        authors_measures[author]["pos"] = P.measures.text.pos.analyseAll(authors_measures[author]["raw_strings"])
        authors_measures[author]["wordnet"] = P.measures.text.wordnet.analyseAll(authors_measures[author]["pos"])
        authors_measures[author]["tfIdf"] = P.measures.text.tfIdf.analyseAll(texts)  # tfIdf de cada texto e do autor, numeric: mean e std das distancias
    # análise de cada setor e da estrutura toda
    # sectors_texts = P.text.aux.textFromSectors(authors_text,sectorialized_agents)
    sectors_measures = {}
    for sector in sectorialized_agents:
        # BUG FIX: the per-sector dict must exist before keyed assignment;
        # the original raised KeyError on the first assignment below.
        sectors_measures[sector] = {}
        sectors_measures[sector]["raw_strings"] = P.measures.text.raw.sectorsAnalyseAll(authors_measures, sectorialized_agents[sector])
        sectors_measures[sector]["pos"] = P.measures.text.pos.sectorsAnalyseAll(authors_measures, sectorialized_agents[sector])
        sectors_measures[sector]["wordnet"] = P.measures.text.wordnet.sectorsAnalyseAll(authors_measures, sectorialized_agents[sector])
        # tfIdf de cada texto e de cada autor, numeric: mean e std das distancias por texto e por autor, e media e etd dos autores
        sectors_measures[sector]["tfIdf"] = P.measures.text.tfIdf.sectorsAnalyseAll(authors_measures, sectorialized_agents[sector])
    overall_measures = {}
    overall_measures["raw_strings"] = P.measures.text.raw.systemAnalysis(sectors_measures)  # medias de toda a rede por mensagem, por autor e por setor
    # NOTE(review): "pos" and "wordnet" below also call ...text.raw.systemAnalysis;
    # this looks like a copy-paste slip (expected .pos / .wordnet) -- confirm
    # against the pos/wordnet modules before changing.
    overall_measures["pos"] = P.measures.text.raw.systemAnalysis(sectors_measures)
    overall_measures["wordnet"] = P.measures.text.raw.systemAnalysis(sectors_measures)
    # tfIdf measures por texto, autor e setor, numeric: media e desvio das distancias por cada grupo, media e desvio dos setores e dos autores
    overall_measures["tfIdf"] = P.measures.text.tfIdf.systemAnalysis(sectors_measures)  # medias de toda a rede por mensagem, por autor e por setor
    # Drop bulky inputs and loop temporaries before exposing locals().
    del authors_texts, sectorialized_agents, author, sector
    return locals()
| gpl-3.0 |
hunter007/django | tests/generic_views/test_base.py | 269 | 19854 | from __future__ import unicode_literals
import time
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import resolve
from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import require_jinja2
from django.views.generic import RedirectView, TemplateView, View
from . import views
class SimpleView(View):
    """
    A simple view with a docstring.
    """
    # Fixture: the docstring above is exercised by test_class_attributes.
    def get(self, request):
        return HttpResponse('This is a simple view')
class SimplePostView(SimpleView):
    # Reuse the GET handler for POST so both verbs return the same response.
    post = SimpleView.get
class PostOnlyView(View):
    """Fixture accepting only POST; other verbs should get 405 (see
    test_head_no_get)."""

    def post(self, request):
        return HttpResponse('This view only accepts POST')
class CustomizableView(SimpleView):
    # Class-level default for the `parameter` kwarg accepted by as_view()
    # (see test_invalid_keyword_argument).
    # NOTE(review): mutable class attribute shared across instances --
    # harmless for this fixture, but verify if reused elsewhere.
    parameter = {}
def decorator(view):
    """Tag *view* with an ``is_decorated`` flag and hand it back unchanged."""
    setattr(view, 'is_decorated', True)
    return view
class DecoratedDispatchView(SimpleView):
    # Fixture checking that attributes set by decorators on dispatch()
    # survive as_view() (see test_dispatch_decoration).
    @decorator
    def dispatch(self, request, *args, **kwargs):
        return super(DecoratedDispatchView, self).dispatch(request, *args, **kwargs)
class AboutTemplateView(TemplateView):
    """Fixture supplying its template via get_template_names()."""

    def get(self, request):
        return self.render_to_response({})

    def get_template_names(self):
        return ['generic_views/about.html']
class AboutTemplateAttributeView(TemplateView):
    """Fixture supplying its template via the template_name attribute."""
    template_name = 'generic_views/about.html'

    def get(self, request):
        return self.render_to_response(context={})
class InstanceView(View):
    # Returns the view instance itself so tests can verify each request
    # gets a fresh instance (see test_calling_more_than_once).
    def get(self, request):
        return self
class ViewTest(unittest.TestCase):
    """Behavioral tests for the class-based ``View`` base class."""

    rf = RequestFactory()

    def _assert_simple(self, response):
        """Assert *response* is SimpleView's canonical success response."""
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'This is a simple view')

    def test_no_init_kwargs(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        # assertRaises context manager replaces the older
        # try/except/self.fail() pattern.
        with self.assertRaises(AttributeError):
            SimpleView(key='value').as_view()

    def test_no_init_args(self):
        """
        Test that a view can't be accidentally instantiated before deployment
        """
        with self.assertRaises(TypeError):
            SimpleView.as_view('value')

    def test_pathological_http_method(self):
        """
        The edge case of a http request that spoofs an existing method name is caught.
        """
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='DISPATCH')
        ).status_code, 405)

    def test_get_only(self):
        """
        Test a view which only allows GET doesn't allow other methods.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        self.assertEqual(SimpleView.as_view()(self.rf.post('/')).status_code, 405)
        self.assertEqual(SimpleView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_get_and_head(self):
        """
        Test a view which supplies a GET method also responds correctly to HEAD.
        """
        self._assert_simple(SimpleView.as_view()(self.rf.get('/')))
        response = SimpleView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 200)

    def test_head_no_get(self):
        """
        Test a view which supplies no GET method responds to HEAD with HTTP 405.
        """
        response = PostOnlyView.as_view()(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)

    def test_get_and_post(self):
        """
        Test a view which only allows both GET and POST.
        """
        self._assert_simple(SimplePostView.as_view()(self.rf.get('/')))
        self._assert_simple(SimplePostView.as_view()(self.rf.post('/')))
        self.assertEqual(SimplePostView.as_view()(
            self.rf.get('/', REQUEST_METHOD='FAKE')
        ).status_code, 405)

    def test_invalid_keyword_argument(self):
        """
        Test that view arguments must be predefined on the class and can't
        be named like a HTTP method.
        """
        # Check each of the allowed method names
        for method in SimpleView.http_method_names:
            kwargs = {method: "value"}
            self.assertRaises(TypeError, SimpleView.as_view, **kwargs)
        # Check the case view argument is ok if predefined on the class...
        CustomizableView.as_view(parameter="value")
        # ...but raises errors otherwise.
        self.assertRaises(TypeError, CustomizableView.as_view, foobar="value")

    def test_calling_more_than_once(self):
        """
        Test a view can only be called once.
        """
        request = self.rf.get('/')
        view = InstanceView.as_view()
        # Each call must produce a fresh view instance.
        self.assertNotEqual(view(request), view(request))

    def test_class_attributes(self):
        """
        Test that the callable returned from as_view() has proper
        docstring, name and module.
        """
        self.assertEqual(SimpleView.__doc__, SimpleView.as_view().__doc__)
        self.assertEqual(SimpleView.__name__, SimpleView.as_view().__name__)
        self.assertEqual(SimpleView.__module__, SimpleView.as_view().__module__)

    def test_dispatch_decoration(self):
        """
        Test that attributes set by decorators on the dispatch method
        are also present on the closure.
        """
        self.assertTrue(DecoratedDispatchView.as_view().is_decorated)

    def test_options(self):
        """
        Test that views respond to HTTP OPTIONS requests with an Allow header
        appropriate for the methods implemented by the view class.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self.assertEqual(200, response.status_code)
        self.assertTrue(response['Allow'])

    def test_options_for_get_view(self):
        """
        Test that a view implementing GET allows GET and HEAD.
        """
        request = self.rf.options('/')
        view = SimpleView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD')

    def test_options_for_get_and_post_view(self):
        """
        Test that a view implementing GET and POST allows GET, HEAD, and POST.
        """
        request = self.rf.options('/')
        view = SimplePostView.as_view()
        response = view(request)
        self._assert_allows(response, 'GET', 'HEAD', 'POST')

    def test_options_for_post_view(self):
        """
        Test that a view implementing POST allows POST.
        """
        request = self.rf.options('/')
        view = PostOnlyView.as_view()
        response = view(request)
        self._assert_allows(response, 'POST')

    def _assert_allows(self, response, *expected_methods):
        "Assert allowed HTTP methods reported in the Allow response header"
        response_allows = set(response['Allow'].split(', '))
        # OPTIONS itself is always implicitly allowed.
        self.assertEqual(set(expected_methods + ('OPTIONS',)), response_allows)

    def test_args_kwargs_request_on_self(self):
        """
        Test a view only has args, kwargs & request once `as_view`
        has been called.
        """
        bare_view = InstanceView()
        view = InstanceView.as_view()(self.rf.get('/'))
        for attribute in ('args', 'kwargs', 'request'):
            self.assertNotIn(attribute, dir(bare_view))
            self.assertIn(attribute, dir(view))

    def test_direct_instantiation(self):
        """
        It should be possible to use the view by directly instantiating it
        without going through .as_view() (#21564).
        """
        view = PostOnlyView()
        response = view.dispatch(self.rf.head('/'))
        self.assertEqual(response.status_code, 405)
@override_settings(ROOT_URLCONF='generic_views.urls')
class TemplateViewTest(SimpleTestCase):
    """Tests for TemplateView: rendering, template resolution, engine
    selection, context population and cache interaction."""
    rf = RequestFactory()
    def _assert_about(self, response):
        # TemplateResponse renders lazily; force it before inspecting content.
        response.render()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<h1>About</h1>')
    def test_get(self):
        """
        Test a view that simply renders a template on GET
        """
        self._assert_about(AboutTemplateView.as_view()(self.rf.get('/about/')))
    def test_head(self):
        """
        Test a TemplateView responds correctly to HEAD
        """
        response = AboutTemplateView.as_view()(self.rf.head('/about/'))
        self.assertEqual(response.status_code, 200)
    def test_get_template_attribute(self):
        """
        Test a view that renders a template on GET with the template name as
        an attribute on the class.
        """
        self._assert_about(AboutTemplateAttributeView.as_view()(self.rf.get('/about/')))
    def test_get_generic_template(self):
        """
        Test a completely generic view that renders a template on GET
        with the template name as an argument at instantiation.
        """
        self._assert_about(TemplateView.as_view(template_name='generic_views/about.html')(self.rf.get('/about/')))
    def test_template_name_required(self):
        """
        A template view must provide a template name.
        """
        self.assertRaises(ImproperlyConfigured, self.client.get, '/template/no_template/')
    @require_jinja2
    def test_template_engine(self):
        """
        A template view may provide a template engine.
        """
        request = self.rf.get('/using/')
        view = TemplateView.as_view(template_name='generic_views/using.html')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='django')
        self.assertEqual(view(request).render().content, b'DTL\n')
        view = TemplateView.as_view(template_name='generic_views/using.html', template_engine='jinja2')
        self.assertEqual(view(request).render().content, b'Jinja2\n')
    def test_template_params(self):
        """
        A generic template view passes kwargs as context.
        """
        response = self.client.get('/template/simple/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertIsInstance(response.context['view'], View)
    def test_extra_template_params(self):
        """
        A template view can be customized to return extra context.
        """
        response = self.client.get('/template/custom/bar/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context['foo'], 'bar')
        self.assertEqual(response.context['key'], 'value')
        self.assertIsInstance(response.context['view'], View)
    def test_cached_views(self):
        """
        A template view can be cached
        """
        response = self.client.get('/template/cached/bar/')
        self.assertEqual(response.status_code, 200)
        # Within the cache timeout the cached body is served unchanged;
        # after expiry a fresh (different) body is rendered, per the
        # assertNotEqual below.  NOTE(review): relies on real sleeps, so
        # this test takes ~3 seconds.
        time.sleep(1.0)
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)
        self.assertEqual(response.content, response2.content)
        time.sleep(2.0)
        # Let the cache expire and test again
        response2 = self.client.get('/template/cached/bar/')
        self.assertEqual(response2.status_code, 200)
        self.assertNotEqual(response.content, response2.content)
    def test_content_type(self):
        # content_type passed through as_view() must reach the response.
        response = self.client.get('/template/content_type/')
        self.assertEqual(response['Content-Type'], 'text/plain')
    def test_resolve_view(self):
        # as_view() exposes view_class / view_initkwargs for URL resolution.
        match = resolve('/template/content_type/')
        self.assertIs(match.func.view_class, TemplateView)
        self.assertEqual(match.func.view_initkwargs['content_type'], 'text/plain')
    def test_resolve_login_required_view(self):
        # Decorated URLs must still expose the underlying view_class.
        match = resolve('/template/login_required/')
        self.assertIs(match.func.view_class, TemplateView)
@override_settings(ROOT_URLCONF='generic_views.urls')
class RedirectViewTest(SimpleTestCase):
    """Tests for RedirectView: status codes, URL/pattern resolution,
    query-string handling and HTTP-method coverage."""
    rf = RequestFactory()
    def test_no_url(self):
        "Without any configuration, returns HTTP 410 GONE"
        response = RedirectView.as_view()(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)
    def test_default_redirect(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_permanent_redirect(self):
        "Permanent redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=True)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, '/bar/')
    def test_temporary_redirect(self):
        "Temporary redirects are an option"
        response = RedirectView.as_view(url='/bar/', permanent=False)(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_include_args(self):
        "GET arguments can be included in the redirected URL"
        response = RedirectView.as_view(url='/bar/')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
        # query_string=True forwards the incoming query string.
        response = RedirectView.as_view(url='/bar/', query_string=True)(self.rf.get('/foo/?pork=spam'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?pork=spam')
    def test_include_urlencoded_args(self):
        "GET arguments can be URL-encoded when included in the redirected URL"
        response = RedirectView.as_view(url='/bar/', query_string=True)(
            self.rf.get('/foo/?unicode=%E2%9C%93'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/?unicode=%E2%9C%93')
    def test_parameter_substitution(self):
        "Redirection URLs can be parameterized"
        response = RedirectView.as_view(url='/bar/%(object_id)d/')(self.rf.get('/foo/42/'), object_id=42)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/42/')
    def test_named_url_pattern(self):
        "Named pattern parameter should reverse to the matching pattern"
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), pk=1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')
    def test_named_url_pattern_using_args(self):
        # Positional args must also be usable for pattern reversal.
        response = RedirectView.as_view(pattern_name='artist_detail')(self.rf.get('/foo/'), 1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/detail/artist/1/')
    def test_wrong_named_url_pattern(self):
        "A wrong pattern name returns 410 GONE"
        response = RedirectView.as_view(pattern_name='wrong.pattern_name')(self.rf.get('/foo/'))
        self.assertEqual(response.status_code, 410)
    def test_redirect_POST(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.post('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_HEAD(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_OPTIONS(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.options('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_PUT(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.put('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_PATCH(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.patch('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_DELETE(self):
        "Default is a temporary redirect"
        response = RedirectView.as_view(url='/bar/')(self.rf.delete('/foo/'))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/bar/')
    def test_redirect_when_meta_contains_no_query_string(self):
        "regression for #16705"
        # we can't use self.rf.get because it always sets QUERY_STRING
        response = RedirectView.as_view(url='/bar/')(self.rf.request(PATH_INFO='/foo/'))
        self.assertEqual(response.status_code, 302)
    def test_direct_instantiation(self):
        """
        It should be possible to use the view without going through .as_view()
        (#21564).
        """
        view = RedirectView()
        response = view.dispatch(self.rf.head('/foo/'))
        self.assertEqual(response.status_code, 410)
class GetContextDataTest(unittest.TestCase):
    """Tests for ContextMixin.get_context_data and its cooperation with
    kwargs and SingleObjectMixin."""
    def test_get_context_data_super(self):
        test_view = views.CustomContextView()
        context = test_view.get_context_data(kwarg_test='kwarg_value')
        # the test_name key is inserted by the test class's parent
        self.assertIn('test_name', context)
        self.assertEqual(context['kwarg_test'], 'kwarg_value')
        self.assertEqual(context['custom_key'], 'custom_value')
        # test that kwarg overrides values assigned higher up
        context = test_view.get_context_data(test_name='test_value')
        self.assertEqual(context['test_name'], 'test_value')
    def test_object_at_custom_name_in_context_data(self):
        # Checks 'pony' key presence in dict returned by get_context_data
        test_view = views.CustomSingleObjectView()
        test_view.context_object_name = 'pony'
        context = test_view.get_context_data()
        self.assertEqual(context['pony'], test_view.object)
    def test_object_in_get_context_data(self):
        # Checks 'object' key presence in dict returned by get_context_data #20234
        test_view = views.CustomSingleObjectView()
        context = test_view.get_context_data()
        self.assertEqual(context['object'], test_view.object)
class UseMultipleObjectMixinTest(unittest.TestCase):
    """Tests for MultipleObjectMixin.get_context_data queryset handling."""
    rf = RequestFactory()
    def test_use_queryset_from_view(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        # Don't pass queryset as argument
        context = test_view.get_context_data()
        self.assertEqual(context['object_list'], test_view.queryset)
    def test_overwrite_queryset(self):
        test_view = views.CustomMultipleObjectMixinView()
        test_view.get(self.rf.get('/'))
        queryset = [{'name': 'Lennon'}, {'name': 'Ono'}]
        self.assertNotEqual(test_view.queryset, queryset)
        # Overwrite the view's queryset with queryset from kwarg
        context = test_view.get_context_data(object_list=queryset)
        self.assertEqual(context['object_list'], queryset)
class SingleObjectTemplateResponseMixinTest(unittest.TestCase):
    """Tests for SingleObjectTemplateResponseMixin template resolution."""
    def test_template_mixin_without_template(self):
        """
        We want to make sure that if you use a template mixin, but forget the
        template, it still tells you it's ImproperlyConfigured instead of
        TemplateDoesNotExist.
        """
        view = views.TemplateResponseWithoutTemplate()
        self.assertRaises(ImproperlyConfigured, view.get_template_names)
| bsd-3-clause |
arnif/CouchPotatoServer | libs/sqlalchemy/orm/session.py | 15 | 73776 | # orm/session.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Session class and related utilities."""
import weakref
from itertools import chain
from sqlalchemy import util, sql, engine, log, exc as sa_exc
from sqlalchemy.sql import util as sql_util, expression
from sqlalchemy.orm import (
SessionExtension, attributes, exc, query, unitofwork, util as mapperutil, state
)
from sqlalchemy.orm.util import object_mapper as _object_mapper
from sqlalchemy.orm.util import class_mapper as _class_mapper
from sqlalchemy.orm.util import (
_class_to_mapper, _state_mapper,
)
from sqlalchemy.orm.mapper import Mapper, _none_set
from sqlalchemy.orm.unitofwork import UOWTransaction
from sqlalchemy.orm import identity
from sqlalchemy import event
from sqlalchemy.orm.events import SessionEvents
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension']
def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False,
                 expire_on_commit=True, **kwargs):
    """Generate a custom-configured :class:`.Session` class.

    The returned callable is a subclass of :class:`.Session` (or of
    *class_*, when given).  Instantiating it with no arguments produces a
    session built from the keyword arguments captured here; keyword
    arguments given at instantiation time override the captured ones::

        # global scope
        Session = sessionmaker(autoflush=False)

        # later, in a local scope, create and use a session:
        sess = Session()

        # per-instance override of a configured keyword:
        sess = Session(bind=connection)

    The class also exposes a ``configure()`` classmethod, which merges
    additional defaults in after the class has been generated - typically
    used to attach the specific :class:`.Engine` late::

        Session = sessionmaker()
        Session.configure(bind=create_engine('sqlite:///foo.db'))
        sess = Session()

    For the individual options, see the constructor documentation of
    :class:`.Session`.
    """
    # Fold the explicit options into the captured keyword dictionary; the
    # closure below reads (and configure() mutates) this single dict.
    kwargs.update(
        bind=bind,
        autoflush=autoflush,
        autocommit=autocommit,
        expire_on_commit=expire_on_commit,
    )
    if class_ is None:
        class_ = Session

    class Sess(object):
        def __init__(self, **local_kwargs):
            # Instantiation-time keywords win over the captured defaults.
            for key, value in kwargs.items():
                local_kwargs.setdefault(key, value)
            super(Sess, self).__init__(**local_kwargs)

        @classmethod
        def configure(self, **new_kwargs):
            """(Re)configure the arguments for this sessionmaker.

            e.g.::

                Session = sessionmaker()
                Session.configure(bind=create_engine('sqlite://'))
            """
            kwargs.update(new_kwargs)

    return type("SessionMaker", (Sess, class_), {})
class SessionTransaction(object):
    """A Session-level transaction.
    This corresponds to one or more Core :class:`~.engine.base.Transaction`
    instances behind the scenes, with one :class:`~.engine.base.Transaction`
    per :class:`~.engine.base.Engine` in use.
    Direct usage of :class:`.SessionTransaction` is not typically
    necessary as of SQLAlchemy 0.4; use the :meth:`.Session.rollback` and
    :meth:`.Session.commit` methods on :class:`.Session` itself to
    control the transaction.
    The current instance of :class:`.SessionTransaction` for a given
    :class:`.Session` is available via the :attr:`.Session.transaction`
    attribute.
    The :class:`.SessionTransaction` object is **not** thread-safe.
    See also:
    :meth:`.Session.rollback`
    :meth:`.Session.commit`
    :attr:`.Session.is_active`
    :meth:`.SessionEvents.after_commit`
    :meth:`.SessionEvents.after_rollback`
    :meth:`.SessionEvents.after_soft_rollback`
    .. index::
      single: thread safety; SessionTransaction
    """
    # Set by rollback(_capture_exception=True) on the parent when a flush
    # fails; reported by _assert_is_active() until rollback() is issued.
    _rollback_exception = None
    def __init__(self, session, parent=None, nested=False):
        self.session = session
        self._connections = {}
        self._parent = parent
        self.nested = nested
        self._active = True
        self._prepared = False
        # SAVEPOINTs only make sense inside an enclosing transaction.
        if not parent and nested:
            raise sa_exc.InvalidRequestError(
                "Can't start a SAVEPOINT transaction when no existing "
                "transaction is in progress")
        if self.session._enable_transaction_accounting:
            self._take_snapshot()
    @property
    def is_active(self):
        return self.session is not None and self._active
    def _assert_is_active(self):
        self._assert_is_open()
        if not self._active:
            if self._rollback_exception:
                raise sa_exc.InvalidRequestError(
                    "This Session's transaction has been rolled back "
                    "due to a previous exception during flush."
                    " To begin a new transaction with this Session, "
                    "first issue Session.rollback()."
                    " Original exception was: %s"
                    % self._rollback_exception
                )
            else:
                raise sa_exc.InvalidRequestError(
                    "This Session's transaction has been rolled back "
                    "by a nested rollback() call. To begin a new "
                    "transaction, issue Session.rollback() first."
                    )
    def _assert_is_open(self, error_msg="The transaction is closed"):
        if self.session is None:
            raise sa_exc.ResourceClosedError(error_msg)
    @property
    def _is_transaction_boundary(self):
        # True for the outermost transaction and for SAVEPOINT transactions;
        # plain subtransactions share their parent's snapshot instead.
        return self.nested or not self._parent
    def connection(self, bindkey, **kwargs):
        self._assert_is_active()
        engine = self.session.get_bind(bindkey, **kwargs)
        return self._connection_for_bind(engine)
    def _begin(self, nested=False):
        # Open a child (sub- or SAVEPOINT) transaction of this one.
        self._assert_is_active()
        return SessionTransaction(
            self.session, self, nested=nested)
    def _iterate_parents(self, upto=None):
        # Yield self plus ancestors up to (but not including) ``upto``.
        if self._parent is upto:
            return (self,)
        else:
            if self._parent is None:
                raise sa_exc.InvalidRequestError(
                    "Transaction %s is not on the active transaction list" % (
                    upto))
            return (self,) + self._parent._iterate_parents(upto)
    def _take_snapshot(self):
        # Record the pending new/deleted state so a rollback can restore it.
        if not self._is_transaction_boundary:
            self._new = self._parent._new
            self._deleted = self._parent._deleted
            return
        if not self.session._flushing:
            self.session.flush()
        self._new = weakref.WeakKeyDictionary()
        self._deleted = weakref.WeakKeyDictionary()
    def _restore_snapshot(self):
        # Undo the object-state bookkeeping performed since _take_snapshot().
        assert self._is_transaction_boundary
        for s in set(self._new).union(self.session._new):
            self.session._expunge_state(s)
            if s.key:
                del s.key
        for s in set(self._deleted).union(self.session._deleted):
            if s.deleted:
                #assert s in self._deleted
                del s.deleted
            self.session._update_impl(s)
        assert not self.session._deleted
        for s in self.session.identity_map.all_states():
            s.expire(s.dict, self.session.identity_map._modified)
    def _remove_snapshot(self):
        # On commit of the outermost transaction, optionally expire all
        # loaded state so the next access reloads from the database.
        assert self._is_transaction_boundary
        if not self.nested and self.session.expire_on_commit:
            for s in self.session.identity_map.all_states():
                s.expire(s.dict, self.session.identity_map._modified)
    def _connection_for_bind(self, bind):
        # Return (creating if needed) the Connection for ``bind``, beginning
        # the appropriate Core transaction (twophase/nested/plain) on it.
        self._assert_is_active()
        if bind in self._connections:
            return self._connections[bind][0]
        if self._parent:
            conn = self._parent._connection_for_bind(bind)
            if not self.nested:
                return conn
        else:
            if isinstance(bind, engine.Connection):
                conn = bind
                if conn.engine in self._connections:
                    raise sa_exc.InvalidRequestError(
                        "Session already has a Connection associated for the "
                        "given Connection's Engine")
            else:
                conn = bind.contextual_connect()
        if self.session.twophase and self._parent is None:
            transaction = conn.begin_twophase()
        elif self.nested:
            transaction = conn.begin_nested()
        else:
            transaction = conn.begin()
        self._connections[conn] = self._connections[conn.engine] = \
            (conn, transaction, conn is not bind)
        self.session.dispatch.after_begin(self.session, self, conn)
        return conn
    def prepare(self):
        # NOTE(review): the "of can" wording below is a typo carried in the
        # original runtime message; left unchanged here.
        if self._parent is not None or not self.session.twophase:
            raise sa_exc.InvalidRequestError(
                "Only root two phase transactions of can be prepared")
        self._prepare_impl()
    def _prepare_impl(self):
        # Flush pending changes and, for twophase, PREPARE each connection;
        # commits any enclosing subtransactions down to self first.
        self._assert_is_active()
        if self._parent is None or self.nested:
            self.session.dispatch.before_commit(self.session)
        stx = self.session.transaction
        if stx is not self:
            for subtransaction in stx._iterate_parents(upto=self):
                subtransaction.commit()
        if not self.session._flushing:
            self.session.flush()
        if self._parent is None and self.session.twophase:
            try:
                for t in set(self._connections.values()):
                    t[1].prepare()
            except:
                self.rollback()
                raise
        self._deactivate()
        self._prepared = True
    def commit(self):
        self._assert_is_open()
        if not self._prepared:
            self._prepare_impl()
        # Only transaction boundaries actually commit the DBAPI transactions;
        # plain subtransactions just close.
        if self._parent is None or self.nested:
            for t in set(self._connections.values()):
                t[1].commit()
            self.session.dispatch.after_commit(self.session)
            if self.session._enable_transaction_accounting:
                self._remove_snapshot()
        self.close()
        return self._parent
    def rollback(self, _capture_exception=False):
        self._assert_is_open()
        stx = self.session.transaction
        if stx is not self:
            for subtransaction in stx._iterate_parents(upto=self):
                subtransaction.close()
        if self.is_active or self._prepared:
            # Walk up to the nearest real transaction boundary and roll it
            # back; everything in between is merely deactivated.
            for transaction in self._iterate_parents():
                if transaction._parent is None or transaction.nested:
                    transaction._rollback_impl()
                    transaction._deactivate()
                    break
                else:
                    transaction._deactivate()
        sess = self.session
        if self.session._enable_transaction_accounting and \
                not sess._is_clean():
            # if items were added, deleted, or mutated
            # here, we need to re-restore the snapshot
            util.warn(
                "Session's state has been changed on "
                "a non-active transaction - this state "
                "will be discarded.")
            self._restore_snapshot()
        self.close()
        if self._parent and _capture_exception:
            self._parent._rollback_exception = sys.exc_info()[1]
        sess.dispatch.after_soft_rollback(sess, self)
        return self._parent
    def _rollback_impl(self):
        for t in set(self._connections.values()):
            t[1].rollback()
        if self.session._enable_transaction_accounting:
            self._restore_snapshot()
        self.session.dispatch.after_rollback(self.session)
    def _deactivate(self):
        self._active = False
    def close(self):
        # Pop this transaction off the session; the outermost transaction
        # also releases/closes its connections and, in non-autocommit mode,
        # immediately begins the session's next transaction.
        self.session.transaction = self._parent
        if self._parent is None:
            for connection, transaction, autoclose in \
                set(self._connections.values()):
                if autoclose:
                    connection.close()
                else:
                    transaction.close()
            if not self.session.autocommit:
                self.session.begin()
        self._deactivate()
        self.session = None
        self._connections = None
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self._assert_is_open("Cannot end transaction context. The transaction "
                             "was closed from within the context")
        if self.session.transaction is None:
            return
        if type is None:
            try:
                self.commit()
            except:
                self.rollback()
                raise
        else:
            self.rollback()
class Session(object):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :ref:`session_toplevel`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
    def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
                 _enable_transaction_accounting=True,
                 autocommit=False, twophase=False,
                 weak_identity_map=True, binds=None, extension=None,
                 query_cls=query.Query):
        """Construct a new Session.
        See also the :func:`.sessionmaker` function which is used to
        generate a :class:`.Session`-producing callable with a given
        set of arguments.
        :param autocommit: Defaults to ``False``. When ``True``, the ``Session``
          does not keep a persistent transaction running, and will acquire
          connections from the engine on an as-needed basis, returning them
          immediately after their use. Flushes will begin and commit (or possibly
          rollback) their own transaction if no transaction is present. When using
          this mode, the `session.begin()` method may be used to begin a
          transaction explicitly.
          Leaving it on its default value of ``False`` means that the ``Session``
          will acquire a connection and begin a transaction the first time it is
          used, which it will maintain persistently until ``rollback()``,
          ``commit()``, or ``close()`` is called. When the transaction is released
          by any of these methods, the ``Session`` is ready for the next usage,
          which will again acquire and maintain a new connection/transaction.
        :param autoflush: When ``True``, all query operations will issue a
           ``flush()`` call to this ``Session`` before proceeding. This is a
           convenience feature so that ``flush()`` need not be called repeatedly
           in order for database queries to retrieve results. It's typical that
           ``autoflush`` is used in conjunction with ``autocommit=False``. In this
           scenario, explicit calls to ``flush()`` are rarely needed; you usually
           only need to call ``commit()`` (which flushes) to finalize changes.
        :param bind: An optional ``Engine`` or ``Connection`` to which this
           ``Session`` should be bound. When specified, all SQL operations
           performed by this session will execute via this connectable.
        :param binds: An optional dictionary which contains more granular "bind"
           information than the ``bind`` parameter provides. This dictionary can
           map individual ``Table`` instances as well as ``Mapper`` instances to
           individual ``Engine`` or ``Connection`` objects. Operations which
           proceed relative to a particular ``Mapper`` will consult this
           dictionary for the direct ``Mapper`` instance as well as the mapper's
           ``mapped_table`` attribute in order to locate an connectable to use.
           The full resolution is described in the ``get_bind()`` method of
           ``Session``. Usage looks like::
            Session = sessionmaker(binds={
                SomeMappedClass: create_engine('postgresql://engine1'),
                somemapper: create_engine('postgresql://engine2'),
                some_table: create_engine('postgresql://engine3'),
                })
          Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods.
        :param \class_: Specify an alternate class other than
           ``sqlalchemy.orm.session.Session`` which should be used by the returned
           class. This is the only argument that is local to the
           ``sessionmaker()`` function, and is not sent directly to the
           constructor for ``Session``.
        :param _enable_transaction_accounting: Defaults to ``True``. A
           legacy-only flag which when ``False`` disables *all* 0.5-style object
           accounting on transaction boundaries, including auto-expiry of
           instances on rollback and commit, maintenance of the "new" and
           "deleted" lists upon rollback, and autoflush of pending changes upon
           begin(), all of which are interdependent.
        :param expire_on_commit: Defaults to ``True``. When ``True``, all
           instances will be fully expired after each ``commit()``, so that all
           attribute/object access subsequent to a completed transaction will load
           from the most recent database state.
        :param extension: An optional
           :class:`~.SessionExtension` instance, or a list
           of such instances, which will receive pre- and post- commit and flush
           events, as well as a post-rollback event. **Deprecated.**
           Please see :class:`.SessionEvents`.
        :param query_cls: Class which should be used to create new Query objects,
           as returned by the ``query()`` method. Defaults to
           :class:`~sqlalchemy.orm.query.Query`.
        :param twophase: When ``True``, all transactions will be started as
            a "two phase" transaction, i.e. using the "two phase" semantics
            of the database in use along with an XID. During a ``commit()``,
            after ``flush()`` has been issued for all attached databases, the
            ``prepare()`` method on each database's ``TwoPhaseTransaction`` will
            be called. This allows each database to roll back the entire
            transaction, before each transaction is committed.
        :param weak_identity_map: Defaults to ``True`` - when set to
           ``False``, objects placed in the :class:`.Session` will be
           strongly referenced until explicitly removed or the
           :class:`.Session` is closed. **Deprecated** - this option
           is obsolete.
        """
        if weak_identity_map:
            self._identity_cls = identity.WeakInstanceDict
        else:
            util.warn_deprecated("weak_identity_map=False is deprecated. "
                "This feature is not needed.")
            self._identity_cls = identity.StrongInstanceDict
        self.identity_map = self._identity_cls()
        self._new = {}  # InstanceState->object, strong refs object
        self._deleted = {}  # same
        self.bind = bind
        self.__binds = {}
        self._flushing = False
        self.transaction = None
        self.hash_key = _new_sessionid()
        self.autoflush = autoflush
        self.autocommit = autocommit
        self.expire_on_commit = expire_on_commit
        self._enable_transaction_accounting = _enable_transaction_accounting
        self.twophase = twophase
        self._query_cls = query_cls
        if extension:
            # Legacy SessionExtension instances are adapted onto the newer
            # event system.
            for ext in util.to_list(extension):
                SessionExtension._adapt_listener(self, ext)
        if binds is not None:
            # NOTE(review): .iteritems() is Python 2 only; this module
            # predates Python 3 support.
            for mapperortable, bind in binds.iteritems():
                if isinstance(mapperortable, (type, Mapper)):
                    self.bind_mapper(mapperortable, bind)
                else:
                    self.bind_table(mapperortable, bind)
        if not self.autocommit:
            # Non-autocommit sessions keep a transaction open at all times.
            self.begin()
        # _sessions is a module-level mapping of hash_key -> Session,
        # defined elsewhere in this module.
        _sessions[self.hash_key] = self
dispatch = event.dispatcher(SessionEvents)
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin`
can create a subtransaction if a transaction is already in progress.
For documentation on subtransactions, please see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use subtransactions=True "
"to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
InvalidRequestError.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
This so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :func:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
For a session configured with autocommit=False, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
InvalidRequestError.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an InvalidRequestError is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current transaction
is returned, or if no transaction is in progress, a new one is begun
and the :class:`.Connection` returned (note that no transactional state
is established with the DBAPI until the first SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with ``autocommit=True``,
an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect`
on the underlying :class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through
any of the optional keyword arguments. This ultimately makes usage of the
:meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`Engine.connect`, indicating
the :class:`.Connection` should be considered "single use", automatically
closing when the first result set is closed. This flag only has
an effect if this :class:`.Session` is configured with ``autocommit=True``
and does not already have a transaction in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
    def execute(self, clause, params=None, mapper=None, bind=None, **kw):
        """Execute a clause within the current transaction.
        Returns a :class:`.ResultProxy` representing
        results of the statement execution, in the same manner as that of an
        :class:`.Engine` or
        :class:`.Connection`.
        :meth:`~.Session.execute` accepts any executable clause construct, such
        as :func:`~.sql.expression.select`,
        :func:`~.sql.expression.insert`,
        :func:`~.sql.expression.update`,
        :func:`~.sql.expression.delete`, and
        :func:`~.sql.expression.text`, and additionally accepts
        plain strings that represent SQL statements. If a plain string is
        passed, it is first converted to a
        :func:`~.sql.expression.text` construct, which here means
        that bind parameters should be specified using the format ``:param``.
        If raw DBAPI statement execution is desired, use :meth:`.Session.connection`
        to acquire a :class:`.Connection`, then call its :meth:`~.Connection.execute`
        method.
        The statement is executed within the current transactional context of
        this :class:`.Session`, using the same behavior as that of
        the :meth:`.Session.connection` method to determine the active
        :class:`.Connection`. The ``close_with_result`` flag is
        set to ``True`` so that an ``autocommit=True`` :class:`.Session`
        with no active transaction will produce a result that auto-closes
        the underlying :class:`.Connection`.
        :param clause:
            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
            :func:`~.sql.expression.text`, etc.) or string SQL statement to be executed. The clause
            will also be used to locate a bind, if this :class:`.Session`
            is not bound to a single engine already, and the ``mapper``
            and ``bind`` arguments are not passed.
        :param params:
            Optional dictionary of bind names mapped to values.
        :param mapper:
            Optional :func:`.mapper` or mapped class, used to identify
            the appropriate bind. This argument takes precedence over
            ``clause`` when locating a bind.
        :param bind:
            Optional :class:`.Engine` to be used as the bind. If
            this engine is already involved in an ongoing transaction,
            that connection will be used. This argument takes
            precedence over ``mapper`` and ``clause`` when locating
            a bind.
        :param \**kw:
            Additional keyword arguments are sent to :meth:`get_bind()`,
            allowing additional arguments to be passed to custom
            implementations of :meth:`get_bind`.
        """
        # plain strings become text() constructs (bind params use ":name")
        clause = expression._literal_as_text(clause)
        if bind is None:
            # resolve an engine/connection via the standard get_bind()
            # lookup; mapper takes precedence over clause
            bind = self.get_bind(mapper, clause=clause, **kw)
        # close_with_result=True: with autocommit and no transaction, the
        # connection auto-closes when its result set is closed
        return self._connection_for_bind(bind, close_with_result=True).execute(
            clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state.detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
    # TODO: need much more test coverage for bind_mapper() and similar!
    # TODO: crystallize and document the resolution order vis-a-vis
    # bind_mapper()/bind_table()
    def bind_mapper(self, mapper, bind):
        """Bind operations for a mapper to a Connectable.

        mapper
          A mapper instance or mapped class

        bind
          Any Connectable: a ``Engine`` or ``Connection``.

        All subsequent operations involving this mapper will use the given
        `bind`.
        """
        # accept a mapped class as well as a Mapper instance
        if isinstance(mapper, type):
            mapper = _class_mapper(mapper)
        # bind the whole inheritance hierarchy via its base mapper, and
        # also record each mapped table so table-level lookups agree
        self.__binds[mapper.base_mapper] = bind
        for t in mapper._all_tables:
            self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
Any Connectable: a ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
"""
self.__binds[table] = bind
    def get_bind(self, mapper=None, clause=None):
        """Return a "bind" to which this :class:`.Session` is bound.
        The "bind" is usually an instance of :class:`.Engine`,
        except in the case where the :class:`.Session` has been
        explicitly bound directly to a :class:`.Connection`.
        For a multiply-bound or unbound :class:`.Session`, the
        ``mapper`` or ``clause`` arguments are used to determine the
        appropriate bind to return.
        Note that the "mapper" argument is usually present
        when :meth:`.Session.get_bind` is called via an ORM
        operation such as a :meth:`.Session.query`, each
        individual INSERT/UPDATE/DELETE operation within a
        :meth:`.Session.flush`, call, etc.
        The order of resolution is:
        1. if mapper given and session.binds is present,
           locate a bind based on mapper.
        2. if clause given and session.binds is present,
           locate a bind based on :class:`.Table` objects
           found in the given clause present in session.binds.
        3. if session.bind is present, return that.
        4. if clause given, attempt to return a bind
           linked to the :class:`.MetaData` ultimately
           associated with the clause.
        5. if mapper given, attempt to return a bind
           linked to the :class:`.MetaData` ultimately
           associated with the :class:`.Table` or other
           selectable to which the mapper is mapped.
        6. No bind can be found, :class:`.UnboundExecutionError`
           is raised.
        :param mapper:
            Optional :func:`.mapper` mapped class or instance of
            :class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
            first by consulting the "binds" map associated with this
            :class:`.Session`, and secondly by consulting the :class:`.MetaData`
            associated with the :class:`.Table` to which the :class:`.Mapper`
            is mapped for a bind.
        :param clause:
            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
            :func:`~.sql.expression.text`,
            etc.). If the ``mapper`` argument is not present or could not produce
            a bind, the given expression construct will be searched for a bound
            element, typically a :class:`.Table` associated with bound
            :class:`.MetaData`.
        """
        # no hints given: only a directly-bound session can answer
        if mapper is clause is None:
            if self.bind:
                return self.bind
            else:
                raise sa_exc.UnboundExecutionError(
                    "This session is not bound to a single Engine or "
                    "Connection, and no context was provided to locate "
                    "a binding.")
        # resolve the mapper argument (class or Mapper) to a Mapper, if given
        c_mapper = mapper is not None and _class_to_mapper(mapper) or None
        # manually bound?
        if self.__binds:
            if c_mapper:
                if c_mapper.base_mapper in self.__binds:
                    return self.__binds[c_mapper.base_mapper]
                elif c_mapper.mapped_table in self.__binds:
                    return self.__binds[c_mapper.mapped_table]
            if clause is not None:
                # check each Table in the expression, including those
                # targeted by INSERT/UPDATE/DELETE (include_crud)
                for t in sql_util.find_tables(clause, include_crud=True):
                    if t in self.__binds:
                        return self.__binds[t]
        # session-wide bind
        if self.bind:
            return self.bind
        # fall back to binds carried by the clause itself or by the
        # MetaData of the mapper's table
        if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
            return clause.bind
        if c_mapper and c_mapper.mapped_table.bind:
            return c_mapper.mapped_table.bind
        # nothing found: describe what was searched in the error
        context = []
        if mapper is not None:
            context.append('mapper %s' % c_mapper)
        if clause is not None:
            context.append('SQL expression')
        raise sa_exc.UnboundExecutionError(
            "Could not locate a bind configured on %s or this Session" % (
            ', '.join(context)))
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
New in 0.7.6.
"""
autoflush = self.autoflush
self.autoflush = False
yield self
self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
self.flush()
def _finalize_loaded(self, states):
for state, dict_ in states.items():
state.commit_all(dict_, self.identity_map)
    def refresh(self, instance, attribute_names=None, lockmode=None):
        """Expire and refresh the attributes on the given instance.
        A query will be issued to the database and all attributes will be
        refreshed with their current database value.
        Lazy-loaded relational attributes will remain lazily loaded, so that
        the instance-wide refresh operation will be followed immediately by
        the lazy load of that attribute.
        Eagerly-loaded relational attributes will eagerly load within the
        single refresh operation.
        Note that a highly isolated transaction will return the same values as
        were previously read in that same transaction, regardless of changes
        in database state outside of that transaction - usage of
        :meth:`~Session.refresh` usually only makes sense if non-ORM SQL
        statement were emitted in the ongoing transaction, or if autocommit
        mode is turned on.
        :param attribute_names: optional. An iterable collection of
          string attribute names indicating a subset of attributes to
          be refreshed.
        :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
          as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        # expire first so the subsequent load repopulates the attributes
        self._expire_state(state, attribute_names)
        # _load_on_ident returns None when the row no longer exists
        if self.query(_object_mapper(instance))._load_on_ident(
                state.key, refresh_state=state,
                lockmode=lockmode,
                only_load_props=attribute_names) is None:
            raise sa_exc.InvalidRequestError(
                "Could not refresh instance '%s'" %
                mapperutil.instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
When any attributes on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
"""
for state in self.identity_map.all_states():
state.expire(state.dict, self.identity_map._modified)
    def expire(self, instance, attribute_names=None):
        """Expire the attributes on an instance.
        Marks the attributes of an instance as out of date. When an expired
        attribute is next accessed, a query will be issued to the
        :class:`.Session` object's current transactional context in order to
        load all expired attributes for the given instance.  Note that
        a highly isolated transaction will return the same values as were
        previously read in that same transaction, regardless of changes
        in database state outside of that transaction.
        To expire all objects in the :class:`.Session` simultaneously,
        use :meth:`Session.expire_all`.
        The :class:`.Session` object's default behavior is to
        expire all state whenever the :meth:`Session.rollback`
        or :meth:`Session.commit` methods are called, so that new
        state can be loaded for the new transaction.   For this reason,
        calling :meth:`Session.expire` only makes sense for the specific
        case that a non-ORM SQL statement was emitted in the current
        transaction.
        :param instance: The instance to be refreshed.
        :param attribute_names: optional list of string attribute names
          indicating a subset of attributes to be expired.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state.expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state.expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state.detach()
    @util.deprecated("0.7", "The non-weak-referencing identity map "
                        "feature is no longer needed.")
    def prune(self):
        """Remove unreferenced instances cached in the identity map.

        Note that this method is only meaningful if "weak_identity_map" is set
        to False.  The default weak identity map is self-pruning.

        Removes any object in this Session's identity map that is not
        referenced in user code, modified, new or scheduled for deletion.
        Returns the number of objects pruned.
        """
        # delegates entirely to the identity map implementation
        return self.identity_map.prune()
    def expunge(self, instance):
        """Remove the `instance` from this ``Session``.

        This will free all internal references to the instance.  Cascading
        will be applied according to the *expunge* cascade rule.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        # the state must currently belong to this session
        if state.session_id is not self.hash_key:
            raise sa_exc.InvalidRequestError(
                "Instance %s is not present in this Session" %
                mapperutil.state_str(state))
        # collect the 'expunge' cascade before mutating anything
        cascaded = list(state.manager.mapper.cascade_iterator(
                        'expunge', state))
        self._expunge_state(state)
        for o, m, st_, dct_ in cascaded:
            self._expunge_state(st_)
    def _expunge_state(self, state):
        # pending: drop the strong reference held in _new and detach
        if state in self._new:
            self._new.pop(state)
            state.detach()
        # persistent: remove from the identity map and deleted collection
        elif self.identity_map.contains_state(state):
            self.identity_map.discard(state)
            self._deleted.pop(state, None)
            state.detach()
        # already flushed as deleted within the ongoing transaction:
        # forget the transaction-level deletion record
        elif self.transaction:
            self.transaction._deleted.pop(state, None)
    def _register_newly_persistent(self, state):
        """Transition *state* to persistent after a successful INSERT/UPDATE:
        assign its identity key, place it in the identity map, and drop it
        from the pending (_new) collection."""
        mapper = _state_mapper(state)
        # prevent against last minute dereferences of the object
        obj = state.obj()
        if obj is not None:
            instance_key = mapper._identity_key_from_state(state)
            # reject a NULL identity: any pk column None when partial pks
            # are disallowed, or all pk columns None regardless
            # (note `and` binds tighter than `or` here)
            if _none_set.issubset(instance_key[1]) and \
                not mapper.allow_partial_pks or \
                _none_set.issuperset(instance_key[1]):
                raise exc.FlushError(
                    "Instance %s has a NULL identity key.  If this is an "
                    "auto-generated value, check that the database table "
                    "allows generation of new primary key values, and that "
                    "the mapped Column object is configured to expect these "
                    "generated values.  Ensure also that this flush() is "
                    "not occurring at an inappropriate time, such as within "
                    "a load() event." % mapperutil.state_str(state)
                )
            if state.key is None:
                state.key = instance_key
            elif state.key != instance_key:
                # primary key switch. use discard() in case another
                # state has already replaced this one in the identity
                # map (see test/orm/test_naturalpks.py ReversePKsTest)
                self.identity_map.discard(state)
                state.key = instance_key
            self.identity_map.replace(state)
            state.commit_all(state.dict, self.identity_map)
        # remove from new last, might be the last strong ref
        if state in self._new:
            if self._enable_transaction_accounting and self.transaction:
                # record for rollback accounting
                self.transaction._new[state] = True
            self._new.pop(state)
    def _remove_newly_deleted(self, state):
        """Finalize *state* as deleted after a successful DELETE."""
        if self._enable_transaction_accounting and self.transaction:
            # record for rollback accounting
            self.transaction._deleted[state] = True
        self.identity_map.discard(state)
        self._deleted.pop(state, None)
        # mark the state itself so later operations can detect deletion
        state.deleted = True
    def add(self, instance):
        """Place an object in the ``Session``.

        Its state will be persisted to the database on the next flush
        operation.

        Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
        is ``expunge()``.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        # save (pending) or update (persistent), cascading "save-update"
        self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
for instance in instances:
self.add(instance)
    def _save_or_update_state(self, state):
        # persist the state itself, then cascade "save-update" to
        # related states, halting at those already in this session
        self._save_or_update_impl(state)
        mapper = _state_mapper(state)
        for o, m, st_, dct_ in mapper.cascade_iterator(
                                    'save-update',
                                    state,
                                    halt_on=self._contains_state):
            self._save_or_update_impl(st_)
    def delete(self, instance):
        """Mark an instance as deleted.

        The database delete operation occurs upon ``flush()``.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        # only persistent/detached objects (which have a key) can be deleted
        if state.key is None:
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persisted" %
                mapperutil.state_str(state))
        if state in self._deleted:
            return
        # ensure object is attached to allow the
        # cascade operation to load deferred attributes
        # and collections
        self._attach(state)
        # grab the cascades before adding the item to the deleted list
        # so that autoflush does not delete the item
        # the strong reference to the instance itself is significant here
        cascade_states = list(state.manager.mapper.cascade_iterator(
                                    'delete', state))
        self._deleted[state] = state.obj()
        self.identity_map.add(state)
        for o, m, st_, dct_ in cascade_states:
            self._delete_impl(st_)
    def merge(self, instance, load=True, **kw):
        """Copy the state an instance onto the persistent instance with the
        same identifier.

        If there is no persistent instance currently associated with the
        session, it will be loaded.  Return the persistent instance. If the
        given instance is unsaved, save a copy of and return it as a newly
        persistent instance. The given instance does not become associated
        with the session.

        This operation cascades to associated instances if the association is
        mapped with ``cascade="merge"``.

        See :ref:`unitofwork_merging` for a detailed discussion of merging.
        """
        # legacy spelling of load=False
        if 'dont_load' in kw:
            load = not kw['dont_load']
            util.warn_deprecated('dont_load=True has been renamed to '
                                    'load=False.')
        # memo of already-merged states, shared across the cascade
        _recursive = {}
        if load:
            # flush current contents if we expect to load data
            self._autoflush()
        _object_mapper(instance) # verify mapped
        # disable autoflush for the duration of the merge so partially
        # merged state is not flushed mid-operation
        autoflush = self.autoflush
        try:
            self.autoflush = False
            return self._merge(
                            attributes.instance_state(instance),
                            attributes.instance_dict(instance),
                            load=load, _recursive=_recursive)
        finally:
            self.autoflush = autoflush
    def _merge(self, state, state_dict, load=True, _recursive=None):
        """Recursive portion of merge(); returns the merged (session-local)
        instance for *state*, creating or loading it as needed."""
        mapper = _state_mapper(state)
        if state in _recursive:
            # already merged earlier in this cascade
            return _recursive[state]
        new_instance = False
        key = state.key
        if key is None:
            # transient object: only mergeable when loading is allowed
            if not load:
                raise sa_exc.InvalidRequestError(
                    "merge() with load=False option does not support "
                    "objects transient (i.e. unpersisted) objects.  flush() "
                    "all changes on mapped instances before merging with "
                    "load=False.")
            key = mapper._identity_key_from_state(state)
        # locate or construct the merge target
        if key in self.identity_map:
            merged = self.identity_map[key]
        elif not load:
            if state.modified:
                raise sa_exc.InvalidRequestError(
                    "merge() with load=False option does not support "
                    "objects marked as 'dirty'.  flush() all changes on "
                    "mapped instances before merging with load=False.")
            # fabricate an empty persistent target without a DB round trip
            merged = mapper.class_manager.new_instance()
            merged_state = attributes.instance_state(merged)
            merged_state.key = key
            self._update_impl(merged_state)
            new_instance = True
        elif not _none_set.issubset(key[1]) or \
                    (mapper.allow_partial_pks and
                    not _none_set.issuperset(key[1])):
            # identity key is usable: attempt a database load
            merged = self.query(mapper.class_).get(key[1])
        else:
            merged = None
        if merged is None:
            # no existing row: merge as a new pending instance
            merged = mapper.class_manager.new_instance()
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)
            new_instance = True
            self._save_or_update_state(merged_state)
        else:
            merged_state = attributes.instance_state(merged)
            merged_dict = attributes.instance_dict(merged)
        _recursive[state] = merged
        # check that we didn't just pull the exact same
        # state out.
        if state is not merged_state:
            # version check if applicable
            if mapper.version_id_col is not None:
                existing_version = mapper._get_state_attr_by_column(
                            state,
                            state_dict,
                            mapper.version_id_col,
                            passive=attributes.PASSIVE_NO_INITIALIZE)
                merged_version = mapper._get_state_attr_by_column(
                            merged_state,
                            merged_dict,
                            mapper.version_id_col,
                            passive=attributes.PASSIVE_NO_INITIALIZE)
                if existing_version is not attributes.PASSIVE_NO_RESULT and \
                    merged_version is not attributes.PASSIVE_NO_RESULT and \
                    existing_version != merged_version:
                    raise exc.StaleDataError(
                            "Version id '%s' on merged state %s "
                            "does not match existing version '%s'. "
                            "Leave the version attribute unset when "
                            "merging to update the most recent version."
                            % (
                                existing_version,
                                mapperutil.state_str(merged_state),
                                merged_version
                            ))
            merged_state.load_path = state.load_path
            merged_state.load_options = state.load_options
            # merge each mapped attribute, cascading per-property
            for prop in mapper.iterate_properties:
                prop.merge(self, state, state_dict,
                                merged_state, merged_dict,
                                load, _recursive)
        if not load:
            # remove any history
            merged_state.commit_all(merged_dict, self.identity_map)
        if new_instance:
            # fire the 'load' event for newly fabricated instances
            merged_state.manager.dispatch.load(merged_state, None)
        return merged
    @classmethod
    def identity_key(cls, *args, **kwargs):
        """Return an identity key.

        Delegates to :func:`.util.identity_key` (mapperutil).
        """
        return mapperutil.identity_key(*args, **kwargs)
    @classmethod
    def object_session(cls, instance):
        """Return the ``Session`` to which an object belongs."""
        # delegates to the module-level object_session() function
        return object_session(instance)
    def _validate_persistent(self, state):
        # guard: raise unless *state* is tracked by this session's
        # identity map, i.e. persistent here
        if not self.identity_map.contains_state(state):
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persistent within this Session" %
                mapperutil.state_str(state))
    def _save_impl(self, state):
        # a state that already has an identity key is persistent or
        # detached and cannot be re-registered as pending
        if state.key is not None:
            raise sa_exc.InvalidRequestError(
                "Object '%s' already has an identity - it can't be registered "
                "as pending" % mapperutil.state_str(state))
        self._attach(state)
        if state not in self._new:
            # hold a strong reference to the object until it is flushed,
            # and record ordering for INSERT sequencing
            self._new[state] = state.obj()
            state.insert_order = len(self._new)
    def _update_impl(self, state):
        # already persistent here and not pending deletion: nothing to do
        if (self.identity_map.contains_state(state) and
            state not in self._deleted):
            return
        # only objects with an identity key can be made persistent
        if state.key is None:
            raise sa_exc.InvalidRequestError(
                "Instance '%s' is not persisted" %
                mapperutil.state_str(state))
        # deleted states must first be made transient again
        if state.deleted:
            raise sa_exc.InvalidRequestError(
                "Instance '%s' has been deleted.  Use the make_transient() "
                "function to send this object back to the transient state." %
                mapperutil.state_str(state)
            )
        self._attach(state)
        # un-delete if it was pending deletion, and track in identity map
        self._deleted.pop(state, None)
        self.identity_map.add(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state)
self._deleted[state] = state.obj()
self.identity_map.add(state)
    def _attach(self, state):
        """Associate *state* with this session, validating that it is not
        already claimed elsewhere, and fire the after_attach event."""
        # another state with the same identity key already lives in our
        # identity map: refuse the duplicate
        if state.key and \
            state.key in self.identity_map and \
            not self.identity_map.contains_state(state):
            raise sa_exc.InvalidRequestError("Can't attach instance "
                    "%s; another instance with key %s is already "
                    "present in this session."
                    % (mapperutil.state_str(state), state.key))
        # state still belongs to a different live session
        if state.session_id and \
            state.session_id is not self.hash_key and \
            state.session_id in _sessions:
            raise sa_exc.InvalidRequestError(
                "Object '%s' is already attached to session '%s' "
                "(this is '%s')" % (mapperutil.state_str(state),
                                    state.session_id, self.hash_key))
        if state.session_id != self.hash_key:
            # claim the state and notify listeners of the new attachment
            state.session_id = self.hash_key
            if self.dispatch.after_attach:
                self.dispatch.after_attach(self, state.obj())
    def __contains__(self, instance):
        """Return True if the instance is associated with this session.

        The instance may be pending or persistent within the Session for a
        result of True.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            # the object is not mapped at all
            raise exc.UnmappedInstanceError(instance)
        return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this Session."""
return iter(list(self._new.values()) + self.identity_map.values())
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
    def flush(self, objects=None):
        """Flush all the object changes to the database.

        Writes out all pending object creations, deletions and modifications
        to the database as INSERTs, DELETEs, UPDATEs, etc.  Operations are
        automatically ordered by the Session's unit of work dependency
        solver.

        Database operations will be issued in the current transactional
        context and do not affect the state of the transaction, unless an
        error occurs, in which case the entire transaction is rolled back.
        You may flush() as often as you like within a transaction to move
        changes from Python to the database's transaction buffer.

        For ``autocommit`` Sessions with no active manual transaction, flush()
        will create a transaction on the fly that surrounds the entire set of
        operations in the flush.

        objects
          Optional; a list or tuple collection.  Restricts the flush operation
          to only these objects, rather than all pending changes.
          Deprecated - this flag prevents the session from properly maintaining
          accounting among inter-object relations and can cause invalid results.
        """
        if objects:
            util.warn_deprecated(
                "The 'objects' argument to session.flush() is deprecated; "
                "Please do not add objects to the session which should not "
                "yet be persisted.")
        # guard against re-entrant flush (e.g. from a flush event handler)
        if self._flushing:
            raise sa_exc.InvalidRequestError("Session is already flushing")
        # short-circuit when there is nothing to do
        if self._is_clean():
            return
        try:
            self._flushing = True
            self._flush(objects)
        finally:
            self._flushing = False
def _is_clean(self):
return not self.identity_map.check_modified() and \
not self._deleted and \
not self._new
    def _flush(self, objects=None):
        """Internal flush: build a UOWTransaction from the session's dirty,
        new and deleted collections and execute it within a subtransaction."""
        dirty = self._dirty_states
        if not dirty and not self._deleted and not self._new:
            # nothing substantive to flush; clear modification markers
            self.identity_map._modified.clear()
            return
        flush_context = UOWTransaction(self)
        if self.dispatch.before_flush:
            self.dispatch.before_flush(self, flush_context, objects)
            # re-establish "dirty states" in case the listeners
            # added
            dirty = self._dirty_states
        deleted = set(self._deleted)
        new = set(self._new)
        dirty = set(dirty).difference(deleted)
        # create the set of all objects we want to operate upon
        if objects:
            # specific list passed in
            objset = set()
            for o in objects:
                try:
                    state = attributes.instance_state(o)
                except exc.NO_STATE:
                    raise exc.UnmappedInstanceError(o)
                objset.add(state)
        else:
            objset = None
        # store objects whose fate has been decided
        processed = set()
        # put all saves/updates into the flush context.  detect top-level
        # orphans and throw them into deleted.
        if objset:
            proc = new.union(dirty).intersection(objset).difference(deleted)
        else:
            proc = new.union(dirty).difference(deleted)
        for state in proc:
            # an orphan with an identity gets deleted rather than saved
            is_orphan = _state_mapper(state)._is_orphan(state) and state.has_identity
            flush_context.register_object(state, isdelete=is_orphan)
            processed.add(state)
        # put all remaining deletes into the flush context.
        if objset:
            proc = deleted.intersection(objset).difference(processed)
        else:
            proc = deleted.difference(processed)
        for state in proc:
            flush_context.register_object(state, isdelete=True)
        if not flush_context.has_work:
            return
        # run the unit of work inside a subtransaction so a failure rolls
        # back the whole enclosing transaction
        flush_context.transaction = transaction = self.begin(
            subtransactions=True)
        try:
            flush_context.execute()
            self.dispatch.after_flush(self, flush_context)
            flush_context.finalize_flush_changes()
            # useful assertions:
            #if not objects:
            #    assert not self.identity_map._modified
            #else:
            #    assert self.identity_map._modified == \
            #            self.identity_map._modified.difference(objects)
            self.dispatch.after_flush_postexec(self, flush_context)
            transaction.commit()
        # bare except: roll back on *any* exception (including
        # KeyboardInterrupt/SystemExit) before re-raising
        except:
            transaction.rollback(_capture_exception=True)
            raise
    def is_modified(self, instance, include_collections=True,
                            passive=attributes.PASSIVE_OFF):
        """Return ``True`` if the given instance has locally
        modified attributes.

        This method retrieves the history for each instrumented
        attribute on the instance and performs a comparison of the current
        value to its previously committed value, if any.

        It is in effect a more expensive and accurate
        version of checking for the given instance in the
        :attr:`.Session.dirty` collection; a full test for
        each attribute's net "dirty" status is performed.

        E.g.::

            return session.is_modified(someobject, passive=True)

        .. note::

           In SQLAlchemy 0.7 and earlier, the ``passive``
           flag should **always** be explicitly set to ``True``.
           The current default value of :data:`.attributes.PASSIVE_OFF`
           for this flag is incorrect, in that it loads unloaded
           collections and attributes which by definition
           have no modified state, and furthermore trips off
           autoflush which then causes all subsequent, possibly
           modified attributes to lose their modified state.
           The default value of the flag will be changed in 0.8.

        A few caveats to this method apply:

        * Instances present in the :attr:`.Session.dirty` collection may
          report ``False`` when tested with this method.  This is because
          the object may have received change events via attribute
          mutation, thus placing it in :attr:`.Session.dirty`,
          but ultimately the state is the same as that loaded from
          the database, resulting in no net change here.
        * Scalar attributes may not have recorded the previously set
          value when a new value was applied, if the attribute was not loaded,
          or was expired, at the time the new value was received - in these
          cases, the attribute is assumed to have a change, even if there is
          ultimately no net change against its database value. SQLAlchemy in
          most cases does not need the "old" value when a set event occurs, so
          it skips the expense of a SQL call if the old value isn't present,
          based on the assumption that an UPDATE of the scalar value is
          usually needed, and in those few cases where it isn't, is less
          expensive on average than issuing a defensive SELECT.

          The "old" value is fetched unconditionally only if the attribute
          container has the ``active_history`` flag set to ``True``. This
          flag is set typically for primary key attributes and scalar object
          references that are not a simple many-to-one.  To set this flag for
          any arbitrary mapped column, use the ``active_history`` argument
          with :func:`.column_property`.

        :param instance: mapped instance to be tested for pending changes.
        :param include_collections: Indicates if multivalued collections
         should be included in the operation.  Setting this to ``False`` is
         a way to detect only local-column based properties (i.e. scalar
         columns or many-to-one foreign keys) that would result in an UPDATE
         for this instance upon flush.
        :param passive: Indicates if unloaded attributes and
         collections should be loaded in the course of performing
         this test.  If set to ``False``, or left at its default
         value of :data:`.PASSIVE_OFF`, unloaded attributes
         will be loaded.  If set to ``True`` or
         :data:`.PASSIVE_NO_INITIALIZE`, unloaded
         collections and attributes will remain unloaded.  As
         noted previously, the existence of this flag here
         is a bug, as unloaded attributes by definition have
         no changes, and the load operation also triggers an
         autoflush which then cancels out subsequent changes.
         This flag should **always be set to
         True**.  In 0.8 the flag will be deprecated and the default
         set to ``True``.
        """
        try:
            state = attributes.instance_state(instance)
        except exc.NO_STATE:
            raise exc.UnmappedInstanceError(instance)
        dict_ = state.dict

        # normalize boolean shorthand into the actual flag constants
        if passive is True:
            passive = attributes.PASSIVE_NO_INITIALIZE
        elif passive is False:
            passive = attributes.PASSIVE_OFF

        for attr in state.manager.attributes:
            # skip collections when excluded, and attributes which do not
            # support history tracking at all
            if \
                (
                    not include_collections and
                    hasattr(attr.impl, 'get_collection')
                ) or not hasattr(attr.impl, 'get_history'):
                continue

            (added, unchanged, deleted) = \
                    attr.impl.get_history(state, dict_, passive=passive)

            # any net adds or deletes mean a real change
            if added or deleted:
                return True
        return False
@property
def is_active(self):
"""True if this :class:`.Session` has an active transaction.
This indicates if the :class:`.Session` is capable of emitting
SQL, as from the :meth:`.Session.execute`, :meth:`.Session.query`,
or :meth:`.Session.flush` methods. If False, it indicates
that the innermost transaction has been rolled back, but enclosing
:class:`.SessionTransaction` objects remain in the transactional
stack, which also must be rolled back.
This flag is generally only useful with a :class:`.Session`
configured in its default mode of ``autocommit=False``.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
See also:
:func:`.identity_key` - operations involving identity keys.
"""
    @property
    def _dirty_states(self):
        """The set of all persistent states considered dirty.

        This method returns all states that were modified including
        those that were possibly deleted.
        """
        # delegates to the identity map, which records modification events
        return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(self._deleted.values())
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(self._new.values())
# Registry of live sessions, looked up by ``state.session_id`` in
# :func:`_state_session`; weak values let a Session be garbage-collected
# once no longer referenced elsewhere.
_sessions = weakref.WeakValueDictionary()
def make_transient(instance):
    """Make the given instance 'transient'.

    Detaches the instance from any owning session, removes its "identity
    key" and resets the state's "deleted" flag if the object had been
    explicitly deleted by its session — as though the object were newly
    constructed, except it retains its current attribute values.
    Expired or deferred loader callables are discarded, so no attribute
    access will trigger a load.
    """
    state = attributes.instance_state(instance)
    session = _state_session(state)
    if session is not None:
        session._expunge_state(state)

    # drop expired-state markers and deferred loader callables
    state.callables.clear()

    # without a key the object looks never-persisted
    if state.key:
        del state.key
    if state.deleted:
        del state.deleted
def object_session(instance):
    """Return the ``Session`` to which *instance* belongs.

    Raises :class:`.UnmappedInstanceError` if the instance is not a
    mapped instance.
    """
    try:
        state = attributes.instance_state(instance)
    except exc.NO_STATE:
        raise exc.UnmappedInstanceError(instance)
    return _state_session(state)
def _state_session(state):
    """Return the Session registered for *state*, or None if the state
    has no session id or the session is gone from the weak registry."""
    if state.session_id:
        # .get() covers the session having been garbage-collected
        return _sessions.get(state.session_id)
    return None
# Counter presumably used to hand out fresh session ids — TODO confirm
# against the Session constructor (outside this view).
_new_sessionid = util.counter()
| gpl-3.0 |
RaresO/test | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
  """If there are no slashes in the command given, this function
  searches the PATH env to find the given command, and converts it
  to an absolute path.  We have to do this because MSVS is looking
  for an actual file to launch a debugger on, not just a command
  line.  Note that this happens at GYP time, so anything needing to
  be built needs to have a full path."""
  if '/' in command or '\\' in command:
    # The command already carries path elements (relative or absolute);
    # assume it is constructed properly.
    return command
  # Scan each PATH entry for an existing, executable file.
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    candidate = os.path.join(directory, command)
    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
      return candidate
  # Nothing found; fall back to the bare command.
  return command
def _QuoteWin32CommandLineArgs(args):
  """Return a copy of args with each element quoted for the cmd shell."""
  quoted = []
  for arg in args:
    if '"' in arg:
      # Escape embedded double-quotes as "" for the cmd shell, then
      # quote the whole argument.
      quoted.append('"%s"' % arg.replace('"', '""'))
    elif re.search(r'[ \t\n]', arg):
      # No quotes inside, but whitespace requires quoting the whole arg.
      quoted.append('"%s"' % arg)
    else:
      quoted.append(arg)
  return quoted
class Writer(object):
  """Visual Studio XML user file writer."""

  def __init__(self, user_file_path, version, name):
    """Initializes the user file.

    Args:
      user_file_path: Path to the user file.
      version: Version info.
      name: Name of the user file.
    """
    self.user_file_path = user_file_path
    self.version = version
    self.name = name
    # Maps configuration name -> easy_xml specification list.
    self.configurations = {}

  def AddConfig(self, name):
    """Adds a configuration to the project.

    Args:
      name: Configuration name.
    """
    self.configurations[name] = ['Configuration', {'Name': name}]

  def AddDebugSettings(self, config_name, command, environment=None,
                       working_directory=""):
    """Adds a DebugSettings node to the user file for a particular config.

    Args:
      config_name: Name of the configuration to attach the settings to.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: dict of environment variables for the debuggee.
        (optional)
      working_directory: working directory for the debuggee. (optional)
    """
    # BUG FIX: 'environment' previously defaulted to a mutable {} which is
    # shared across calls in Python.  None is the safe default and behaves
    # identically below, since an empty dict is also falsy.
    command = _QuoteWin32CommandLineArgs(command)

    abs_command = _FindCommandInPath(command[0])

    if environment and isinstance(environment, dict):
      env_list = ['%s="%s"' % (key, val)
                  for (key,val) in environment.iteritems()]
      environment = ' '.join(env_list)
    else:
      environment = ''

    n_cmd = ['DebugSettings',
             {'Command': abs_command,
              'WorkingDirectory': working_directory,
              'CommandArguments': " ".join(command[1:]),
              'RemoteMachine': socket.gethostname(),
              'Environment': environment,
              'EnvironmentMerge': 'true',
              # Currently these are all "dummy" values that we're just setting
              # in the default manner that MSVS does it.  We could use some of
              # these to add additional capabilities, I suppose, but they might
              # not have parity with other platforms then.
              'Attach': 'false',
              'DebuggerType': '3',  # 'auto' debugger
              'Remote': '1',
              'RemoteCommand': '',
              'HttpUrl': '',
              'PDBPath': '',
              'SQLDebugging': '',
              'DebuggerFlavor': '0',
              'MPIRunCommand': '',
              'MPIRunArguments': '',
              'MPIRunWorkingDirectory': '',
              'ApplicationCommand': '',
              'ApplicationArguments': '',
              'ShimCommand': '',
              'MPIAcceptMode': '',
              'MPIAcceptFilter': ''
             }]

    # Find the config, and add it if it doesn't exist.
    if config_name not in self.configurations:
      self.AddConfig(config_name)

    # Add the DebugSettings onto the appropriate config.
    self.configurations[config_name].append(n_cmd)

  def WriteIfChanged(self):
    """Writes the user file."""
    configs = ['Configurations']
    for config, spec in sorted(self.configurations.iteritems()):
      configs.append(spec)

    content = ['VisualStudioUserFile',
               {'Version': self.version.ProjectVersion(),
                'Name': self.name
               },
               configs]
    easy_xml.WriteXmlIfChanged(content, self.user_file_path,
                               encoding="Windows-1252")
| mit |
badock/nova | nova/tests/virt/disk/test_inject.py | 15 | 11213 | # Copyright (C) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from nova import exception
from nova import test
from nova.tests.virt.disk.vfs import fakeguestfs
from nova.virt.disk import api as diskapi
from nova.virt.disk.vfs import guestfs as vfsguestfs
class VirtDiskTest(test.NoDBTestCase):
    """Tests for the nova.virt.disk.api injection helpers.

    The real ``guestfs`` module is replaced with ``fakeguestfs`` in
    ``setUp``, so the injection routines operate on the fake handle's
    in-memory ``files`` dict instead of a real disk image.
    """

    def setUp(self):
        super(VirtDiskTest, self).setUp()
        # swap in the fake guestfs for both the module cache and the VFS
        sys.modules['guestfs'] = fakeguestfs
        vfsguestfs.guestfs = fakeguestfs

    def test_inject_data(self):
        self.assertTrue(diskapi.inject_data("/some/file", use_cow=True))

        self.assertTrue(diskapi.inject_data("/some/file",
                                            mandatory=('files',)))

        self.assertTrue(diskapi.inject_data("/some/file", key="mysshkey",
                                            mandatory=('key',)))

        os_name = os.name
        os.name = 'nt'  # Cause password injection to fail
        self.assertRaises(exception.NovaException,
                          diskapi.inject_data,
                          "/some/file", admin_password="p",
                          mandatory=('admin_password',))
        self.assertFalse(diskapi.inject_data("/some/file", admin_password="p"))
        os.name = os_name

        self.assertFalse(diskapi.inject_data("/some/fail/file",
                                             key="mysshkey"))

    def test_inject_data_key(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/root/.ssh", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh"],
                         {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
        self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
                         {'isdir': False,
                          'content': "Hello World\n# The following ssh " +
                                     "key was injected by Nova\nmysshkey\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o600})

        vfs.teardown()

    def test_inject_data_key_with_selinux(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        # presence of /etc/selinux triggers the restorecon rc.local hook
        vfs.make_path("etc/selinux")
        vfs.make_path("etc/rc.d")
        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
                         {'isdir': False,
                          'content': "Hello World#!/bin/sh\n# Added by " +
                                     "Nova to ensure injected ssh keys " +
                                     "have the right context\nrestorecon " +
                                     "-RF root/.ssh 2>/dev/null || :\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o700})

        self.assertIn("/root/.ssh", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh"],
                         {'isdir': True, 'gid': 0, 'uid': 0, 'mode': 0o700})
        self.assertIn("/root/.ssh/authorized_keys", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/root/.ssh/authorized_keys"],
                         {'isdir': False,
                          'content': "Hello World\n# The following ssh " +
                                     "key was injected by Nova\nmysshkey\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o600})

        vfs.teardown()

    def test_inject_data_key_with_selinux_append_with_newline(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        # pre-existing rc.local without trailing newline must gain one
        # before the restorecon snippet is appended
        vfs.replace_file("/etc/rc.d/rc.local", "#!/bin/sh\necho done")
        vfs.make_path("etc/selinux")
        vfs.make_path("etc/rc.d")
        diskapi._inject_key_into_fs("mysshkey", vfs)

        self.assertIn("/etc/rc.d/rc.local", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/rc.d/rc.local"],
                         {'isdir': False,
                          'content': "#!/bin/sh\necho done\n# Added "
                                     "by Nova to ensure injected ssh keys have "
                                     "the right context\nrestorecon -RF "
                                     "root/.ssh 2>/dev/null || :\n",
                          'gid': 100,
                          'uid': 100,
                          'mode': 0o700})
        vfs.teardown()

    def test_inject_net(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        diskapi._inject_net_into_fs("mynetconfig", vfs)

        self.assertIn("/etc/network/interfaces", vfs.handle.files)
        self.assertEqual(vfs.handle.files["/etc/network/interfaces"],
                         {'content': 'mynetconfig',
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100})
        vfs.teardown()

    def test_inject_metadata(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()
        diskapi._inject_metadata_into_fs({"foo": "bar", "eek": "wizz"}, vfs)

        self.assertIn("/meta.js", vfs.handle.files)
        self.assertEqual({'content': '{"foo": "bar", ' +
                                     '"eek": "wizz"}',
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100},
                         vfs.handle.files["/meta.js"])
        vfs.teardown()

    def test_inject_admin_password(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        # fix the salt so the crypted password is deterministic
        def fake_salt():
            return "1234567890abcdef"

        self.stubs.Set(diskapi, '_generate_salt', fake_salt)

        vfs.handle.write("/etc/shadow",
                         "root:$1$12345678$xxxxx:14917:0:99999:7:::\n" +
                         "bin:*:14495:0:99999:7:::\n" +
                         "daemon:*:14495:0:99999:7:::\n")
        vfs.handle.write("/etc/passwd",
                         "root:x:0:0:root:/root:/bin/bash\n" +
                         "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
                         "daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
        diskapi._inject_admin_password_into_fs("123456", vfs)

        # /etc/passwd must be rewritten unchanged (password lives in shadow)
        self.assertEqual(vfs.handle.files["/etc/passwd"],
                         {'content': "root:x:0:0:root:/root:/bin/bash\n" +
                                     "bin:x:1:1:bin:/bin:/sbin/nologin\n" +
                                     "daemon:x:2:2:daemon:/sbin:" +
                                     "/sbin/nologin\n",
                          'gid': 100,
                          'isdir': False,
                          'mode': 0o700,
                          'uid': 100})
        shadow = vfs.handle.files["/etc/shadow"]

        # if the encrypted password is only 13 characters long, then
        # nova.virt.disk.api:_set_password fell back to DES.
        if len(shadow['content']) == 91:
            self.assertEqual(shadow,
                             {'content': "root:12tir.zIbWQ3c" +
                                         ":14917:0:99999:7:::\n" +
                                         "bin:*:14495:0:99999:7:::\n" +
                                         "daemon:*:14495:0:99999:7:::\n",
                              'gid': 100,
                              'isdir': False,
                              'mode': 0o700,
                              'uid': 100})
        else:
            self.assertEqual(shadow,
                             {'content': "root:$1$12345678$a4ge4d5iJ5vw" +
                                         "vbFS88TEN0:14917:0:99999:7:::\n" +
                                         "bin:*:14495:0:99999:7:::\n" +
                                         "daemon:*:14495:0:99999:7:::\n",
                              'gid': 100,
                              'isdir': False,
                              'mode': 0o700,
                              'uid': 100})
        vfs.teardown()

    def test_inject_files_into_fs(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        diskapi._inject_files_into_fs([("/path/to/not/exists/file",
                                        "inject-file-contents")],
                                      vfs)

        # missing parent directory is created with root ownership
        self.assertIn("/path/to/not/exists", vfs.handle.files)
        shadow_dir = vfs.handle.files["/path/to/not/exists"]
        self.assertEqual(shadow_dir,
                         {"isdir": True,
                          "gid": 0,
                          "uid": 0,
                          "mode": 0o744})

        shadow_file = vfs.handle.files["/path/to/not/exists/file"]
        self.assertEqual(shadow_file,
                         {"isdir": False,
                          "content": "inject-file-contents",
                          "gid": 100,
                          "uid": 100,
                          "mode": 0o700})
        vfs.teardown()

    def test_inject_files_into_fs_dir_exists(self):
        vfs = vfsguestfs.VFSGuestFS("/some/file", "qcow2")
        vfs.setup()

        called = {'make_path': False}

        def fake_has_file(*args, **kwargs):
            return True

        def fake_make_path(*args, **kwargs):
            called['make_path'] = True

        self.stubs.Set(vfs, 'has_file', fake_has_file)
        self.stubs.Set(vfs, 'make_path', fake_make_path)

        # test for already exists dir
        diskapi._inject_files_into_fs([("/path/to/exists/file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/path/to/exists/file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        # test for root dir
        diskapi._inject_files_into_fs([("/inject-file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/inject-file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        # test for null dir
        vfs.handle.files.pop("/inject-file")
        diskapi._inject_files_into_fs([("inject-file",
                                        "inject-file-contents")],
                                      vfs)

        self.assertIn("/inject-file", vfs.handle.files)
        self.assertFalse(called['make_path'])

        vfs.teardown()
| apache-2.0 |
yetilinux/yetiweb | packages/models.py | 1 | 10119 | from collections import namedtuple
from django.db import models
from django.db.models.signals import pre_save
from django.contrib.auth.models import User
from main.models import Arch, Repo
from main.utils import set_created_field
class PackageRelation(models.Model):
    '''
    Represents maintainership (or interest) in a package by a given developer.
    It is not a true foreign key to packages as we want to key off
    pkgbase/pkgname instead, as well as preserve this information across
    package deletes, adds, and in all repositories.
    '''
    MAINTAINER = 1
    WATCHER = 2
    TYPE_CHOICES = (
        (MAINTAINER, 'Maintainer'),
        (WATCHER, 'Watcher'),
    )
    pkgbase = models.CharField(max_length=255)
    user = models.ForeignKey(User, related_name="package_relations")
    type = models.PositiveIntegerField(choices=TYPE_CHOICES, default=MAINTAINER)
    # populated automatically via the pre_save signal hookup at module end
    created = models.DateTimeField(editable=False)

    def get_associated_packages(self):
        """Return all Package rows sharing this relation's pkgbase."""
        # TODO: delayed import to avoid circular reference
        from main.models import Package
        return Package.objects.normal().filter(pkgbase=self.pkgbase)

    def repositories(self):
        """Return the sorted, de-duplicated repos of associated packages."""
        packages = self.get_associated_packages()
        return sorted(set([p.repo for p in packages]))

    def __unicode__(self):
        return u'%s: %s (%s)' % (
                self.pkgbase, self.user, self.get_type_display())

    class Meta:
        unique_together = (('pkgbase', 'user', 'type'),)
class SignoffSpecificationManager(models.Manager):
    def get_from_package(self, pkg):
        '''Utility method to pull all relevant name-version fields from a
        package and get a matching signoff specification.'''
        return self.get(
                pkgbase=pkg.pkgbase, pkgver=pkg.pkgver, pkgrel=pkg.pkgrel,
                epoch=pkg.epoch, arch=pkg.arch, repo=pkg.repo)

    def get_or_default_from_package(self, pkg):
        '''Utility method to pull all relevant name-version fields from a
        package and get a matching signoff specification, or return the
        default base case.'''
        try:
            return self.get(
                    pkgbase=pkg.pkgbase, pkgver=pkg.pkgver, pkgrel=pkg.pkgrel,
                    epoch=pkg.epoch, arch=pkg.arch, repo=pkg.repo)
        except SignoffSpecification.DoesNotExist:
            # no stored spec; fall back to the shared immutable default
            # (2 required signoffs, enabled, not known-bad)
            return DEFAULT_SIGNOFF_SPEC
class SignoffSpecification(models.Model):
    '''
    A specification for the signoff policy for this particular revision of a
    package. The default is requiring two signoffs for a given package. These
    are created only if necessary; e.g., if one wanted to override the
    required=2 attribute, otherwise a sane default object is used.
    '''
    pkgbase = models.CharField(max_length=255, db_index=True)
    pkgver = models.CharField(max_length=255)
    pkgrel = models.CharField(max_length=255)
    epoch = models.PositiveIntegerField(default=0)
    arch = models.ForeignKey(Arch)
    repo = models.ForeignKey(Repo)
    user = models.ForeignKey(User, null=True)
    # populated automatically via the pre_save signal hookup at module end
    created = models.DateTimeField(editable=False)
    required = models.PositiveIntegerField(default=2,
        help_text="How many signoffs are required for this package?")
    enabled = models.BooleanField(default=True,
        help_text="Is this package eligible for signoffs?")
    known_bad = models.BooleanField(default=False,
        help_text="Is package is known to be broken in some way?")
    comments = models.TextField(null=True, blank=True)

    objects = SignoffSpecificationManager()

    @property
    def full_version(self):
        """Version string in pacman style: '[epoch:]pkgver-pkgrel'."""
        if self.epoch > 0:
            return u'%d:%s-%s' % (self.epoch, self.pkgver, self.pkgrel)
        return u'%s-%s' % (self.pkgver, self.pkgrel)

    def __unicode__(self):
        return u'%s-%s' % (self.pkgbase, self.full_version)
# fake default signoff spec when we don't have a persisted one in the
# database; mirrors the model's field defaults (required=2, enabled,
# not known_bad, empty comments)
FakeSignoffSpecification = namedtuple('FakeSignoffSpecification',
        ('required', 'enabled', 'known_bad', 'comments'))
DEFAULT_SIGNOFF_SPEC = FakeSignoffSpecification(2, True, False, u'')
class SignoffManager(models.Manager):
    def get_from_package(self, pkg, user, revoked=False):
        '''Utility method to pull all relevant name-version fields from a
        package and get a matching signoff.'''
        # revoked=False matches rows whose 'revoked' timestamp IS NULL
        not_revoked = not revoked
        return self.get(
                pkgbase=pkg.pkgbase, pkgver=pkg.pkgver, pkgrel=pkg.pkgrel,
                epoch=pkg.epoch, arch=pkg.arch, repo=pkg.repo,
                revoked__isnull=not_revoked, user=user)

    def get_or_create_from_package(self, pkg, user):
        '''Utility method to pull all relevant name-version fields from a
        package and get or create a matching non-revoked signoff.'''
        return self.get_or_create(
                pkgbase=pkg.pkgbase, pkgver=pkg.pkgver, pkgrel=pkg.pkgrel,
                epoch=pkg.epoch, arch=pkg.arch, repo=pkg.repo,
                revoked=None, user=user)

    def for_package(self, pkg):
        """All signoffs (any user, revoked or not) for this exact package
        name/version/arch/repo combination."""
        return self.select_related('user').filter(
                pkgbase=pkg.pkgbase, pkgver=pkg.pkgver, pkgrel=pkg.pkgrel,
                epoch=pkg.epoch, arch=pkg.arch, repo=pkg.repo)
class Signoff(models.Model):
    '''
    A signoff for a package (by pkgbase) at a given point in time. These are
    not keyed directly to a Package object so they don't ever get deleted when
    Packages come and go from testing repositories.
    '''
    pkgbase = models.CharField(max_length=255, db_index=True)
    pkgver = models.CharField(max_length=255)
    pkgrel = models.CharField(max_length=255)
    epoch = models.PositiveIntegerField(default=0)
    arch = models.ForeignKey(Arch)
    repo = models.ForeignKey(Repo)
    user = models.ForeignKey(User, related_name="package_signoffs")
    # populated automatically via the pre_save signal hookup at module end
    created = models.DateTimeField(editable=False)
    # non-null timestamp marks this signoff as revoked
    revoked = models.DateTimeField(null=True)
    comments = models.TextField(null=True, blank=True)

    objects = SignoffManager()

    @property
    def packages(self):
        """Package rows matching this signoff's exact version/arch/repo."""
        # TODO: delayed import to avoid circular reference
        from main.models import Package
        return Package.objects.normal().filter(pkgbase=self.pkgbase,
                pkgver=self.pkgver, pkgrel=self.pkgrel, epoch=self.epoch,
                arch=self.arch, repo=self.repo)

    @property
    def full_version(self):
        """Version string in pacman style: '[epoch:]pkgver-pkgrel'."""
        if self.epoch > 0:
            return u'%d:%s-%s' % (self.epoch, self.pkgver, self.pkgrel)
        return u'%s-%s' % (self.pkgver, self.pkgrel)

    def __unicode__(self):
        revoked = u''
        if self.revoked:
            revoked = u' (revoked)'
        return u'%s-%s: %s%s' % (
                self.pkgbase, self.full_version, self.user, revoked)
class FlagRequest(models.Model):
    '''
    A notification the package is out-of-date submitted through the web site.
    '''
    # user is optional; anonymous submitters are identified by email only
    user = models.ForeignKey(User, blank=True, null=True)
    user_email = models.EmailField('email address')
    # populated automatically via the pre_save signal hookup at module end
    created = models.DateTimeField(editable=False)
    ip_address = models.IPAddressField('IP address')
    pkgbase = models.CharField(max_length=255, db_index=True)
    version = models.CharField(max_length=255, default='')
    repo = models.ForeignKey(Repo)
    num_packages = models.PositiveIntegerField('number of packages', default=1)
    message = models.TextField('message to developer', blank=True)
    # NOTE(review): help_text reads like the inverse of the field name
    # (is_spam vs. "from a real person") — confirm intended wording
    is_spam = models.BooleanField(default=False,
            help_text="Is this comment from a real person?")
    is_legitimate = models.BooleanField(default=True,
            help_text="Is this actually an out-of-date flag request?")

    class Meta:
        get_latest_by = 'created'

    def who(self):
        """Display name for the submitter: full name if logged in, else
        the provided email address."""
        if self.user:
            return self.user.get_full_name()
        return self.user_email

    def __unicode__(self):
        return u'%s from %s on %s' % (self.pkgbase, self.who(), self.created)
class PackageGroup(models.Model):
    '''
    Represents a group a package is in. There is no actual group entity,
    only names that link to given packages.
    '''
    pkg = models.ForeignKey('main.Package', related_name='groups')
    name = models.CharField(max_length=255, db_index=True)

    def __unicode__(self):
        return "%s: %s" % (self.name, self.pkg)
class License(models.Model):
    # a license name attached to a single package
    pkg = models.ForeignKey('main.Package', related_name='licenses')
    name = models.CharField(max_length=255)

    def __unicode__(self):
        return self.name

    class Meta:
        ordering = ['name']
class Conflict(models.Model):
    # a 'conflicts' entry for a package: name plus optional
    # comparison operator and version (e.g. 'foo<=1.2')
    pkg = models.ForeignKey('main.Package', related_name='conflicts')
    name = models.CharField(max_length=255, db_index=True)
    comparison = models.CharField(max_length=255, default='')
    version = models.CharField(max_length=255, default='')

    def __unicode__(self):
        if self.version:
            return u'%s%s%s' % (self.name, self.comparison, self.version)
        return self.name

    class Meta:
        ordering = ['name']
class Provision(models.Model):
    # a 'provides' entry for a package: name plus optional version
    pkg = models.ForeignKey('main.Package', related_name='provides')
    name = models.CharField(max_length=255, db_index=True)
    # comparison must be '=' for provides
    comparison = '='
    version = models.CharField(max_length=255, default='')

    def __unicode__(self):
        if self.version:
            return u'%s=%s' % (self.name, self.version)
        return self.name

    class Meta:
        ordering = ['name']
class Replacement(models.Model):
    # a 'replaces' entry for a package: name plus optional
    # comparison operator and version
    pkg = models.ForeignKey('main.Package', related_name='replaces')
    name = models.CharField(max_length=255, db_index=True)
    comparison = models.CharField(max_length=255, default='')
    version = models.CharField(max_length=255, default='')

    def __unicode__(self):
        if self.version:
            return u'%s%s%s' % (self.name, self.comparison, self.version)
        return self.name

    class Meta:
        ordering = ['name']
# hook up some signals: fill in the non-editable 'created' timestamp on
# these models before each save via main.utils.set_created_field
for sender in (PackageRelation, SignoffSpecification, Signoff):
    pre_save.connect(set_created_field, sender=sender,
            dispatch_uid="packages.models")
# vim: set ts=4 sw=4 et:
| gpl-2.0 |
tszym/ansible | lib/ansible/modules/cloud/cloudstack/cs_staticnat.py | 18 | 7667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_staticnat
short_description: Manages static NATs on Apache CloudStack based clouds.
description:
- Create, update and remove static NATs.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the static NAT is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the static NAT for.
- Required if C(state=present).
required: false
default: null
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the static NAT.
required: false
default: false
network:
description:
- Network the IP address is related to.
required: false
default: null
version_added: "2.2"
vpc:
description:
- VPC the network related to.
required: false
default: null
version_added: "2.3"
state:
description:
- State of the static NAT.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the static NAT is related to.
required: false
default: null
account:
description:
- Account the static NAT is related to.
required: false
default: null
project:
description:
- Name of the project the static NAT is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a static NAT: 1.2.3.4 -> web01
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
vm: web01
# remove a static NAT
- local_action:
module: cs_staticnat
ip_address: 1.2.3.4
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ip_address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
zone:
description: Name of zone the static NAT is related to.
returned: success
type: string
sample: ch-gva-2
project:
description: Name of project the static NAT is related to.
returned: success
type: string
sample: Production
account:
description: Account the static NAT is related to.
returned: success
type: string
sample: example account
domain:
description: Domain the static NAT is related to.
returned: success
type: string
sample: example domain
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackStaticNat(AnsibleCloudStack):
    """Implements present/absent handling of a CloudStack static NAT rule."""

    def __init__(self, module):
        super(AnsibleCloudStackStaticNat, self).__init__(module)
        # Maps CloudStack API result keys to this module's return value names
        # (consumed by get_result() in the base class).
        self.returns = {
            'virtualmachinedisplayname': 'vm_display_name',
            'virtualmachinename': 'vm_name',
            'ipaddress': 'ip_address',
            'vmipaddress': 'vm_guest_ip',
        }

    def create_static_nat(self, ip_address):
        """Enable static NAT on *ip_address* for the configured VM.

        Returns the (re-queried) ip_address result dict.
        """
        self.result['changed'] = True
        args = {
            'virtualmachineid': self.get_vm(key='id'),
            'ipaddressid': ip_address['id'],
            'vmguestip': self.get_vm_guest_ip(),
            'networkid': self.get_network(key='id')
        }
        if not self.module.check_mode:
            self.query_api('enableStaticNat', **args)

            # reset ip address and query new values
            self.ip_address = None
            ip_address = self.get_ip_address()
        return ip_address

    def update_static_nat(self, ip_address):
        """Re-point an existing static NAT when VM or guest IP changed.

        Returns the (possibly re-queried) ip_address result dict.
        """
        args = {
            'virtualmachineid': self.get_vm(key='id'),
            'ipaddressid': ip_address['id'],
            'vmguestip': self.get_vm_guest_ip(),
            'networkid': self.get_network(key='id')
        }
        # make an alias, so we can use has_changed()
        ip_address['vmguestip'] = ip_address['vmipaddress']
        if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
                # NOTE(review): unlike absent_static_nat(), poll_async is not
                # consulted here — presumably the disable job must finish
                # before enableStaticNat can succeed; confirm before changing.
                self.poll_job(res, 'staticnat')
                self.query_api('enableStaticNat', **args)

                # reset ip address and query new values
                self.ip_address = None
                ip_address = self.get_ip_address()
        return ip_address

    def present_static_nat(self):
        """Ensure a static NAT exists (create or update as appropriate)."""
        ip_address = self.get_ip_address()
        if not ip_address['isstaticnat']:
            ip_address = self.create_static_nat(ip_address)
        else:
            ip_address = self.update_static_nat(ip_address)
        return ip_address

    def absent_static_nat(self):
        """Ensure any static NAT on the configured IP address is removed."""
        ip_address = self.get_ip_address()
        if ip_address['isstaticnat']:
            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.query_api('disableStaticNat', ipaddressid=ip_address['id'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'staticnat')
        return ip_address
def main():
    """Module entry point: build the argument spec and apply the state."""
    spec = cs_argument_spec()
    spec.update(dict(
        ip_address=dict(required=True),
        vm=dict(),
        vm_guest_ip=dict(),
        network=dict(),
        vpc=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    static_nat = AnsibleCloudStackStaticNat(module)

    # Dispatch on the requested state; anything but 'absent' means 'present'.
    if module.params.get('state') == 'absent':
        ip_address = static_nat.absent_static_nat()
    else:
        ip_address = static_nat.present_static_nat()

    module.exit_json(**static_nat.get_result(ip_address))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
m-kuhn/QGIS | python/plugins/processing/tests/CheckValidityAlgorithm.py | 5 | 4895 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for Processing CheckValidity algorithm.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '2018-09'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.PyQt.QtCore import QCoreApplication, QVariant
from qgis.core import (
QgsFeature,
QgsGeometry,
QgsApplication,
QgsMemoryProviderUtils,
QgsWkbTypes,
QgsField,
QgsFields,
QgsProcessingContext,
QgsProcessingFeedback,
QgsCoordinateReferenceSystem,
QgsProject,
QgsProcessingException,
QgsProcessingUtils,
QgsSettings
)
from processing.core.Processing import Processing
from processing.gui.AlgorithmExecutor import execute
from qgis.testing import start_app, unittest
from qgis.PyQt.QtTest import QSignalSpy
from qgis.analysis import QgsNativeAlgorithms
# Spin up a (headless) QGIS application once for this whole test module.
start_app()
class ConsoleFeedBack(QgsProcessingFeedback):
    """Processing feedback sink that echoes reported errors to stdout."""

    def reportError(self, error, fatalError=False):
        # Print so algorithm errors are visible in the test output.
        print(error)
class TestQgsProcessingCheckValidity(unittest.TestCase):
    """Runs qgis:checkvalidity and checks that the invalid-output layer
    carries the validity failure reason in its trailing '_errors' field."""

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Use dedicated org/app names so QgsSettings().clear() cannot touch
        # the user's real QGIS configuration.
        QCoreApplication.setOrganizationName("QGIS_Test")
        QCoreApplication.setOrganizationDomain(
            "QGIS_TestPyQgsProcessingCheckValidity.com")
        QCoreApplication.setApplicationName(
            "QGIS_TestPyQgsProcessingCheckValidity")
        QgsSettings().clear()
        Processing.initialize()
        QgsApplication.processingRegistry().addProvider(QgsNativeAlgorithms())
        cls.registry = QgsApplication.instance().processingRegistry()

    def _make_layer(self, layer_wkb_name):
        # Helper: build an empty in-memory layer of the named WKB type
        # (e.g. 'Polygon') with one integer field, CRS EPSG:4326.
        fields = QgsFields()
        wkb_type = getattr(QgsWkbTypes, layer_wkb_name)
        fields.append(QgsField('int_f', QVariant.Int))
        layer = QgsMemoryProviderUtils.createMemoryLayer(
            '%s_layer' % layer_wkb_name, fields, wkb_type, QgsCoordinateReferenceSystem(4326))
        self.assertTrue(layer.isValid())
        self.assertEqual(layer.wkbType(), wkb_type)
        return layer

    def test_check_validity(self):
        """Test that the output invalid contains the error reason"""
        polygon_layer = self._make_layer('Polygon')
        self.assertTrue(polygon_layer.startEditing())
        f = QgsFeature(polygon_layer.fields())
        f.setAttributes([1])
        # Self-intersecting "bowtie" polygon — its edges cross at (1, 1).
        f.setGeometry(QgsGeometry.fromWkt(
            'POLYGON ((0 0, 2 2, 0 2, 2 0, 0 0))'))
        self.assertTrue(f.isValid())
        f2 = QgsFeature(polygon_layer.fields())
        f2.setAttributes([1])
        # A plain valid square.
        f2.setGeometry(QgsGeometry.fromWkt(
            'POLYGON((1.1 1.1, 1.1 2.1, 2.1 2.1, 2.1 1.1, 1.1 1.1))'))
        self.assertTrue(f2.isValid())
        self.assertTrue(polygon_layer.addFeatures([f, f2]))
        polygon_layer.commitChanges()
        polygon_layer.rollBack()
        self.assertEqual(polygon_layer.featureCount(), 2)
        QgsProject.instance().addMapLayers([polygon_layer])

        alg = self.registry.createAlgorithmById('qgis:checkvalidity')

        context = QgsProcessingContext()
        context.setProject(QgsProject.instance())
        feedback = ConsoleFeedBack()

        self.assertIsNotNone(alg)
        parameters = {}
        parameters['INPUT_LAYER'] = polygon_layer.id()
        parameters['VALID_OUTPUT'] = 'memory:'
        parameters['INVALID_OUTPUT'] = 'memory:'
        parameters['ERROR_OUTPUT'] = 'memory:'

        # QGIS method
        parameters['METHOD'] = 1
        ok, results = execute(
            alg, parameters, context=context, feedback=feedback)
        self.assertTrue(ok)
        invalid_layer = QgsProcessingUtils.mapLayerFromString(
            results['INVALID_OUTPUT'], context)
        self.assertEqual(invalid_layer.fields().names()[-1], '_errors')
        self.assertEqual(invalid_layer.featureCount(), 1)
        f = next(invalid_layer.getFeatures())
        self.assertEqual(f.attributes(), [
            1, 'segments 0 and 2 of line 0 intersect at 1, 1'])

        # GEOS method
        parameters['METHOD'] = 2
        ok, results = execute(
            alg, parameters, context=context, feedback=feedback)
        self.assertTrue(ok)
        invalid_layer = QgsProcessingUtils.mapLayerFromString(
            results['INVALID_OUTPUT'], context)
        self.assertEqual(invalid_layer.fields().names()[-1], '_errors')
        self.assertEqual(invalid_layer.featureCount(), 1)
        f = next(invalid_layer.getFeatures())
        self.assertEqual(f.attributes(), [1, 'Self-intersection'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
apark263/tensorflow | tensorflow/python/kernel_tests/control_flow_util_v2_test.py | 36 | 2273 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.control_flow_util_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.platform import test
class ControlFlowUtilV2Test(test.TestCase):
    """Verifies control_flow_util_v2.in_defun() inside and outside defuns."""

    def setUp(self):
        # Force control flow v2 for each test, remembering the previous value
        # so tearDown can restore it.
        self._enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
        control_flow_util.ENABLE_CONTROL_FLOW_V2 = True

    def tearDown(self):
        control_flow_util.ENABLE_CONTROL_FLOW_V2 = self._enable_control_flow_v2_old

    def _create_control_flow(self, expect_in_defun):
        """Helper method for testInDefun."""

        def body(i):
            def branch():
                # Runs while the cond branch function is being traced, which
                # is where in_defun() is meaningful.
                self.assertEqual(control_flow_util_v2.in_defun(), expect_in_defun)
                return i + 1

            return control_flow_ops.cond(constant_op.constant(True),
                                         branch, lambda: 0)

        return control_flow_ops.while_loop(lambda i: i < 4, body,
                                           [constant_op.constant(0)])

    @test_util.run_in_graph_and_eager_modes
    def testInDefun(self):
        # Outside any defun, in_defun() must report False...
        self._create_control_flow(False)

        # ...and True when the same control flow is built inside one.
        @function.defun
        def defun():
            self._create_control_flow(True)

        defun()
        self.assertFalse(control_flow_util_v2.in_defun())
# Allow running this test module directly.
if __name__ == "__main__":
    test.main()
| apache-2.0 |
cpausmit/Kraken | filefi/032/data-2011.py | 1 | 2250 | # $Id: data.py,v 1.1 2013/07/10 02:25:44 paus Exp $
import FWCore.ParameterSet.Config as cms

# CMSSW job: read Run2011A Photon AOD (21Jun2013 rereco) and fill Bambu
# ntuples via the MIT BambuFillAOD sequence.
process = cms.Process('FILEFI')

# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')

process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('Mit_031'),
    annotation = cms.untracked.string('AOD'),
    name = cms.untracked.string('BambuProduction')
)

# -1: process every event in the input file.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

process.options = cms.untracked.PSet(
    Rethrow = cms.untracked.vstring('ProductNotFound'),
    fileMode = cms.untracked.string('NOMERGE'),
)

# input source
process.source = cms.Source("PoolSource",
    # fileNames = cms.untracked.vstring('file:/tmp/FAB36B02-36D4-E111-92D6-0025B32036E2.root')
    # fileNames = cms.untracked.vstring('file:/tmp/F853EAC9-44C8-E111-9778-003048F110BE.root')
    # fileNames = cms.untracked.vstring('file:/tmp/4EA92226-F2C6-E111-A390-001D09F23A20.root')
    # fileNames = cms.untracked.vstring('file:/tmp/1C19C50D-AED9-E111-9DDF-E0CB4E553651.root')
    fileNames = cms.untracked.vstring('root://xrootd.unl.edu//store/data/Run2011A/Photon/AOD/21Jun2013-v1/10000/767F1882-00E0-E211-B32F-001E67396FA9.root')
)
# Strip DQM products and a trigger map that Bambu does not consume.
process.source.inputCommands = cms.untracked.vstring("keep *",
                                                     "drop *_MEtoEDMConverter_*_*",
                                                     "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")

# other statements
process.GlobalTag.globaltag = 'FT_R_53_LV3::All'

process.add_(cms.Service("ObjectService"))

process.load("MitProd.BAMBUSequences.BambuFillAOD_cfi")

#process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.fileName = 'bambu-output-file-tmp'

process.bambu_step = cms.Path(process.BambuFillAOD)

# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
| mit |
dart-lang/sdk | runtime/third_party/binary_size/src/run_binary_size_analysis.py | 2 | 27960 | #!/usr/bin/env python3
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a spatial analysis against an arbitrary library.
To use, build the 'binary_size_tool' target. Then run this tool, passing
in the location of the library to be analyzed along with any other options
you desire.
"""
import json
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import struct
import subprocess
import sys
import tempfile
import time
import binary_size_utils
import elf_symbolizer
# Node dictionary keys. These are output in json read by the webapp so
# keep them short to save file size.
# Note: If these change, the webapp must also change.
NODE_TYPE_KEY = 'k'  # Value is 'p' (path), 'b' (bucket) or 's' (symbol).
NODE_NAME_KEY = 'n'
NODE_CHILDREN_KEY = 'children'
NODE_SYMBOL_TYPE_KEY = 't'
NODE_SYMBOL_SIZE_KEY = 'value'
NODE_MAX_DEPTH_KEY = 'maxDepth'
NODE_LAST_PATH_ELEMENT_KEY = 'lastPathElement'

# The display name of the bucket where we put symbols without path.
NAME_NO_PATH_BUCKET = '(No Path)'

# Try to keep data buckets smaller than this to avoid killing the
# graphing lib.
BIG_BUCKET_LIMIT = 3000
def _MkChild(node, name):
    """Return the child of *node* named *name*, creating it on first use."""
    children = node[NODE_CHILDREN_KEY]
    if name not in children:
        # Fresh child: give it its display name and an empty child dict.
        children[name] = {NODE_NAME_KEY: name, NODE_CHILDREN_KEY: {}}
    return children[name]
def SplitNoPathBucket(node):
    """NAME_NO_PATH_BUCKET can be too large for the graphing lib to
    handle. Split it into sub-buckets of BIG_BUCKET_LIMIT leaves each
    in that case.
    """
    root_children = node[NODE_CHILDREN_KEY]
    if NAME_NO_PATH_BUCKET not in root_children:
        return
    no_path_bucket = root_children[NAME_NO_PATH_BUCKET]
    old_children = no_path_bucket[NODE_CHILDREN_KEY]
    # Total number of leaf symbols across all symbol-type buckets.
    count = sum(
        len(bucket[NODE_CHILDREN_KEY]) for bucket in old_children.values())
    if count <= BIG_BUCKET_LIMIT:
        return
    new_children = {}
    no_path_bucket[NODE_CHILDREN_KEY] = new_children
    current_bucket = None
    index = 0
    for symbol_type, symbol_bucket in old_children.items():
        for symbol_name, value in symbol_bucket[NODE_CHILDREN_KEY].items():
            if index % BIG_BUCKET_LIMIT == 0:
                # Integer division: sub-group numbers are 1-based ints
                # (true division would produce a float under Python 3).
                group_no = index // BIG_BUCKET_LIMIT + 1
                current_bucket = _MkChild(
                    no_path_bucket,
                    '%s subgroup %d' % (NAME_NO_PATH_BUCKET, group_no))
                assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
                node[NODE_TYPE_KEY] = 'p'  # p for path
            index += 1
            symbol_size = value[NODE_SYMBOL_SIZE_KEY]
            AddSymbolIntoFileNode(current_bucket, symbol_type,
                                  symbol_name, symbol_size)
def MakeChildrenDictsIntoLists(node):
    """Recursively convert every NODE_CHILDREN_KEY dict into a list of its
    values (the shape the webapp expects). Returns the largest child-list
    length found anywhere in the tree."""
    widest = 0
    if NODE_CHILDREN_KEY in node:
        widest = len(node[NODE_CHILDREN_KEY])
        converted = []
        for child in node[NODE_CHILDREN_KEY].values():
            # Track the widest fan-out seen in any subtree.
            widest = max(widest, MakeChildrenDictsIntoLists(child))
            converted.append(child)
        node[NODE_CHILDREN_KEY] = converted
    return widest
def AddSymbolIntoFileNode(node, symbol_type, symbol_name, symbol_size):
    """Puts symbol into the file path node |node|.
    Returns the number of added levels in tree. I.e. returns 2."""
    # 'node' is the file node; mark it and descend into its per-type bucket.
    node[NODE_LAST_PATH_ELEMENT_KEY] = True
    bucket = _MkChild(node, symbol_type)
    assert NODE_TYPE_KEY not in bucket or bucket[NODE_TYPE_KEY] == 'b'
    bucket[NODE_SYMBOL_TYPE_KEY] = symbol_type
    bucket[NODE_TYPE_KEY] = 'b'  # b for bucket

    # Create (or reuse) the leaf entry for the symbol itself.
    leaf = _MkChild(bucket, symbol_name)
    if NODE_CHILDREN_KEY in leaf:
        if leaf[NODE_CHILDREN_KEY]:
            logging.warning(
                'A container node used as symbol for %s.' % symbol_name)
        # This is going to be used as a leaf so no use for child list.
        del leaf[NODE_CHILDREN_KEY]
    leaf[NODE_SYMBOL_SIZE_KEY] = symbol_size
    leaf[NODE_SYMBOL_TYPE_KEY] = symbol_type
    leaf[NODE_TYPE_KEY] = 's'  # s for symbol
    return 2  # Depth of the added subtree.
def MakeCompactTree(symbols, symbol_path_origin_dir):
    """Build the nested path/bucket/symbol tree the webapp renders.

    symbols: iterable of (name, type, size, path, address) tuples.
    symbol_path_origin_dir: directory that relative source paths in the nm
        output are resolved against.
    """
    result = {
        NODE_NAME_KEY: '/',
        NODE_CHILDREN_KEY: {},
        NODE_TYPE_KEY: 'p',
        NODE_MAX_DEPTH_KEY: 0
    }
    seen_symbol_with_path = False
    cwd = os.path.abspath(os.getcwd())
    for symbol_name, symbol_type, symbol_size, file_path, _address in symbols:

        if 'vtable for ' in symbol_name:
            symbol_type = '@'  # hack to categorize these separately
        # Take path like '/foo/bar/baz', convert to ['foo', 'bar', 'baz']
        if file_path and file_path != "??":
            file_path = os.path.abspath(
                os.path.join(symbol_path_origin_dir, file_path))
            # Let the output structure be relative to $CWD if inside $CWD,
            # otherwise relative to the disk root. This is to avoid
            # unnecessary click-through levels in the output.
            if file_path.startswith(cwd + os.sep):
                file_path = file_path[len(cwd):]
            if file_path.startswith('/'):
                file_path = file_path[1:]
            seen_symbol_with_path = True
        else:
            file_path = NAME_NO_PATH_BUCKET

        path_parts = file_path.split('/')

        # Find pre-existing node in tree, or update if it already exists
        node = result
        depth = 0
        while len(path_parts) > 0:
            path_part = path_parts.pop(0)
            if len(path_part) == 0:
                continue
            depth += 1
            node = _MkChild(node, path_part)
            assert not NODE_TYPE_KEY in node or node[NODE_TYPE_KEY] == 'p'
            node[NODE_TYPE_KEY] = 'p'  # p for path

        # Attach the symbol leaf under the file node; adds two levels.
        depth += AddSymbolIntoFileNode(node, symbol_type, symbol_name,
                                       symbol_size)
        result[NODE_MAX_DEPTH_KEY] = max(result[NODE_MAX_DEPTH_KEY], depth)

    if not seen_symbol_with_path:
        logging.warning('Symbols lack paths. Data will not be structured.')

    # The (no path) bucket can be extremely large if we failed to get
    # path information. Split it into subgroups if needed.
    SplitNoPathBucket(result)

    largest_list_len = MakeChildrenDictsIntoLists(result)

    if largest_list_len > BIG_BUCKET_LIMIT:
        logging.warning('There are sections with %d nodes. '
                        'Results might be unusable.' % largest_list_len)
    return result
def DumpCompactTree(symbols, symbol_path_origin_dir, outfile):
    """Serialize the symbol tree to *outfile* as 'var tree_data=<json>'."""
    tree = MakeCompactTree(symbols, symbol_path_origin_dir)
    with open(outfile, 'w') as fh:
        fh.write('var tree_data=')
        # Whitespace-free separators keep the generated file small.
        json.dump(tree, fh, separators=(',', ':'))
    print('Writing %d bytes json' % os.path.getsize(outfile))
def MakeSourceMap(symbols):
    """Aggregate symbols per (normalized) source path.

    Returns a dict keyed by normalized path (or '[no path]' for symbols
    lacking one), each value holding the first-seen raw 'path', the
    'symbol_count' and the summed 'size'.
    """
    sources = {}
    for _name, _sym_type, size, path, _addr in symbols:
        key = os.path.normpath(path) if path else '[no path]'
        record = sources.setdefault(
            key, {'path': path, 'symbol_count': 0, 'size': 0})
        record['size'] += size
        record['symbol_count'] += 1
    return sources
# Regex for parsing "nm" output. A sample line looks like this:
# 0167b39c 00000018 t ACCESS_DESCRIPTION_free /path/file.c:95
#
# The fields are: address, size, type, name, source location
# Regular expression explained ( see also: https://xkcd.com/208 ):
# ([0-9a-f]{8,}+)   The address
# [\s]+             Whitespace separator
# ([0-9a-f]{8,}+)   The size. From here on out it's all optional.
# [\s]+             Whitespace separator
# (\S?)             The symbol type, which is any non-whitespace char
# [\s*]             Whitespace separator
# ([^\t]*)          Symbol name, any non-tab character (spaces ok!)
# [\t]?             Tab separator
# (.*)              The location (filename[:linennum|?][ (discriminator n)]
sNmPattern = re.compile(
    r'([0-9a-f]{8,})[\s]+([0-9a-f]{8,})[\s]*(\S?)[\s*]([^\t]*)[\t]?(.*)')
class Progress():
    """Mutable counters tracking symbol-lookup progress for status output."""

    def __init__(self):
        # Totals.
        self.count = 0            # symbols processed so far
        self.skip_count = 0       # nm lines skipped (no lookup needed)
        self.collisions = 0       # addresses resolved more than once
        # Disambiguation statistics.
        self.disambiguations = 0
        self.was_ambiguous = 0
        # Throttling state for the periodic console status line.
        self.time_last_output = time.time()
        self.count_last_output = 0
def RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                     disambiguate, src_path):
    """Run nm over *library*, resolve each address to a source location via
    addr2line, and write the annotated nm output to *outfile*.

    jobs: number of concurrent addr2line processes.
    disambiguate: when False, source-path disambiguation is disabled.
    src_path: source tree root used for disambiguation.
    """
    nm_output = RunNm(library, nm_binary)
    nm_output_lines = nm_output.splitlines()
    nm_output_lines_len = len(nm_output_lines)
    address_symbol = {}
    progress = Progress()

    def map_address_symbol(symbol, addr):
        # Callback invoked (possibly from the symbolizer) for each resolved
        # address; records the symbol and updates progress counters.
        progress.count += 1
        if addr in address_symbol:
            # 'Collision between %s and %s.' % (str(symbol.name),
            #                                   str(address_symbol[addr].name))
            progress.collisions += 1
        else:
            if symbol.disambiguated:
                progress.disambiguations += 1
            if symbol.was_ambiguous:
                progress.was_ambiguous += 1

            address_symbol[addr] = symbol

        progress_output()

    def progress_output():
        # Emit a status line every 100 symbols, at most once per second.
        progress_chunk = 100
        if progress.count % progress_chunk == 0:
            time_now = time.time()
            time_spent = time_now - progress.time_last_output
            if time_spent > 1.0:
                # Only output at most once per second.
                progress.time_last_output = time_now
                chunk_size = progress.count - progress.count_last_output
                progress.count_last_output = progress.count
                if time_spent > 0:
                    speed = chunk_size / time_spent
                else:
                    speed = 0
                progress_percent = (100.0 * (
                    progress.count + progress.skip_count) / nm_output_lines_len)
                disambiguation_percent = 0
                if progress.disambiguations != 0:
                    disambiguation_percent = (100.0 * progress.disambiguations /
                                              progress.was_ambiguous)
                sys.stdout.write(
                    '\r%.1f%%: Looked up %d symbols (%d collisions, '
                    '%d disambiguations where %.1f%% succeeded)'
                    ' - %.1f lookups/s.' %
                    (progress_percent, progress.count, progress.collisions,
                     progress.disambiguations, disambiguation_percent, speed))

    # In case disambiguation was disabled, we remove the source path (which upon
    # being set signals the symbolizer to enable disambiguation)
    if not disambiguate:
        src_path = None
    symbolizer = elf_symbolizer.ELFSymbolizer(
        library,
        addr2line_binary,
        map_address_symbol,
        max_concurrent_jobs=jobs,
        source_root_path=src_path)
    user_interrupted = False
    try:
        # First pass: queue an async lookup for every symbol that lacks a
        # source location in the nm output.
        for binary_line in nm_output_lines:
            line = binary_line.decode()
            match = sNmPattern.match(line)
            if match:
                location = match.group(5)
                if not location:
                    addr = int(match.group(1), 16)
                    size = int(match.group(2), 16)
                    if addr in address_symbol:  # Already looked up, shortcut
                        # ELFSymbolizer.
                        map_address_symbol(address_symbol[addr], addr)
                        continue
                    elif size == 0:
                        # Save time by not looking up empty symbols (do they even exist?)
                        print('Empty symbol: ' + line)
                    else:
                        symbolizer.SymbolizeAsync(addr, addr)
                        continue

            progress.skip_count += 1
    except KeyboardInterrupt:
        user_interrupted = True
        print('Interrupting - killing subprocesses. Please wait.')

    try:
        symbolizer.Join()
    except KeyboardInterrupt:
        # Don't want to abort here since we will be finished in a few seconds.
        user_interrupted = True
        print('Patience you must have my young padawan.')

    print('')

    if user_interrupted:
        print('Skipping the rest of the file mapping. '
              'Output will not be fully classified.')

    symbol_path_origin_dir = os.path.dirname(os.path.abspath(library))

    # Second pass: write the nm output, appending '\t<path>:<line>' to every
    # line whose address we managed to resolve.
    with open(outfile, 'w') as out:
        for binary_line in nm_output_lines:
            line = binary_line.decode()
            match = sNmPattern.match(line)
            if match:
                location = match.group(5)
                if not location:
                    addr = int(match.group(1), 16)
                    symbol = address_symbol.get(addr)
                    if symbol is not None:
                        path = '??'
                        if symbol.source_path is not None:
                            path = os.path.abspath(
                                os.path.join(symbol_path_origin_dir,
                                             symbol.source_path))
                        line_number = 0
                        if symbol.source_line is not None:
                            line_number = symbol.source_line
                        out.write('%s\t%s:%d\n' % (line, path, line_number))
                        continue

            out.write('%s\n' % line)

    print('%d symbols in the results.' % len(address_symbol))
def RunNm(binary, nm_binary):
    """Run *nm_binary* over *binary* and return its raw stdout (bytes).

    Symbols come back demangled and sorted by size, largest first.
    Raises Exception (carrying nm's stderr, or stdout if stderr is empty)
    when nm exits non-zero.
    """
    command = [
        nm_binary, '-C', '--print-size', '--size-sort', '--reverse-sort', binary
    ]
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, stderr_data = proc.communicate()
    if proc.returncode != 0:
        # Prefer stderr for the error message; fall back to stdout.
        raise Exception(stderr_data if stderr_data else stdout_data)

    return stdout_data
def GetNmSymbols(nm_infile, outfile, library, jobs, verbose, addr2line_binary,
                 nm_binary, disambiguate, src_path):
    """Return the parsed symbol list, generating nm output first if needed.

    When *nm_infile* is None, nm/addr2line are run over *library* and the
    annotated output is stored in *outfile* (or a temp file when outfile is
    None). Otherwise the pre-existing nm output file is parsed directly.
    """
    if nm_infile is None:
        if outfile is None:
            # Caller did not ask to keep the nm output; use a throwaway file.
            outfile = tempfile.NamedTemporaryFile(delete=False).name

        if verbose:
            print('Running parallel addr2line, dumping symbols to ' + outfile)
        RunElfSymbolizer(outfile, library, addr2line_binary, nm_binary, jobs,
                         disambiguate, src_path)

        nm_infile = outfile

    elif verbose:
        print('Using nm input from ' + nm_infile)

    with open(nm_infile, 'r') as infile:
        return list(binary_size_utils.ParseNm(infile))
# Maps numeric pak resource ids to human-readable names; the 'inited' flag
# records whether we already scanned generated resource headers for names.
PAK_RESOURCE_ID_TO_STRING = {"inited": False}


def LoadPakIdsFromResourceFile(filename):
    """Given a file name, it loads everything that looks like a resource id
    into PAK_RESOURCE_ID_TO_STRING."""
    with open(filename) as resource_header:
        for line in resource_header:
            if not line.startswith("#define "):
                continue
            parts = line.split()
            # Expect exactly '#define NAME <int>'.
            if len(parts) != 3:
                continue
            try:
                PAK_RESOURCE_ID_TO_STRING[int(parts[2])] = parts[1]
            except ValueError:
                # Value is not a plain integer (e.g. an expression); skip it.
                pass
def GetReadablePakResourceName(pak_file, resource_id):
    """Pak resources have a numeric identifier. It is not helpful when
    trying to locate where footprint is generated. This does its best to
    map the number to a usable string."""
    if not PAK_RESOURCE_ID_TO_STRING['inited']:
        # Try to find resource header files generated by grit when
        # building the pak file. We'll look for files named *resources.h"
        # and lines of the type:
        #    #define MY_RESOURCE_JS 1234
        # The scan happens once per process (the 'inited' flag is global).
        PAK_RESOURCE_ID_TO_STRING['inited'] = True
        gen_dir = os.path.join(os.path.dirname(pak_file), 'gen')
        if os.path.isdir(gen_dir):
            for dirname, _dirs, files in os.walk(gen_dir):
                for filename in files:
                    if filename.endswith('resources.h'):
                        LoadPakIdsFromResourceFile(
                            os.path.join(dirname, filename))
    # Fall back to a generic name when the id was never seen in a header.
    return PAK_RESOURCE_ID_TO_STRING.get(resource_id,
                                         'Pak Resource %d' % resource_id)
def AddPakData(symbols, pak_file):
    """Adds pseudo-symbols from a pak file.

    Each resource in the pak index is appended to *symbols* as a
    (name, type, size, path, address) tuple — the same five-field shape as
    nm-derived symbols, since MakeCompactTree and MakeSourceMap unpack five
    values per symbol. The resource's file offset serves as the address.
    """
    pak_file = os.path.abspath(pak_file)
    with open(pak_file, 'rb') as pak:
        data = pak.read()

    PAK_FILE_VERSION = 4
    HEADER_LENGTH = 2 * 4 + 1  # Two uint32s. (file version, number of entries)
    # and one uint8 (encoding of text resources)
    INDEX_ENTRY_SIZE = 2 + 4  # Each entry is a uint16 and a uint32.
    version, num_entries, _encoding = struct.unpack('<IIB',
                                                    data[:HEADER_LENGTH])
    assert version == PAK_FILE_VERSION, (
        'Unsupported pak file '
        'version (%d) in %s. Only '
        'support version %d' % (version, pak_file, PAK_FILE_VERSION))
    if num_entries > 0:
        # Read the index. It holds num_entries + 1 entries: the trailing
        # sentinel lets each resource's size be computed from the next offset.
        data = data[HEADER_LENGTH:]
        for _ in range(num_entries):
            resource_id, offset = struct.unpack('<HI', data[:INDEX_ENTRY_SIZE])
            data = data[INDEX_ENTRY_SIZE:]
            _next_id, next_offset = struct.unpack('<HI',
                                                  data[:INDEX_ENTRY_SIZE])
            resource_size = next_offset - offset
            symbol_name = GetReadablePakResourceName(pak_file, resource_id)
            symbol_path = pak_file
            symbol_type = 'd'  # Data. Approximation.
            symbol_size = resource_size
            # Five fields, matching binary_size_utils.ParseNm output; the
            # original appended 4-tuples, which made downstream 5-way tuple
            # unpacking raise ValueError whenever --pak was used.
            symbols.append(
                (symbol_name, symbol_type, symbol_size, symbol_path, offset))
def _find_in_system_path(binary):
    """Locate the full path to binary in the system path or return None
    if not found."""
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, binary)
        if os.path.isfile(candidate):
            return candidate
    return None
def CheckDebugFormatSupport(library, addr2line_binary):
    """Kills the program if debug data is in an unsupported format.

    There are two common versions of the DWARF debug formats and
    since we are right now transitioning from DWARF2 to newer formats,
    it's possible to have a mix of tools that are not compatible. Detect
    that and abort rather than produce meaningless output."""
    tool_output = subprocess.check_output([addr2line_binary,
                                           '--version']).decode()
    version_re = re.compile(r'^GNU [^ ]+ .* (\d+).(\d+).*?$', re.M)
    parsed_output = version_re.match(tool_output)
    major = int(parsed_output.group(1))
    minor = int(parsed_output.group(2))
    # binutils > 2.22 understands DWARF4, so no further checking is needed.
    supports_dwarf4 = major > 2 or major == 2 and minor > 22

    if supports_dwarf4:
        return

    print('Checking version of debug information in %s.' % library)
    # check_output returns bytes; decode before searching with a str pattern
    # (mixing bytes data with a str regex raises TypeError on Python 3).
    debug_info = subprocess.check_output(
        ['readelf', '--debug-dump=info', '--dwarf-depth=1', library]).decode()
    dwarf_version_re = re.compile(r'^\s+Version:\s+(\d+)$', re.M)
    parsed_dwarf_format_output = dwarf_version_re.search(debug_info)
    version = int(parsed_dwarf_format_output.group(1))
    if version > 2:
        print(
            'The supplied tools only support DWARF2 debug data but the binary\n'
            + 'uses DWARF%d. Update the tools or compile the binary\n' % version
            + 'with -gdwarf-2.')
        sys.exit(1)
def main():
    """Parse options, run the analysis and emit the HTML report."""
    usage = """%prog [options]

  Runs a spatial analysis on a given library, looking up the source locations
  of its symbols and calculating how much space each directory, source file,
  and so on is taking. The result is a report that can be used to pinpoint
  sources of large portions of the binary, etceteras.

  Under normal circumstances, you only need to pass two arguments, thusly:

      %prog --library /path/to/library --destdir /path/to/output

  In this mode, the program will dump the symbols from the specified library
  and map those symbols back to source locations, producing a web-based
  report in the specified output directory.

  Other options are available via '--help'.
  """
    parser = optparse.OptionParser(usage=usage)
    parser.add_option(
        '--nm-in',
        metavar='PATH',
        help='if specified, use nm input from <path> instead of '
        'generating it. Note that source locations should be '
        'present in the file; i.e., no addr2line symbol lookups '
        'will be performed when this option is specified. '
        'Mutually exclusive with --library.')
    parser.add_option(
        '--destdir',
        metavar='PATH',
        help='write output to the specified directory. An HTML '
        'report is generated here along with supporting files; '
        'any existing report will be overwritten.')
    parser.add_option(
        '--library',
        metavar='PATH',
        help='if specified, process symbols in the library at '
        'the specified path. Mutually exclusive with --nm-in.')
    parser.add_option(
        '--pak',
        metavar='PATH',
        help='if specified, includes the contents of the '
        'specified *.pak file in the output.')
    parser.add_option(
        '--nm-binary',
        help='use the specified nm binary to analyze library. '
        'This is to be used when the nm in the path is not for '
        'the right architecture or of the right version.')
    parser.add_option(
        '--addr2line-binary',
        help='use the specified addr2line binary to analyze '
        'library. This is to be used when the addr2line in '
        'the path is not for the right architecture or '
        'of the right version.')
    parser.add_option(
        '--jobs',
        type='int',
        help='number of jobs to use for the parallel '
        'addr2line processing pool; defaults to 1. More '
        'jobs greatly improve throughput but eat RAM like '
        'popcorn, and take several gigabytes each. Start low '
        'and ramp this number up until your machine begins to '
        'struggle with RAM. '
        'This argument is only valid when using --library.')
    parser.add_option(
        '-v',
        '--verbose',
        dest='verbose',
        action='store_true',
        help='be verbose, printing lots of status information.')
    parser.add_option(
        '--nm-out',
        metavar='PATH',
        help='(deprecated) No-op. nm.out is stored in --destdir.')
    parser.add_option(
        '--no-nm-out',
        action='store_true',
        help='do not keep the nm output file. This file is useful '
        'if you want to see the fully processed nm output after '
        'the symbols have been mapped to source locations, or if '
        'you plan to run explain_binary_size_delta.py. By default '
        'the file \'nm.out\' is placed alongside the generated '
        'report. The nm.out file is only created when using '
        '--library.')
    parser.add_option(
        '--disable-disambiguation',
        action='store_true',
        help='disables the disambiguation process altogether,'
        ' NOTE: this may, depending on your toolchain, produce'
        ' output with some symbols at the top layer if addr2line'
        ' could not get the entire source path.')
    parser.add_option(
        '--source-path',
        default='./',
        help='the path to the source code of the output binary, '
        'default set to current directory. Used in the'
        ' disambiguation process.')
    opts, _args = parser.parse_args()

    # Exactly one symbol source must be given.
    if ((not opts.library) and
        (not opts.nm_in)) or (opts.library and opts.nm_in):
        parser.error('exactly one of --library or --nm-in is required')
    if opts.nm_out:
        print('WARNING: --nm-out is deprecated and has no effect.',
              file=sys.stderr)
    if (opts.nm_in):
        if opts.jobs:
            print('WARNING: --jobs has no effect when used with --nm-in',
                  file=sys.stderr)
    if not opts.destdir:
        parser.error('--destdir is a required argument')
    if not opts.jobs:
        # Use the number of processors but cap between 2 and 4 since raw
        # CPU power isn't the limiting factor. It's I/O limited, memory
        # bus limited and available-memory-limited. Too many processes and
        # the computer will run out of memory and it will be slow.
        opts.jobs = max(2, min(4, multiprocessing.cpu_count()))

    # Resolve the tool binaries, falling back to whatever is on $PATH.
    if opts.addr2line_binary:
        assert os.path.isfile(opts.addr2line_binary)
        addr2line_binary = opts.addr2line_binary
    else:
        addr2line_binary = _find_in_system_path('addr2line')
        assert addr2line_binary, 'Unable to find addr2line in the path. '\
            'Use --addr2line-binary to specify location.'
    if opts.nm_binary:
        assert os.path.isfile(opts.nm_binary)
        nm_binary = opts.nm_binary
    else:
        nm_binary = _find_in_system_path('nm')
        assert nm_binary, 'Unable to find nm in the path. Use --nm-binary '\
            'to specify location.'

    if opts.pak:
        # Fixed: the original message string lacked a '%s' placeholder, so a
        # missing pak file raised TypeError instead of this assertion.
        assert os.path.isfile(opts.pak), 'Could not find %s' % opts.pak

    print('addr2line: %s' % addr2line_binary)
    print('nm: %s' % nm_binary)

    if opts.library:
        CheckDebugFormatSupport(opts.library, addr2line_binary)

    # Prepare output directory and report guts
    if not os.path.exists(opts.destdir):
        os.makedirs(opts.destdir, 0o755)
    nm_out = os.path.join(opts.destdir, 'nm.out')
    if opts.no_nm_out:
        nm_out = None

    # Copy report boilerplate into output directory. This also proves that the
    # output directory is safe for writing, so there should be no problems writing
    # the nm.out file later.
    data_js_file_name = os.path.join(opts.destdir, 'data.js')
    d3_out = os.path.join(opts.destdir, 'd3')
    if not os.path.exists(d3_out):
        os.makedirs(d3_out, 0o755)
    d3_src = os.path.join(os.path.dirname(__file__), '..', '..', 'd3', 'src')
    template_src = os.path.join(os.path.dirname(__file__), 'template')
    shutil.copy(os.path.join(d3_src, 'LICENSE'), d3_out)
    shutil.copy(os.path.join(d3_src, 'd3.js'), d3_out)
    shutil.copy(os.path.join(template_src, 'index.html'), opts.destdir)
    shutil.copy(os.path.join(template_src, 'D3SymbolTreeMap.js'), opts.destdir)

    # Run nm and/or addr2line to gather the data.
    # Note: --disable-disambiguation is a store_true flag defaulting to None,
    # so 'is None' means the flag was NOT given, i.e. disambiguation enabled.
    symbols = GetNmSymbols(opts.nm_in, nm_out, opts.library, opts.jobs,
                           opts.verbose is True, addr2line_binary, nm_binary,
                           opts.disable_disambiguation is None,
                           opts.source_path)

    # Post-processing
    if opts.pak:
        AddPakData(symbols, opts.pak)

    if opts.library:
        symbol_path_origin_dir = os.path.dirname(os.path.abspath(opts.library))
    else:
        # Just a guess. Hopefully all paths in the input file are absolute.
        symbol_path_origin_dir = os.path.abspath(os.getcwd())

    # Dump JSON for the HTML report.
    DumpCompactTree(symbols, symbol_path_origin_dir, data_js_file_name)
    print('Report saved to ' + opts.destdir + '/index.html')
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
wandec/grr | parsers/windows_persistence_test.py | 2 | 2225 | #!/usr/bin/env python
"""Tests for grr.parsers.windows_persistence."""
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.parsers import windows_persistence
class WindowsPersistenceMechanismsParserTest(test_lib.FlowTestsBaseclass):
  """Unit tests for WindowsPersistenceMechanismsParser.

  Verifies that registry Run-key values and Windows service image paths are
  normalized to filesystem paths (systemroot expanded, arguments stripped).
  """

  def testParse(self):
    parser = windows_persistence.WindowsPersistenceMechanismsParser()
    # A registry Run-key value whose data contains a command line with args.
    path = (r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion"
            r"\Run\test")
    pathspec = rdfvalue.PathSpec(path=path,
                                 pathtype=rdfvalue.PathSpec.PathType.REGISTRY)
    reg_data = "C:\\blah\\some.exe /v"
    reg_type = rdfvalue.StatEntry.RegistryType.REG_SZ
    stat = rdfvalue.StatEntry(aff4path="aff4:/asdfasdf/", pathspec=pathspec,
                              registry_type=reg_type,
                              registry_data=rdfvalue.DataBlob(string=reg_data))
    persistence = [stat]
    # Service image paths in the three common forms: relative to system32,
    # %systemroot%-prefixed with arguments, and \SystemRoot-prefixed.
    image_paths = ["system32\\drivers\\ACPI.sys",
                   "%systemroot%\\system32\\svchost.exe -k netsvcs",
                   "\\SystemRoot\\system32\\drivers\\acpipmi.sys"]
    reg_key = rdfvalue.RDFURN("aff4:/C.1000000000000000/registry"
                              "/HKEY_LOCAL_MACHINE/SYSTEM/ControlSet001"
                              "/services/AcpiPmi")
    for path in image_paths:
      serv_info = rdfvalue.WindowsServiceInformation(name="blah",
                                                     display_name="GRRservice",
                                                     image_path=path,
                                                     registry_key=reg_key)
      persistence.append(serv_info)

    knowledge_base = rdfvalue.KnowledgeBase()
    knowledge_base.environ_systemroot = "C:\\Windows"

    # Each input item should resolve to exactly one OS path, argument-free.
    expected = ["C:\\blah\\some.exe",
                "C:\\Windows\\system32\\drivers\\ACPI.sys",
                "C:\\Windows\\system32\\svchost.exe",
                "C:\\Windows\\system32\\drivers\\acpipmi.sys"]

    for index, item in enumerate(persistence):
      results = list(parser.Parse(item, knowledge_base,
                                  rdfvalue.PathSpec.PathType.OS))
      self.assertEqual(results[0].pathspec.path, expected[index])
      self.assertEqual(len(results), 1)
| apache-2.0 |
rousseab/pymatgen | pymatgen/phasediagram/tests/test_plotter.py | 4 | 2182 | # coding: utf-8
from __future__ import unicode_literals
import unittest
import os
import numpy as np
from pymatgen.phasediagram.entries import PDEntryIO
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.plotter import PDPlotter, uniquelines, \
triangular_coord, tet_coord
module_dir = os.path.dirname(os.path.abspath(__file__))
class PDPlotterTest(unittest.TestCase):
    """Tests for PDPlotter.pd_plot_data built from a CSV phase-diagram fixture."""

    def setUp(self):
        # Build a PhaseDiagram from the bundled test CSV and wrap it in a plotter.
        (elements, entries) = PDEntryIO.from_csv(os.path.join(module_dir, "pdentries_test.csv"))
        self.pd = PhaseDiagram(entries)
        self.plotter = PDPlotter(self.pd)

    def test_pd_plot_data(self):
        (lines, labels, unstable_entries) = self.plotter.pd_plot_data
        # 22 facet lines are expected for this fixture.
        self.assertEqual(len(lines), 22)
        # One label per stable entry; everything else is reported as unstable.
        self.assertEqual(len(labels), len(self.pd.stable_entries), "Incorrect number of lines generated!")
        self.assertEqual(len(unstable_entries), len(self.pd.all_entries) - len(self.pd.stable_entries), "Incorrect number of lines generated!")
class UtilityFunctionTest(unittest.TestCase):
    """Tests for the module-level helpers uniquelines, triangular_coord, tet_coord."""

    def test_unique_lines(self):
        # Each row is a facet (vertex-index triple); uniquelines should return
        # the deduplicated set of undirected edges.
        testdata = [[5, 53, 353], [399, 20, 52], [399, 400, 20], [13, 399, 52],
                    [21, 400, 353], [393, 5, 353], [400, 393, 353],
                    [393, 400, 399], [393, 13, 5], [13, 393, 399],
                    [400, 17, 20], [21, 17, 400]]
        expected_ans = set([(5, 393), (21, 353), (353, 400), (5, 13), (17, 20),
                            (21, 400), (17, 400), (52, 399), (393, 399),
                            (20, 52), (353, 393), (5, 353), (5, 53), (13, 399),
                            (393, 400), (13, 52), (53, 353), (17, 21),
                            (13, 393), (20, 399), (399, 400), (20, 400)])
        self.assertEqual(uniquelines(testdata), expected_ans)

    def test_triangular_coord(self):
        # Barycentric -> 2-D triangular plot coordinates.
        coord = [0.5, 0.5]
        coord = triangular_coord(coord)
        self.assertTrue(np.allclose(coord, [ 0.75, 0.4330127]))

    def test_tet_coord(self):
        # Barycentric -> 3-D tetrahedral plot coordinates.
        coord = [0.5, 0.5, 0.5]
        coord = tet_coord(coord)
        self.assertTrue(np.allclose(coord, [ 1., 0.57735027, 0.40824829]))
# Allow running this test module directly: `python test_plotter.py`.
if __name__ == '__main__':
    unittest.main()
| mit |
sorki/faf | src/pyfaf/retrace.py | 1 | 8983 | import re
import threading
from pyfaf.common import FafError, log
from pyfaf.queries import get_debug_files
from pyfaf.rpm import unpack_rpm_to_tmp
from pyfaf.utils.proc import safe_popen
# Instance of 'RootLogger' has no 'getChildLogger' member
# Invalid name "log" for type constant
# pylint: disable-msg=C0103,E1103
log = log.getChildLogger(__name__)
# pylint: enable-msg=C0103
RE_ADDR2LINE_LINE1 = re.compile(r"^([_0-9a-zA-Z\.~<>@:\*&,\)"
r"\( ]+|operator[^ ]+)(\+0x[0-9a-f]+)?"
r"( inlined at ([^:]+):([0-9]+) in (.*))?$")
RE_UNSTRIP_BASE_OFFSET = re.compile(r"^((0x)?[0-9a-f]+)")
__all__ = ["IncompleteTask", "RetraceTaskPackage", "RetraceTask",
"RetraceWorker", "addr2line", "demangle", "get_base_address",
"ssource2funcname", "usrmove"]
class IncompleteTask(FafError):
    """Raised when a retrace task cannot be assembled because a required
    package lob is missing from storage."""
    pass
class RetraceTaskPackage(object):
    """
    A "buffer" representing pyfaf.storage.Package. SQL Alchemy objects are
    not threadsafe and this object is used to query and buffer all
    the necessary information so that DB calls are not required from workers.
    """

    def __init__(self, db_package):
        self.db_package = db_package
        # Cache the name-version-release-arch string up front.
        self.nvra = db_package.nvra()

        # Bind the concrete unpacker for the package type; currently only RPM
        # is handled here, shadowing the NotImplementedError stub below.
        if db_package.pkgtype.lower() == "rpm":
            self.unpack_to_tmp = unpack_rpm_to_tmp

        # Filesystem path of the package lob, or None when not in storage.
        self.path = None
        if db_package.has_lob("package"):
            self.path = db_package.get_lob_path("package")

        # Filled in by the worker once the package has been unpacked.
        self.unpacked_path = None

    # An attribute affected in pyfaf.retrace line 32 hide this method
    # pylint: disable-msg=E0202
    def unpack_to_tmp(self, *args, **kwargs):
        """
        Used to unpack the package to a temp directory. Is dependent on
        package type: RPM/DEB/...
        """

        raise NotImplementedError
    # pylint: disable-msg=E0202
# Too few public methods
# pylint: disable-msg=R0903
class RetraceTask(object):
    """
    A class representing the retrace task, containing information about
    all packages and symbols related to the task.

    Raises IncompleteTask from __init__ whenever any referenced package is
    missing its lob in storage.
    """

    def __init__(self, db_debug_package, db_src_package, bin_pkg_map, db=None):
        # Debuginfo package is mandatory and must have its lob available.
        self.debuginfo = RetraceTaskPackage(db_debug_package)
        if self.debuginfo.path is None:
            raise IncompleteTask("Package lob for {0} not found in storage"
                                 .format(self.debuginfo.nvra))

        # Debug file list requires a DB session; buffered here so workers
        # never touch the (non-threadsafe) DB.
        if db is None:
            self.debuginfo.debug_files = None
        else:
            self.debuginfo.debug_files = get_debug_files(db, db_debug_package)

        # Source package is optional.
        if db_src_package is None:
            self.source = None
        else:
            self.source = RetraceTaskPackage(db_src_package)
            if self.source.path is None:
                raise IncompleteTask("Package lob for {0} not found in storage"
                                     .format(self.source.nvra))

        # Map of RetraceTaskPackage -> symbol sources for each binary package.
        self.binary_packages = {}
        if bin_pkg_map is not None:
            for db_bin_package, db_ssources in bin_pkg_map.items():
                pkgobj = RetraceTaskPackage(db_bin_package)
                if pkgobj.path is None:
                    raise IncompleteTask("Package lob for {0} not found in "
                                         "storage".format(pkgobj.nvra))

                self.binary_packages[pkgobj] = db_ssources
# pylint: enable-msg=R0903
class RetraceWorker(threading.Thread, object):
    """
    The worker providing asynchronous unpacking of packages.

    Pops RetraceTask objects from `inqueue` (a deque), unpacks their
    packages to temp directories and pushes the finished task to `outqueue`.
    The thread exits when `inqueue` is exhausted or `self.stop` is set.
    """

    def __init__(self, worker_id, inqueue, outqueue):
        name = "Worker #{0}".format(worker_id)
        super(RetraceWorker, self).__init__(name=name)
        self.inqueue = inqueue
        self.outqueue = outqueue
        # Cooperative shutdown flag checked at the top of run()'s loop.
        self.stop = False
        # Instance of 'RootLogger' has no 'getChildLogger' member
        # pylint: disable-msg=E1103
        self.log = log.getChildLogger("{0}.{1}".format(self.__class__.__name__,
                                                       self.name))
        # pylint: enable-msg=E1103

    def _process_task(self, task):
        """
        Asynchronously unpack one set of packages (debuginfo, source, binary)
        """
        self.log.info("Unpacking '{0}'".format(task.debuginfo.nvra))
        task.debuginfo.unpacked_path = \
            task.debuginfo.unpack_to_tmp(task.debuginfo.path,
                                         prefix=task.debuginfo.nvra)

        if task.source is not None:
            self.log.info("Unpacking '{0}'".format(task.source.nvra))
            task.source.unpacked_path = \
                task.source.unpack_to_tmp(task.source.path,
                                          prefix=task.source.nvra)

        for bin_pkg in task.binary_packages.keys():
            self.log.info("Unpacking '{0}'".format(bin_pkg.nvra))
            # Debuginfo may double as the binary package; skip re-unpacking.
            if bin_pkg.path == task.debuginfo.path:
                self.log.info("Already unpacked")
                continue

            bin_pkg.unpacked_path = bin_pkg.unpack_to_tmp(bin_pkg.path,
                                                          prefix=bin_pkg.nvra)

    def run(self):
        # Drain the input queue; deque.popleft raises IndexError when empty,
        # which terminates the loop. FafError from unpacking is logged and
        # the task is dropped (not forwarded to outqueue).
        while not self.stop:
            try:
                task = self.inqueue.popleft()
                self._process_task(task)
                self.outqueue.put(task)
            except FafError as ex:
                self.log.warn("Unpacking failed: {0}".format(str(ex)))
                continue
            except IndexError:
                break

        self.log.info("{0} terminated".format(self.name))
def addr2line(binary_path, address, debuginfo_dir):
    """
    Calls eu-addr2line on a binary, address and directory with debuginfo.
    Returns an ordered list of triplets (function name, source file, line no).
    The last element is always the symbol given to retrace. The elements
    before are inlined symbols that should be placed above the given symbol
    (assuming that entry point is on the bottom of the stacktrace).

    Raises FafError when eu-addr2line cannot be executed or its output
    cannot be parsed.
    """
    result = []

    child = safe_popen("eu-addr2line",
                       "--executable", binary_path,
                       "--debuginfo-path", debuginfo_dir,
                       "--functions", str(address))

    if child is None:
        # Fixed message: previously misspelled as "eu-add2line".
        raise FafError("eu-addr2line failed")

    # Expect exactly two lines: "funcname" and "file:line".
    # NOTE(review): a ValueError escapes here if eu-addr2line prints a
    # different number of lines — presumably never happens in practice.
    line1, line2 = child.stdout.splitlines()
    line2_parts = line2.split(":", 1)
    line2_srcfile = line2_parts[0]
    line2_srcline = int(line2_parts[1])

    match = RE_ADDR2LINE_LINE1.match(line1)
    if match is None:
        raise FafError("Unexpected output from eu-addr2line: '{0}'"
                       .format(line1))

    if match.group(3) is None:
        # Plain symbol: location comes from the second output line.
        funcname = match.group(1)
        srcfile = line2_srcfile
        srcline = line2_srcline
    else:
        # Inlined symbol: groups 4-6 describe where it was inlined; the
        # outer symbol (group 1) is emitted first so it ends up above.
        funcname = match.group(6)
        srcfile = match.group(4)
        srcline = int(match.group(5))

        result.append((match.group(1), line2_srcfile, line2_srcline))

    result.append((funcname, srcfile, srcline))
    return result
def get_base_address(binary_path):
    """
    Runs eu-unstrip on a binary to get the address used
    as base for calculating relative offsets.

    Returns the base address as an int (parsed as hexadecimal).
    Raises FafError when eu-unstrip fails or prints unexpected output.
    """
    child = safe_popen("eu-unstrip", "-n", "-e", binary_path)
    if child is None:
        raise FafError("eu-unstrip failed")

    # First token of the output is the base address, e.g. "0x3f5e000000+...".
    match = RE_UNSTRIP_BASE_OFFSET.match(child.stdout)
    if match is None:
        raise FafError("Unexpected output from eu-unstrip: '{0}'"
                       .format(child.stdout))

    return int(match.group(1), 16)
def demangle(mangled):
    """
    Demangle C++ symbol name.

    Returns the demangled name (unchanged if `mangled` was not a mangled
    C++ name), or None when c++filt could not be executed.
    """
    child = safe_popen("c++filt", mangled)
    if child is None:
        return None

    result = child.stdout.strip()
    if result != mangled:
        # Only log when c++filt actually changed something.
        log.debug("Demangled: '{0}' ~> '{1}'".format(mangled, result))

    return result
def usrmove(path):
    """
    Adds or cuts off /usr prefix from the path.
    http://fedoraproject.org/wiki/Features/UsrMove
    """
    prefix = "/usr"
    if not path.startswith(prefix):
        # Path without the prefix -> add it.
        return "/usr{0}".format(path)
    # Path with the prefix -> strip it.
    return path[len(prefix):]
def ssource2funcname(db_ssource):
    """
    Returns the symbol.name property of symbolsource, or '??' if symbol
    is None.
    """
    symbol = db_ssource.symbol
    if symbol is not None:
        return symbol.name
    return "??"
def get_function_offset_map(files):
    """
    Runs eu-readelf -s on each file and builds a nested mapping
    {module_name: {function_name: offset}} from the FUNC/NOTYPE symbol
    table entries. Files whose symbols cannot be read are skipped.
    """
    result = {}

    for filename in files:
        # Module name = basename with dashes normalized to underscores
        # (kernel module naming convention).
        modulename = filename.rsplit("/", 1)[1].replace("-", "_")
        if modulename.endswith(".ko.debug"):
            modulename = str(modulename[:-9])

        # idiom fix: `x not in y` instead of `not x in y`
        if modulename not in result:
            result[modulename] = {}

        child = safe_popen("eu-readelf", "-s", filename)
        if child is None:
            # best-effort: ignore unreadable files
            continue

        for line in child.stdout.splitlines():
            if "FUNC" not in line and "NOTYPE" not in line:
                continue

            spl = line.split()
            try:
                # Column 7 is the symbol name (leading underscores stripped),
                # column 1 the hexadecimal value/offset.
                result[modulename][spl[7].lstrip("_")] = int(spl[1], 16)
            except IndexError:
                # Malformed/short line — skip it.
                continue

    return result
| gpl-3.0 |
sabi0/intellij-community | python/helpers/py2only/docutils/io.py | 104 | 17048 | # $Id: io.py 7596 2013-01-25 13:42:17Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
I/O classes provide a uniform API for low-level input and output. Subclasses
exist for a variety of input/output mechanisms.
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import codecs
from docutils import TransformSpec
from docutils._compat import b
from docutils.utils.error_reporting import locale_encoding, ErrorString, ErrorOutput
# Distinct exception types so callers can tell docutils I/O failures
# apart from generic IOErrors.
class InputError(IOError): pass
class OutputError(IOError): pass
def check_encoding(stream, encoding):
    """Test, whether the encoding of `stream` matches `encoding`.

    Returns

    :None:  if `encoding` or `stream.encoding` are not a valid encoding
            argument (e.g. ``None``) or `stream.encoding is missing.
    :True:  if the encoding argument resolves to the same value as `encoding`,
    :False: if the encodings differ.
    """
    try:
        stream_codec = codecs.lookup(stream.encoding)
        wanted_codec = codecs.lookup(encoding)
    except (LookupError, AttributeError, TypeError):
        # unknown codec name, missing attribute, or non-string argument
        return None
    return stream_codec == wanted_codec
class Input(TransformSpec):

    """
    Abstract base class for input wrappers.

    Subclasses implement `read()`; `decode()` provides shared heuristic
    text decoding (BOM / coding declaration / utf-8 / locale / latin-1).
    NOTE: this body uses Python 2 constructs (`unicode`, `except E, err`).
    """

    component_type = 'input'

    default_source_path = None

    def __init__(self, source=None, source_path=None, encoding=None,
                 error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the input source."""

        self.error_handler = error_handler
        """Text decoding error handler."""

        self.source = source
        """The source of input data."""

        self.source_path = source_path
        """A text reference to the source."""

        if not source_path:
            self.source_path = self.default_source_path

        self.successful_encoding = None
        """The encoding that successfully decoded the source data."""

    def __repr__(self):
        return '%s: source=%r, source_path=%r' % (self.__class__, self.source,
                                                  self.source_path)

    def read(self):
        # Subclass responsibility.
        raise NotImplementedError

    def decode(self, data):
        """
        Decode a string, `data`, heuristically.
        Raise UnicodeError if unsuccessful.

        The client application should call ``locale.setlocale`` at the
        beginning of processing::

            locale.setlocale(locale.LC_ALL, '')
        """
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'input encoding is "unicode" '
                'but input is not a unicode object')
        if isinstance(data, unicode):
            # Accept unicode even if self.encoding != 'unicode'.
            return data
        if self.encoding:
            # We believe the user/application when the encoding is
            # explicitly given.
            encodings = [self.encoding]
        else:
            data_encoding = self.determine_encoding_from_data(data)
            if data_encoding:
                # If the data declares its encoding (explicitly or via a BOM),
                # we believe it.
                encodings = [data_encoding]
            else:
                # Apply heuristics only if no encoding is explicitly given and
                # no BOM found. Start with UTF-8, because that only matches
                # data that *IS* UTF-8:
                encodings = ['utf-8', 'latin-1']
                if locale_encoding:
                    encodings.insert(1, locale_encoding)
        for enc in encodings:
            try:
                decoded = unicode(data, enc, self.error_handler)
                self.successful_encoding = enc
                # Return decoded, removing BOMs.
                return decoded.replace(u'\ufeff', u'')
            except (UnicodeError, LookupError), err:
                error = err # in Python 3, the <exception instance> is
                            # local to the except clause
        # All candidate encodings failed; report them all.
        raise UnicodeError(
            'Unable to decode input data. Tried the following encodings: '
            '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]),
                           ErrorString(error)))

    coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)"))
    """Encoding declaration pattern."""

    byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5
                        (codecs.BOM_UTF16_BE, 'utf-16-be'),
                        (codecs.BOM_UTF16_LE, 'utf-16-le'),)
    """Sequence of (start_bytes, encoding) tuples for encoding detection.
    The first bytes of input data are checked against the start_bytes strings.
    A match indicates the given encoding."""

    def determine_encoding_from_data(self, data):
        """
        Try to determine the encoding of `data` by looking *in* `data`.
        Check for a byte order mark (BOM) or an encoding declaration.
        """
        # check for a byte order mark:
        for start_bytes, encoding in self.byte_order_marks:
            if data.startswith(start_bytes):
                return encoding
        # check for an encoding declaration pattern in first 2 lines of file:
        for line in data.splitlines()[:2]:
            match = self.coding_slug.search(line)
            if match:
                return match.group(1).decode('ascii')
        return None
class Output(TransformSpec):

    """
    Abstract base class for output wrappers.

    Subclasses implement `write()`; `encode()` provides shared text
    encoding honoring the 'unicode' pseudo-encoding.
    """

    component_type = 'output'

    default_destination_path = None

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict'):
        self.encoding = encoding
        """Text encoding for the output destination."""

        self.error_handler = error_handler or 'strict'
        """Text encoding error handler."""

        self.destination = destination
        """The destination for output data."""

        self.destination_path = destination_path
        """A text reference to the destination."""

        if not destination_path:
            self.destination_path = self.default_destination_path

    def __repr__(self):
        return ('%s: destination=%r, destination_path=%r'
                % (self.__class__, self.destination, self.destination_path))

    def write(self, data):
        """`data` is a Unicode string, to be encoded by `self.encode`."""
        # Subclass responsibility.
        raise NotImplementedError

    def encode(self, data):
        if self.encoding and self.encoding.lower() == 'unicode':
            assert isinstance(data, unicode), (
                'the encoding given is "unicode" but the output is not '
                'a Unicode string')
            return data
        if not isinstance(data, unicode):
            # Non-unicode (e.g. bytes) output.
            return data
        else:
            return data.encode(self.encoding, self.error_handler)
class FileInput(Input):

    """
    Input for single, simple file-like objects.
    """
    def __init__(self, source=None, source_path=None,
                 encoding=None, error_handler='strict',
                 autoclose=True, handle_io_errors=None, mode='rU'):
        """
        :Parameters:
            - `source`: either a file-like object (which is read directly), or
              `None` (which implies `sys.stdin` if no `source_path` given).
            - `source_path`: a path to a file, which is opened and then read.
            - `encoding`: the expected text encoding of the input file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after read (except when
              `sys.stdin` is the source).
            - `handle_io_errors`: ignored, deprecated, will be removed.
            - `mode`: how the file is to be opened (see standard function
              `open`). The default 'rU' provides universal newline
              support for text files.
        """
        Input.__init__(self, source, source_path, encoding, error_handler)
        self.autoclose = autoclose
        self._stderr = ErrorOutput()

        if source is None:
            if source_path:
                # Specify encoding in Python 3
                if sys.version_info >= (3,0):
                    kwargs = {'encoding': self.encoding,
                              'errors': self.error_handler}
                else:
                    kwargs = {}
                try:
                    self.source = open(source_path, mode, **kwargs)
                except IOError, error:
                    raise InputError(error.errno, error.strerror, source_path)
            else:
                # No source and no path: fall back to standard input.
                self.source = sys.stdin
        elif (sys.version_info >= (3,0) and
              check_encoding(self.source, self.encoding) is False):
            # TODO: re-open, warn or raise error?
            raise UnicodeError('Encoding clash: encoding given is "%s" '
                               'but source is opened with encoding "%s".' %
                               (self.encoding, self.source.encoding))
        if not source_path:
            try:
                self.source_path = self.source.name
            except AttributeError:
                pass

    def read(self):
        """
        Read and decode a single file and return the data (Unicode string).
        """
        try: # In Python < 2.5, try...except has to be nested in try...finally.
            try:
                if self.source is sys.stdin and sys.version_info >= (3,0):
                    # read as binary data to circumvent auto-decoding
                    data = self.source.buffer.read()
                    # normalize newlines
                    data = b('\n').join(data.splitlines()) + b('\n')
                else:
                    data = self.source.read()
            except (UnicodeError, LookupError), err: # (in Py3k read() decodes)
                if not self.encoding and self.source_path:
                    # re-read in binary mode and decode with heuristics
                    b_source = open(self.source_path, 'rb')
                    data = b_source.read()
                    b_source.close()
                    # normalize newlines
                    data = b('\n').join(data.splitlines()) + b('\n')
                else:
                    raise
        finally:
            if self.autoclose:
                self.close()
        return self.decode(data)

    def readlines(self):
        """
        Return lines of a single file as list of Unicode strings.
        """
        return self.read().splitlines(True)

    def close(self):
        # Never close stdin; it is shared with the rest of the process.
        if self.source is not sys.stdin:
            self.source.close()
class FileOutput(Output):

    """
    Output for single, simple file-like objects.
    """

    mode = 'w'
    """The mode argument for `open()`."""
    # 'wb' for binary (e.g. OpenOffice) files (see also `BinaryFileOutput`).
    # (Do not use binary mode ('wb') for text files, as this prevents the
    # conversion of newlines to the system specific default.)

    def __init__(self, destination=None, destination_path=None,
                 encoding=None, error_handler='strict', autoclose=True,
                 handle_io_errors=None, mode=None):
        """
        :Parameters:
            - `destination`: either a file-like object (which is written
              directly) or `None` (which implies `sys.stdout` if no
              `destination_path` given).
            - `destination_path`: a path to a file, which is opened and then
              written.
            - `encoding`: the text encoding of the output file.
            - `error_handler`: the encoding error handler to use.
            - `autoclose`: close automatically after write (except when
              `sys.stdout` or `sys.stderr` is the destination).
            - `handle_io_errors`: ignored, deprecated, will be removed.
            - `mode`: how the file is to be opened (see standard function
              `open`). The default is 'w', providing universal newline
              support for text files.
        """
        Output.__init__(self, destination, destination_path,
                        encoding, error_handler)
        self.opened = True
        self.autoclose = autoclose
        if mode is not None:
            self.mode = mode
        self._stderr = ErrorOutput()
        if destination is None:
            if destination_path:
                # Defer opening until the first write().
                self.opened = False
            else:
                self.destination = sys.stdout
        elif (# destination is file-type object -> check mode:
              mode and hasattr(self.destination, 'mode')
              and mode != self.destination.mode):
            print >>self._stderr, ('Warning: Destination mode "%s" '
                                   'differs from specified mode "%s"' %
                                   (self.destination.mode, mode))
        if not destination_path:
            try:
                self.destination_path = self.destination.name
            except AttributeError:
                pass

    def open(self):
        # Specify encoding in Python 3.
        if sys.version_info >= (3,0) and 'b' not in self.mode:
            kwargs = {'encoding': self.encoding,
                      'errors': self.error_handler}
        else:
            kwargs = {}
        try:
            self.destination = open(self.destination_path, self.mode, **kwargs)
        except IOError, error:
            raise OutputError(error.errno, error.strerror,
                              self.destination_path)
        self.opened = True

    def write(self, data):
        """Encode `data`, write it to a single file, and return it.

        With Python 3 or binary output mode, `data` is returned unchanged,
        except when specified encoding and output encoding differ.
        """
        if not self.opened:
            self.open()
        if ('b' not in self.mode and sys.version_info < (3,0)
            or check_encoding(self.destination, self.encoding) is False
           ):
            if sys.version_info >= (3,0) and os.linesep != '\n':
                data = data.replace('\n', os.linesep) # fix endings
            data = self.encode(data)

        try: # In Python < 2.5, try...except has to be nested in try...finally.
            try:
                self.destination.write(data)
            except TypeError, e:
                if sys.version_info >= (3,0) and isinstance(data, bytes):
                    # Py3: text stream rejected bytes; try the raw buffer.
                    try:
                        self.destination.buffer.write(data)
                    except AttributeError:
                        if check_encoding(self.destination,
                                          self.encoding) is False:
                            raise ValueError('Encoding of %s (%s) differs \n'
                                '  from specified encoding (%s)' %
                                (self.destination_path or 'destination',
                                 self.destination.encoding, self.encoding))
                        else:
                            raise e
            except (UnicodeError, LookupError), err:
                raise UnicodeError(
                    'Unable to encode output data. output-encoding is: '
                    '%s.\n(%s)' % (self.encoding, ErrorString(err)))
        finally:
            if self.autoclose:
                self.close()
        return data

    def close(self):
        # Never close the shared standard streams.
        if self.destination not in (sys.stdout, sys.stderr):
            self.destination.close()
            self.opened = False
class BinaryFileOutput(FileOutput):
    """
    A version of docutils.io.FileOutput which writes to a binary file.
    """
    # Used by core.publish_cmdline_to_binary() which in turn is used by
    # rst2odt (OpenOffice writer)
    mode = 'wb'
class StringInput(Input):

    """
    Direct string input.
    """

    default_source_path = '<string>'

    def read(self):
        """Decode and return the source string."""
        return self.decode(self.source)
class StringOutput(Output):

    """
    Direct string output.
    """

    default_destination_path = '<string>'

    def write(self, data):
        """Encode `data`, store it in `self.destination`, and return it."""
        self.destination = self.encode(data)
        return self.destination
class NullInput(Input):

    """
    Degenerate input: read nothing.
    """

    default_source_path = 'null input'

    def read(self):
        """Return a null string."""
        return u''
class NullOutput(Output):

    """
    Degenerate output: write nothing.
    """

    default_destination_path = 'null output'

    def write(self, data):
        """Do nothing ([don't even] send data to the bit bucket)."""
        pass
class DocTreeInput(Input):

    """
    Adapter for document tree input.

    The document tree must be passed in the ``source`` parameter.
    """

    default_source_path = 'doctree input'

    def read(self):
        """Return the document tree."""
        return self.source
| apache-2.0 |
titasakgm/brc-stock | doc/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments highlighting style for the Flask Sphinx theme,
    based on the Tango style."""

    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'

        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'

        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'

        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords

        Punctuation:               "bold #000000",           # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised

        Number:                    "#990000",                # class: 'm'

        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'

        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'

        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| agpl-3.0 |
MacHu-GWU/angora-project | angora/dataIO/pk.py | 1 | 16257 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is re-pack of some pickle utility functions.
- :func:`load_pk`: Load Python Object from Pickle file.
- :func:`dump_pk`: Dump Picklable Python Object to file.
- :func:`safe_dump_pk`: An atomic write version of dump_pk, silently overwrite
existing file.
- :func:`obj2bytestr`: Convert arbitrary pickable Python Object to bytestr.
- :func:`bytestr2obj`: Parse Python object from bytestr.
- :func:`obj2str`: convert arbitrary object to database friendly string, using
base64encode algorithm.
- :func:`str2obj`: Parse object from base64 encoded string.
Highlight
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`load_pk`, :func:`dump_pk`, :func:`safe_dump_pk` support gzip compress,
size is **10 - 20 times** smaller in average.
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, unicode_literals
import os
import sys
import gzip
import shutil
import time
import base64
import pickle
is_py2 = (sys.version_info[0] == 2)
if is_py2:
pk_protocol = 2
else:
pk_protocol = 3
try:
from ..gadget.messenger import Messenger
except:
from angora.gadget.messenger import Messenger
def load_pk(abspath, compress=False, enable_verbose=True):
    """Load Python Object from Pickle file.

    :param abspath: File path. Use absolute path as much as you can. File
      extension has to be ``.pickle`` or ``.gz``. (for compressed Pickle)
    :type abspath: string

    :param compress: (default False) Load from a gzip compressed Pickle file.
      Check :func:`dump_pk()<dump_pk>` function for more information.
    :type compress: boolean

    :param enable_verbose: (default True) Trigger for message.
    :type enable_verbose: boolean

    :raises Exception: when the file extension does not match the
      ``compress`` flag (``.gz`` for compressed, ``.pickle`` otherwise).

    Usage::

        >>> from weatherlab.lib.dataIO.pk import load_pk
        >>> load_pk("test.pickle") # if you have a Pickle file
        Loading from test.pickle...
        Complete! Elapse 0.000272 sec.
        {'a': 1, 'b': 2}
    """
    abspath = str(abspath) # try stringlize

    msg = Messenger(enable_verbose=enable_verbose)

    if compress: # check extension name
        if os.path.splitext(abspath)[1] != ".gz":
            raise Exception("compressed pickle has to use extension '.gz'!")
    else:
        if os.path.splitext(abspath)[1] != ".pickle":
            raise Exception("file extension are not '.pickle'!")

    msg.show("\nLoading from %s..." % abspath)

    # time.clock() was removed in Python 3.8; use perf_counter when available
    # and fall back to clock on old interpreters (this module supports py2).
    timer = getattr(time, "perf_counter", None) or time.clock
    st = timer()

    if compress:
        with gzip.open(abspath, "rb") as f:
            obj = pickle.loads(f.read())
    else:
        with open(abspath, "rb") as f:
            obj = pickle.load(f)

    if enable_verbose:
        msg.show(" Complete! Elapse %.6f sec" % (timer() - st))

    return obj
def dump_pk(obj, abspath,
            pk_protocol=pk_protocol, replace=False, compress=False,
            enable_verbose=True):
    """Dump a picklable Python object to a file.

    :param obj: picklable Python object.
    :param abspath: destination path; extension must be ``.pickle``, or
        ``.gz`` for a gzip-compressed pickle. A trailing ``.tmp`` suffix on
        top of either extension is also accepted (used by
        :func:`safe_dump_pk` for atomic writes).
    :type abspath: string
    :param pk_protocol: pickle protocol (default: 2 on py2, 3 on py3). Use 2
        to produce files readable by both py2.x and py3.x; 3 is faster.
    :type pk_protocol: int
    :param replace: (default False) silently overwrite an existing file
        instead of raising. The default protects against accidental
        overwrites.
    :type replace: boolean
    :param compress: (default False) gzip-compress the output; read it back
        with :func:`load_pk(abspath, compress=True)<load_pk>`.
    :type compress: boolean
    :param enable_verbose: (default True) print progress messages.
    :type enable_verbose: boolean
    """
    abspath = str(abspath)  # accept path-like objects by stringifying
    msg = Messenger(enable_verbose=enable_verbose)

    # Validate the extension; a ".tmp" suffix may wrap the real extension
    # (safe_dump_pk writes "<name>.pickle.tmp"/"<name>.gz.tmp", then renames).
    expected_ext = ".gz" if compress else ".pickle"
    root, ext = os.path.splitext(abspath)
    if ext == ".tmp":
        _, ext = os.path.splitext(root)
    if ext != expected_ext:
        if compress:
            raise Exception(
                "compressed pickle has to use extension '.gz'!")
        else:
            raise Exception("file extension are not '.pickle'!")

    msg.show("\nDumping to %s..." % abspath)
    # Bug fix: time.clock() was removed in Python 3.8; time.time() works on
    # both Python 2 and 3.
    st = time.time()

    # Refuse to clobber an existing file unless explicitly allowed.
    if os.path.exists(abspath) and not replace:
        raise Exception("\tCANNOT WRITE to %s, "
                        "it's already exists" % abspath)

    # Single write path (the original duplicated this block in both the
    # exists/replace and the not-exists branches).
    if compress:
        with gzip.open(abspath, "wb") as f:
            f.write(pickle.dumps(obj, protocol=pk_protocol))
    else:
        with open(abspath, "wb") as f:
            pickle.dump(obj, f, protocol=pk_protocol)
    msg.show("    Complete! Elapse %.6f sec" % (time.time() - st))
def safe_dump_pk(obj, abspath, pk_protocol=pk_protocol, compress=False,
                 enable_verbose=True):
    """Atomic-write version of :func:`dump_pk`; silently overwrites the target.

    If a dump is interrupted it normally leaves an incomplete file, and with
    ``replace=True`` the old file is lost as well. To make the write
    effectively atomic this function:

    1. dumps the pickle to a temporary ``<abspath>.tmp`` file, then
    2. renames it over *abspath* once the write has completed.

    An interrupted run therefore leaves the original file intact (at worst
    an orphaned ``.tmp`` file remains).

    :param obj: picklable Python object.
    :param abspath: destination path; extension must be ``.pickle`` or
        ``.gz`` (for a compressed pickle).
    :type abspath: string
    :param pk_protocol: pickle protocol (default: 2 on py2, 3 on py3). Use 2
        to produce files readable by both py2.x and py3.x; 3 is faster.
    :type pk_protocol: int
    :param compress: (default False) gzip-compress the output; read it back
        with :func:`load_pk(abspath, compress=True)<load_pk>`.
    :type compress: boolean
    :param enable_verbose: (default True) print progress messages.
    :type enable_verbose: boolean

    Usage::

        >>> safe_dump_pk({"a": 1, "b": 2}, "test.pickle")
        Dumping to test.pickle...
            Complete! Elapse 0.001763 sec
    """
    abspath = str(abspath)  # accept path-like objects by stringifying
    temp_abspath = "%s.tmp" % abspath
    dump_pk(obj, temp_abspath, pk_protocol=pk_protocol,
            replace=True, compress=compress, enable_verbose=enable_verbose)
    # Rename over the target; on the same filesystem this is a rename, which
    # is atomic on POSIX -- TODO confirm for the deployment platform.
    shutil.move(temp_abspath, abspath)
def obj2bytestr(obj, pk_protocol=pk_protocol):
    """Serialize any picklable Python object to a pickle byte string.

    Usage::

        >>> obj2bytestr({"a": 1, "b": 2}, pk_protocol=2)  # doctest: +SKIP
        b'...'
    """
    payload = pickle.dumps(obj, protocol=pk_protocol)
    return payload
def bytestr2obj(bytestr):
    """Reconstruct a Python object from a pickle byte string.

    Usage::

        >>> bytestr2obj(obj2bytestr({"a": 1, "b": 2}))  # doctest: +SKIP
        {"a": 1, "b": 2}
    """
    obj = pickle.loads(bytestr)
    return obj
def obj2str(obj, pk_protocol=pk_protocol):
    """Serialize any picklable object to a utf-8 text string via base64.

    Usage::

        >>> obj2str({"a": 1, "b": 2}, pk_protocol=2)
        'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
    """
    payload = pickle.dumps(obj, protocol=pk_protocol)
    return base64.b64encode(payload).decode("utf-8")
def str2obj(textstr):
    """Reconstruct a Python object from a base64-encoded pickle string.

    Usage::

        >>> str2obj("gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg==")
        {"a": 1, "b": 2}
    """
    raw = base64.b64decode(textstr.encode("utf-8"))
    return pickle.loads(raw)
#--- Unittest ---
if __name__ == "__main__":
    import unittest
    import sqlite3
    class PKUnittest(unittest.TestCase):
        def test_write_and_read(self):
            # dump then reload; non-ASCII values must survive the round trip
            data = {1: [1, 2], 2: ["是", "否"]}
            safe_dump_pk(data, "data.pickle")
            data = load_pk("data.pickle")  # reload the freshly dumped file
            self.assertEqual(data[1][0], 1)
            self.assertEqual(data[2][0], "是")
        def test_handle_object(self):
            # object -> base64 string -> object round trip
            python_object = {"a": 1}
            self.assertEqual(str2obj(obj2str(python_object)), python_object)
        def test_obj2bytestr(self):
            """pickle.dumps returns bytes, and the Python 2 sqlite driver
            cannot insert bytes directly -- they must first be base64-encoded
            into a string. Python 3 can store the pickle bytestr directly,
            skipping the base64 overhead.

            NOTE: on Python 2 this can also be addressed by setting
            connect.text_factory; see the sqlite documentation for details.

            This test will not pass in Python2, because sqlite python2 API
            doens't support bytes.
            """
            conn = sqlite3.connect(":memory:")
            c = conn.cursor()
            c.execute("CREATE TABLE test (dictionary BLOB) ")  # BLOB is byte
            c.execute("INSERT INTO test VALUES (?)",
                      (obj2bytestr({1: "a", 2: "你好"}),))
            # see what stored in database
            print(c.execute("select * from test").fetchone())
            # recovery object from byte str
            self.assertDictEqual(
                bytestr2obj(c.execute("select * from test").fetchone()[0]),
                {1: "a", 2: "你好"},
            )
        def test_obj2str(self):
            """Dumping an object to a pickle bytestr and base64-encoding it
            into an ASCII string makes it storable in any database TEXT
            column.
            """
            conn = sqlite3.connect(":memory:")
            c = conn.cursor()
            c.execute("CREATE TABLE test (name TEXT) ")
            c.execute("INSERT INTO test VALUES (?)",
                      (obj2str({1: "a", 2: "你好"}),))
            # see what stored in database
            print(c.execute("select * from test").fetchone())
            # recovery object from text str
            self.assertDictEqual(
                str2obj(c.execute("select * from test").fetchone()[0]),
                {1: "a", 2: "你好"},
            )
        def test_compress(self):
            # gzip-compressed round trip
            data = {"a": list(range(32)),
                    "b": list(range(32)), }
            safe_dump_pk(data, "data.gz", compress=True)
            print(load_pk("data.gz", compress=True))
        def tearDown(self):
            # best-effort cleanup of files the tests may have created
            for path in ["data.pickle", "data.gz"]:
                try:
                    os.remove(path)
                except:
                    pass
unittest.main() | mit |
marcelocure/django | tests/shortcuts/urls.py | 252 | 1416 | from django.conf.urls import url
from . import views
# Routes exercised by Django's shortcuts test-suite; each URL maps to a view
# that calls render()/render_to_response() with one specific option.
urlpatterns = [
    url(r'^render_to_response/$', views.render_to_response_view),
    url(r'^render_to_response/multiple_templates/$', views.render_to_response_view_with_multiple_templates),
    url(r'^render_to_response/request_context/$', views.render_to_response_view_with_request_context),
    url(r'^render_to_response/content_type/$', views.render_to_response_view_with_content_type),
    url(r'^render_to_response/dirs/$', views.render_to_response_view_with_dirs),
    url(r'^render_to_response/status/$', views.render_to_response_view_with_status),
    url(r'^render_to_response/using/$', views.render_to_response_view_with_using),
    url(r'^render_to_response/context_instance_misuse/$', views.render_to_response_with_context_instance_misuse),
    url(r'^render/$', views.render_view),
    url(r'^render/multiple_templates/$', views.render_view_with_multiple_templates),
    url(r'^render/base_context/$', views.render_view_with_base_context),
    url(r'^render/content_type/$', views.render_view_with_content_type),
    url(r'^render/dirs/$', views.render_with_dirs),
    url(r'^render/status/$', views.render_view_with_status),
    url(r'^render/using/$', views.render_view_with_using),
    url(r'^render/current_app/$', views.render_view_with_current_app),
    url(r'^render/current_app_conflict/$', views.render_view_with_current_app_conflict),
]
| bsd-3-clause |
bobcyw/py2app | examples/PyObjC/ICSharingWatcher/TableModelAppDelegate.py | 3 | 1155 | import os
from Cocoa import *
import objc
import leases
FILENAME = '/var/db/dhcpd_leases'
def getLeases(fn):
    """Return the parsed DHCP lease records from *fn*.

    Falls back to the bundled example data when the file does not exist
    (e.g. when Internet Sharing has never written a leases file).
    """
    if os.path.exists(fn):
        # Bug fix: file() does not exist on Python 3; open() is the
        # documented equivalent, and the with-block closes the descriptor
        # (the original left the file open).
        with open(fn, 'U') as lines:
            return list(leases.leases(lines))
    return list(leases.leases(leases.EXAMPLE.splitlines()))
class TableModelAppDelegate (NSObject):
    """App delegate that polls the dhcpd leases file and exposes the parsed
    leases to the UI (presumably via Cocoa key-value coding on the
    ``leases`` accessor pair -- TODO confirm against the nib bindings).
    """
    mainWindow = objc.IBOutlet()
    def awakeFromNib(self):
        # Poll once per second; the userInfo dict carries the last-seen mtime.
        self.timer = NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(1.0, self, 'pollLeases:', {}, True)
    def pollLeases_(self, timer):
        # Re-parse the leases file only when its mtime has advanced.
        if not os.path.exists(FILENAME):
            return
        d = timer.userInfo()
        newtime = os.stat(FILENAME).st_mtime
        oldtime = d.get('st_mtime', 0)
        if newtime > oldtime:
            d['st_mtime'] = newtime
            self.setLeases_(getLeases(FILENAME))
    def leases(self):
        # Lazily computed cache; first access parses the file directly.
        if not hasattr(self, '_cachedleases'):
            self._cachedleases = getLeases(FILENAME)
        return self._cachedleases
    def setLeases_(self, leases):
        self._cachedleases = leases
    def windowWillClose_(self, sender):
        # Quit the whole app when the main window closes.
        # NOTE(review): NSApplication terminate normally takes a sender
        # argument (terminate_(None)) -- confirm this call works under PyObjC.
        if sender is self.mainWindow:
            NSApp().terminate()
| mit |
rew4332/tensorflow | tensorflow/contrib/tensor_forest/python/kernel_tests/best_splits_op_test.py | 28 | 4361 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.best_splits_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class BestSplitsClassificationTests(test_util.TensorFlowTestCase):
  """Tests for the classification path of the best_splits training op."""

  def setUp(self):
    # Two finished nodes; node_map routes node 3 -> accumulator 0 and
    # node 5 -> accumulator 3.
    self.finished = [3, 5]
    self.node_map = [-1, -1, -1, 0, -1, 3, -1, -1, -1]
    # Per-accumulator, per-candidate-split class counts.
    self.candidate_counts = [[[153., 50., 60., 40., 3.],
                              [200., 70., 30., 70., 30.]],
                             [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]],
                             [[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.]],
                             [[40., 10., 10., 10., 10.],
                              [30., 10., 5., 5., 10.]]]
    self.total_counts = [[400., 100., 100., 100., 100.],
                         [0., 0., 0., 0., 0.],
                         [0., 0., 0., 0., 0.],
                         [400., 100., 100., 100., 100.]]
    self.squares = []
    self.ops = training_ops.Load()

  def testSimple(self):
    # Expect the best split index for each finished node.
    with self.test_session():
      split_indices = self.ops.best_splits(
          self.finished, self.node_map, self.candidate_counts, self.squares,
          self.total_counts, self.squares, regression=False)
      self.assertAllEqual([0, 1], split_indices.eval())

  def testNoFinished(self):
    # No finished nodes -> empty result.
    with self.test_session():
      split_indices = self.ops.best_splits(
          [], self.node_map, self.candidate_counts, self.squares,
          self.total_counts, self.squares, regression=False)
      self.assertAllEqual([], split_indices.eval())

  def testBadInput(self):
    # Mismatched accumulator counts must be rejected by the op.
    del self.total_counts[1]
    with self.test_session():
      with self.assertRaisesOpError(
          'Number of accumulators should be the same in split_sums '
          'and accumulator_sums.'):
        self.ops.best_splits(
            self.finished, self.node_map, self.candidate_counts, self.squares,
            self.total_counts, self.squares, regression=False).eval()
class BestSplitsRegressionTests(test_util.TensorFlowTestCase):
  """Tests for the regression path of the best_splits training op."""

  def setUp(self):
    # Same node/accumulator layout as the classification tests, but with
    # candidate sums and sums-of-squares instead of class counts.
    self.finished = [3, 5]
    self.node_map = [-1, -1, -1, 0, -1, 3, -1, -1, -1]
    self.candidate_sums = [[[5., 8., 8., 8.], [5., 10., 10., 10.]],
                           [[0., 0., 0., 0.], [0., 0., 0., 0.]],
                           [[0., 0., 0., 0.], [0., 0., 0., 0.]],
                           [[10., 10., 20., 10.], [10., 5., 5., 5.]]]
    self.candidate_squares = [[[5., 50., 50., 50.], [5., 50., 50., 50.]],
                              [[0., 0., 0., 0.], [0., 0., 0., 0.]],
                              [[0., 0., 0., 0.], [0., 0., 0., 0.]],
                              [[10., 40., 50., 60.], [10., 40., 40., 40.]]]
    self.total_sums = [[15., 10., 10., 10.],
                       [0., 0., 0., 0.],
                       [0., 0., 0., 0.],
                       [20., 20., 20., 20.]]
    self.total_squares = [[15., 50., 50., 50.],
                          [0., 0., 0., 0.],
                          [0., 0., 0., 0.],
                          [20., 60., 60., 60.]]
    self.ops = training_ops.Load()

  def testSimple(self):
    # Expect the best split index for each finished node in regression mode.
    with self.test_session():
      split_indices = self.ops.best_splits(
          self.finished, self.node_map, self.candidate_sums,
          self.candidate_squares, self.total_sums, self.total_squares,
          regression=True)
      self.assertAllEqual([1, 0], split_indices.eval())
# Run the test classes above when executed directly.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
mindnervestech/mnrp | addons/hr_payroll/__init__.py | 433 | 1137 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_payroll
import report
import wizard
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
saurabh6790/medapp | hr/doctype/leave_control_panel/leave_control_panel.py | 30 | 2230 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, cstr, flt, nowdate
from webnotes.model.doc import Document
from webnotes.model.code import get_obj
from webnotes import msgprint
class DocType:
	"""Leave Control Panel: bulk-allocates leave to employees matching
	the filters selected on the form (employment type, branch,
	designation, department, grade).
	"""
	def __init__(self, doc, doclist):
		self.doc = doc
		self.doclist = doclist

	# Get Employees
	# **********************************************************************
	def get_employees(self):
		"""Return employee names matching the selected filters.

		Security fix: the original concatenated user-entered filter values
		directly into the SQL string (injection risk); this builds a
		parameterized query instead.
		"""
		filters = [
			(self.doc.employee_type, "employment_type"),
			(self.doc.branch, "branch"),
			(self.doc.designation, "designation"),
			(self.doc.department, "department"),
			(self.doc.grade, "grade"),
		]
		conditions = []
		values = []
		for value, fieldname in filters:
			if value:
				conditions.append(fieldname + " = %s")
				values.append(value)
		emp_query = "select name from `tabEmployee`"
		if conditions:
			emp_query += " where " + " and ".join(conditions)
			return webnotes.conn.sql(emp_query, tuple(values))
		return webnotes.conn.sql(emp_query)

	# ----------------
	# validate values
	# ----------------
	def validate_values(self):
		"""Raise (with a user message) if any mandatory field is empty.

		Bug fix: the original used the field *values* as dict keys, so two
		empty fields collapsed into one entry and only one was reported.
		"""
		required = [
			(self.doc.fiscal_year, 'Fiscal Year'),
			(self.doc.leave_type, 'Leave Type'),
			(self.doc.no_of_days, 'New Leaves Allocated'),
		]
		for value, label in required:
			if not value:
				msgprint("Please enter : " + label)
				raise Exception

	# Allocation
	# **********************************************************************
	def allocate_leave(self):
		"""Create and submit a Leave Allocation for every matching employee."""
		self.validate_values()
		for d in self.get_employees():
			la = Document('Leave Allocation')
			la.employee = cstr(d[0])
			la.employee_name = webnotes.conn.get_value('Employee',cstr(d[0]),'employee_name')
			la.leave_type = self.doc.leave_type
			la.fiscal_year = self.doc.fiscal_year
			la.posting_date = nowdate()
			la.carry_forward = cint(self.doc.carry_forward)
			la.new_leaves_allocated = flt(self.doc.no_of_days)
			la_obj = get_obj(doc=la)
			la_obj.doc.docstatus = 1
			la_obj.validate()
			la_obj.on_update()
			la_obj.doc.save(1)
		msgprint("Leaves Allocated Successfully")
| agpl-3.0 |
un4get/xbmctorent | resources/site-packages/xbmctorrent/tvdb.py | 5 | 7208 | BASE_URL = "http://www.thetvdb.com"
# Default request headers, TVDB XML API endpoint, project API key, and
# metadata language for all requests in this module.
HEADERS = {
    "Referer": BASE_URL,
}
API_URL = "%s/api" % BASE_URL
API_KEY = "1D62F2F90030C444"
LANG = "en"
def dom2dict(node):
    """Recursively flatten an ElementTree node into a plain dict.

    Leaf children become ``{tag: text}`` entries; children with their own
    sub-elements are appended to a list under their (lower-cased) tag, so
    repeated container tags accumulate.
    """
    out = {}
    for child in node:
        tag = child.tag.lower()
        if len(child):
            # container element -> recurse and collect into a list
            out.setdefault(tag, []).append(dom2dict(child))
        else:
            # leaf element -> keep its text (may be None)
            out[tag] = child.text
    return out
def split_keys(meta, *keys):
    """Split the ``'|'``-separated string values of *keys* in *meta* into
    lists of non-empty parts, in place; falsy values are left untouched.

    Bug fix: the original stored ``filter(None, ...)``, which is a list on
    Python 2 but a lazy one-shot iterator on Python 3; a list comprehension
    behaves identically on both.
    """
    for key in keys:
        if meta.get(key):
            meta[key] = [part for part in meta[key].split("|") if part]
    return meta
def image_url(fragment):
    """Turn a banner-path fragment into an absolute TVDB image URL."""
    return "{0}/banners/{1}".format(BASE_URL, fragment)
def banner(show_id):
    """Return the graphical banner URL for *show_id*."""
    from urlparse import urljoin  # Python 2 stdlib module
    fragment = "banners/graphical/%s" % show_id
    return urljoin(BASE_URL, fragment)
def season_url(show_id, season):
    """Return the season poster URL; *season* is formatted with %d."""
    path = "banners/seasons/%s-%d-2.jpg" % (show_id, season)
    return "%s/%s" % (BASE_URL, path)
def update_image_urls(meta):
    """Recursively rewrite known image-path fields in *meta* to absolute
    URLs, in place, and return *meta*.

    Bug fix: the original used ``map(update_image_urls, v)`` for list
    values, which is eager on Python 2 but lazy on Python 3 (so nested
    lists were never processed there); an explicit loop behaves the same
    on both.
    """
    if isinstance(meta, dict):
        for k, v in meta.items():
            if isinstance(v, list):
                for item in v:
                    update_image_urls(item)
            elif isinstance(v, dict):
                update_image_urls(v)
            elif k in ["banner", "fanart", "poster", "filename", "bannerpath", "vignettepath", "thumbnailpath"] and isinstance(v, basestring):
                meta[k] = image_url(v)
    return meta
def show_url(show_id):
    """Return the per-series XML endpoint for *show_id* in language LANG."""
    return "{0}/{1}/series/{2}/{3}.xml".format(API_URL, API_KEY, show_id, LANG)
def show_base_url(show_id):
    """Return the base API URL for *show_id* (no trailing resource)."""
    return "{0}/{1}/series/{2}".format(API_URL, API_KEY, show_id)
def show_banners_url(show_id):
    """Return the banners.xml endpoint for *show_id*.

    Bug fix: the original format string ``"%s/banners.xml"`` was given
    three arguments, which raises ``TypeError: not all arguments converted``
    whenever the function is called. The path now mirrors the one built in
    :func:`get_banners` from :func:`show_base_url`.
    """
    return "%s/%s/series/%s/banners.xml" % (API_URL, API_KEY, show_id)
def get(show_id):
    """Return TVDB series metadata for *show_id* as a plain dict.

    The result is memoised in a persistent shelf keyed by show id; on a
    cache miss the series XML is fetched, flattened with dom2dict, its
    '|'-separated fields split, and its image paths made absolute.
    Returns None when the API response contains no series element.
    """
    from xbmctorrent.caching import shelf
    with shelf("com.thetvdb.show.%s" % show_id) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get
            dom = ET.fromstring(url_get(show_url(show_id), headers=HEADERS, with_immunicity=False))
            if not len(dom):
                # empty response -> no metadata cached, caller gets None
                return
            meta = dom2dict(dom[0])
            meta = split_keys(meta, "actors", "genre", "writer")
            update_image_urls(meta)
            show.update(meta)
        return dict(show)
def search(name, complete=False):
    """Search TVDB for a show by *name*, caching results by query hash.

    With ``complete=False`` only the lightweight GetSeries result (with
    absolute image URLs) is returned and nothing is cached; with
    ``complete=True`` the full series metadata from :func:`get` is cached
    in the shelf and returned.

    Returns None when the API response contains no series element.
    """
    from xbmctorrent.caching import shelf
    import hashlib
    # Key the cache on a digest of the query string.
    search_hash = hashlib.sha1(name).hexdigest()
    with shelf("com.thetvdb.search.%s" % search_hash) as show:
        if not show:
            import xml.etree.ElementTree as ET
            from xbmctorrent.utils import url_get
            dom = ET.fromstring(url_get("%s/api/GetSeries.php" % BASE_URL, params={
                "seriesname": name,
            }, headers=HEADERS, with_immunicity=False))
            if not len(dom):
                return
            meta = dom2dict(dom[0])
            if not complete:
                # lightweight path: return without populating the cache
                return update_image_urls(meta)
            show.update(get(meta["id"]))
        return show
def get_banners(show_id):
    """Fetch and return the list of banner dicts for *show_id*.

    Image paths are rewritten to absolute URLs. Returns None when the
    banners.xml response contains no banner elements.
    """
    import xml.etree.ElementTree as ET
    from xbmctorrent.utils import url_get
    r = url_get("%s/banners.xml" % show_base_url(show_id), headers=HEADERS, with_immunicity=False)
    dom = ET.fromstring(r)
    if not len(dom):
        return
    return update_image_urls(dom2dict(dom))["banner"]
def get_all_meta(show_id):
    """Fetch the full series metadata and banners for *show_id* in parallel.

    Returns the series dict with its episode list attached under
    ``"episodes"`` and its banner list under ``"banners"``.
    """
    import xml.etree.ElementTree as ET
    from concurrent import futures
    # NOTE(review): `joining` is imported but unused here -- candidate for removal.
    from xbmctorrent.utils import url_get, joining
    def _get_all_meta():
        # Fetch and flatten the "all/<lang>.xml" document for the show.
        r = url_get("%s/all/%s.xml" % (show_base_url(show_id), LANG), headers=HEADERS, with_immunicity=False)
        dom = ET.fromstring(r)
        if not len(dom):
            return
        return update_image_urls(dom2dict(dom))
    # Run the two HTTP fetches concurrently.
    with futures.ThreadPoolExecutor(max_workers=2) as pool:
        meta = pool.submit(_get_all_meta)
        banners = pool.submit(get_banners, show_id)
    meta = meta.result()
    # Attach the episode list to the (single) series entry.
    meta["series"][0]["episodes"] = meta["episode"]
    meta = meta["series"][0]
    meta["banners"] = banners.result() or []
    return meta
def get_list_item(meta):
    """Build an XBMC list-item dict describing a TV show from TVDB metadata.

    Missing fields collapse to "" (or [] for the cast lists) so the item is
    always safe to hand to the UI.
    """
    def field(key):
        return meta.get(key) or ""

    def joined(key):
        return ", ".join(meta[key]) if meta.get(key) else ""

    first_aired = field("firstaired")
    return {
        "label": meta["seriesname"],
        "icon": field("poster"),
        "thumbnail": field("poster"),
        "info": {
            "count": meta["id"],
            "title": meta["seriesname"],
            "genre": joined("genre"),
            "plot": field("overview"),
            "plot_outline": field("overview"),
            "tagline": field("overview"),
            "rating": field("rating"),
            "code": field("imdb_id"),
            "mpaa": field("contentrating"),
            "cast": field("actors") or [],
            "castandrole": field("actors") or [],
            "tvshowtitle": meta["seriesname"],
            "studio": field("network"),
            "status": field("status"),
            "premiered": field("firstaired"),
            "duration": field("runtime"),
            "picturepath": field("poster"),
            "year": first_aired.split("-")[0] if first_aired else "",
            "votes": "%s votes" % meta["ratingcount"],
        },
        "properties": {
            "fanart_image": field("fanart"),
        },
    }
def get_season_list_item(meta, season):
    """Build an XBMC list-item dict for one season of a show.

    The TVDB season id is taken from the first episode of the requested
    season; a season-type banner, when present, supplies the icon/thumbnail.

    Bug fix: the original indexed ``filter(...)[0]``, which works on
    Python 2 (filter returns a list) but raises TypeError on Python 3;
    a list comprehension behaves identically on both (still IndexError
    when the season has no episodes).
    """
    m = lambda x: meta.get(x) or ""
    season_episodes = [ep for ep in meta["episodes"] if int(ep["seasonnumber"]) == season]
    season_id = season_episodes[0]["seasonid"]
    item = {
        "label": "Season %d" % season,
        "info": {
            "count": season_id,
            "tvshowtitle": meta["seriesname"],
            "season": season,
        },
        "properties": {
            "fanart_image": m("fanart"),
        },
    }
    season_banners = [banner for banner in meta["banners"] if banner["bannertype"] == "season" and int(banner["season"]) == season]
    if season_banners:
        item["icon"] = item["thumbnail"] = season_banners[0]["bannerpath"]
    return item
def build_episode_list_items(show_meta, season):
    """Yield XBMC list-item dicts for every episode of *season*, ordered by
    episode number.

    Bug fix: the original stored ``filter(None, gueststars.split("|"))`` in
    ``"cast"``, which is a list on Python 2 but a lazy iterator on Python 3;
    a list comprehension behaves identically on both.
    """
    episodes = [ep for ep in show_meta["episodes"] if int(ep["seasonnumber"]) == season]
    episodes.sort(key=lambda ep: int(ep["episodenumber"]))
    for episode in episodes:
        m = lambda x: episode.get(x) or ""
        guest_stars = episode.get("gueststars")
        writers = episode.get("writer")
        yield {
            "label": m("episodename"),
            "icon": m("filename"),
            "thumbnail": m("filename"),
            "info": {
                "count": m("id"),
                "season": season,
                "episode": m("episodenumber"),
                "title": m("episodename"),
                "originaltitle": m("episodename"),
                "plot": m("overview"),
                "plot_outline": m("overview"),
                "tagline": m("overview"),
                "rating": float(m("rating") or 0),
                "code": m("imdb_id"),
                "premiered": m("firstaired"),
                "cast": [s for s in guest_stars.split("|") if s] if guest_stars else [],
                "tvshowtitle": show_meta.get("seriesname") or "",
                "writer": ", ".join(s for s in writers.split("|") if s) if writers else "",
            },
            "properties": {
                "fanart_image": show_meta.get("fanart") or "",
            },
        }
| gpl-3.0 |
ryanlovett/datahub | scripts/rsync-active-users.py | 2 | 5300 | """
rsync home directories of active users only.
When rsyncing home directories of users across disks, it is extremely
helpful to only do so for users who have been recently active. The hub
API allows us to see who those users are, and we can rsync just their
home directories.
An environment variable 'JUPYTERHUB_TOKEN' must be set with an admin token,
obtainable from {hub_url}/hub/token by an admin user.
"""
import os
import requests
from dateutil.parser import parse
from datetime import datetime, timedelta, timezone
import argparse
import subprocess
# Copied from https://github.com/minrk/escapism/blob/d1d406c69b9ab0b14aa562d98a9e198adf9c047a/escapism.py
# this is the library JuptyerHub uses to escape usernames into a form that works for filesystem paths
import warnings
import string
import sys
# Characters that survive escaping unchanged, and the marker used to
# introduce escaped hex bytes.
SAFE = set(string.ascii_letters + string.digits)
ESCAPE_CHAR = '_'
def _escape_char(c, escape_char=ESCAPE_CHAR):
    """Escape a single character: escape_char + uppercase hex for each
    UTF-8 byte of *c*.
    """
    return ''.join('%s%X' % (escape_char, byte) for byte in c.encode('utf8'))
def escape(to_escape, safe=SAFE, escape_char=ESCAPE_CHAR, allow_collisions=False):
    """Escape *to_escape* so the result only contains characters in *safe*.

    Characters outside the safe set are replaced by ``escape_char`` followed
    by the hex value of each of their UTF-8 bytes.

    With ``allow_collisions=True``, literal occurrences of *escape_char* in
    the input are left unescaped. The result is then ambiguous and cannot be
    reversed by ``unescape``; only use that mode when collisions cannot
    occur or do not matter, and unescape will never be called.

    .. versionadded: 1.0
        allow_collisions argument.
        Prior to 1.0, behavior was the same as allow_collisions=False (default).
    """
    if isinstance(to_escape, bytes):
        # normalise to text so iteration yields characters
        to_escape = to_escape.decode('utf8')
    if not isinstance(safe, set):
        safe = set(safe)

    if allow_collisions:
        safe.add(escape_char)
    elif escape_char in safe:
        # An escape char inside the safe set makes output ambiguous; warn
        # and escape it anyway.
        warnings.warn(
            "Escape character %r cannot be a safe character."
            " Set allow_collisions=True if you want to allow ambiguous escaped strings."
            % escape_char,
            RuntimeWarning,
            stacklevel=2,
        )
        safe.remove(escape_char)

    return u''.join(
        ch if ch in safe else _escape_char(ch, escape_char)
        for ch in to_escape
    )
def get_all_users(hub_url, token):
    """Return all users from the JupyterHub API, with each non-empty
    'last_activity' field parsed into a datetime object.
    """
    url = f'{hub_url}/hub/api/users'
    resp = requests.get(url, headers={
        'Authorization': f'token {token}'
    })
    users = resp.json()
    for user in users:
        if user['last_activity']:
            # replace the ISO-8601 string with an aware datetime
            user['last_activity'] = parse(user.get('last_activity'))
    return users
def main():
    """Parse CLI arguments and rsync the home directories of users active
    within the last N hours (dry-run unless --actually-run-rsync is given).
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('hub_url',
        help='Base URL of the JupyterHub to talk to'
    )
    argparser.add_argument('hours_ago',
        help='How recently should the user have been active to be rsynced',
        type=int,
    )
    argparser.add_argument('src_basedir',
        help='Base directory containing home directories to be rsynced'
    )
    argparser.add_argument('dest_basedir',
        help='Base directory where home directories should be rsynced to'
    )
    argparser.add_argument('--actually-run-rsync',
        action='store_true',
        help="Actually run rsync, otherwise we just dry-run"
    )
    args = argparser.parse_args()

    if 'JUPYTERHUB_TOKEN' not in os.environ:
        print('Could not find JUPYTERHUB_TOKEN in environment.')
        # Bug fix: this message lacked the f prefix, so "{args.hub_url}"
        # was printed literally instead of the hub URL.
        print(f'Please get an admin user\'s token from {args.hub_url}/hub/token')
        sys.exit(1)

    # Collect names of users active since the cutoff.
    time_since = datetime.now(timezone.utc) - timedelta(hours=args.hours_ago)
    users_since = []
    for user in get_all_users(args.hub_url, os.environ['JUPYTERHUB_TOKEN']):
        if user['last_activity'] and user['last_activity'] >= time_since:
            users_since.append(user['name'])

    safe_chars = set(string.ascii_lowercase + string.digits)
    for user in users_since:
        # Escaping logic from https://github.com/jupyterhub/kubespawner/blob/0eecad35d8829d8d599be876ee26c192d622e442/kubespawner/spawner.py#L1340
        # this must match how the hub names home directories on disk.
        homedir = escape(user, safe_chars, '-').lower()
        src_homedir = os.path.join(args.src_basedir, homedir)
        if not os.path.exists(src_homedir):
            print(f"Directory {src_homedir} does not exist for user {user}, aborting")
            sys.exit(1)
        # rsync src_homedir *into* dest_basedir, which creates/updates
        # <dest_basedir>/<homedir>; no separate destination path is needed
        # (the original computed one and never used it).
        rsync_cmd = [
            'rsync', '-av',
            '--delete', '--ignore-errors',
            src_homedir, args.dest_basedir
        ]
        print('Running ' + ' '.join(rsync_cmd))
        if args.actually_run_rsync:
            subprocess.check_call(rsync_cmd)

    if not args.actually_run_rsync:
        print("No rsync commands were actually performed")
        print("Check the rsync commands output, and then run this command with `--actually-run-rsync`")
| bsd-3-clause |
ONSdigital/ras-frontstage | tests/integration/test_secure_message.py | 1 | 11415 | import unittest
from unittest.mock import patch
import requests_mock
from frontstage import app
from frontstage.exceptions.exceptions import IncorrectAccountAccessError
from tests.integration.mocked_services import (
conversation_list_json,
encoded_jwt_token,
message_json,
url_banner_api,
url_get_conversation_count,
url_get_survey_long_name,
url_get_thread,
url_get_threads,
url_send_message,
)
def create_api_error(status_code, data=None):
    """Build the frontstage API error envelope used in these tests."""
    return {"error": {"status_code": status_code, "data": data}}
class TestSecureMessage(unittest.TestCase):
    def setUp(self):
        """Create a Flask test client with a session cookie, patch redis to
        return a valid encoded JWT, and prepare shared form data/headers.
        """
        self.app = app.test_client()
        self.app.testing = True
        self.app.set_cookie("localhost", "authorization", "session_key")
        # Every test sees an authenticated session: redis.get -> encoded JWT.
        self.patcher = patch("redis.StrictRedis.get", return_value=encoded_jwt_token)
        self.patcher.start()
        self.message_form = {
            "subject": "subject",
            "body": "body",
            "send": "Send",
            "thread_id": "7bc5d41b-0549-40b3-ba76-42f6d4cf3fdb",
        }
        self.headers = {
            "Authorization": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoicmluZ3JhbUBub3d3aGVyZS5jb20iLCJ1c2VyX3Njb3BlcyI6WyJjaS5yZWFkIiwiY2kud3JpdGUiXX0.se0BJtNksVtk14aqjp7SvnXzRbEKoqXb8Q5U9VVdy54"  # NOQA
        }
    def tearDown(self):
        # Undo the redis patch started in setUp.
        self.patcher.stop()
    @requests_mock.mock()
    def test_get_thread_success(self, mock_request):
        """A thread page renders message, survey and sender details."""
        mock_request.get(url_banner_api, status_code=404)
        mock_request.get(url_get_thread, json={"messages": [message_json], "is_closed": False})
        mock_request.get(url_get_conversation_count, json={"total": 0})
        mock_request.get(
            url_get_survey_long_name,
            json={
                "id": "02b9c366-7397-42f7-942a-76dc5876d86d",
                "shortName": "QBS",
                "longName": "Quarterly Business Survey",
                "surveyRef": "139",
                "legalBasis": "Statistics of Trade Act 1947",
                "surveyType": "Business",
                "surveyMode": "EQ",
                "legalBasisRef": "STA1947",
            },
        )
        response = self.app.get(
            "secure-message/threads/9e3465c0-9172-4974-a7d1-3a01592d1594", headers=self.headers, follow_redirects=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Peter Griffin".encode() in response.data)
        self.assertTrue("testy2".encode() in response.data)
        self.assertTrue("something else".encode() in response.data)
        self.assertTrue("Quarterly Business Survey".encode() in response.data)
        self.assertTrue("OFFICE FOR NATIONAL STATISTICS".encode() in response.data)
        self.assertIn("You can now make changes to your name".encode(), response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_get_thread_failure(self, mock_request, message_count):
        """A malformed message (missing business details) yields a 500."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        message_json_copy = message_json.copy()
        del message_json_copy["@business_details"]
        mock_request.get(url_get_thread, json={"messages": [message_json_copy]})
        response = self.app.get(
            "secure-message/threads/9e3465c0-9172-4974-a7d1-3a01592d1594", headers=self.headers, follow_redirects=True
        )
        self.assertEqual(response.status_code, 500)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_get(self, mock_request, message_count):
        """GET on the create-message form renders successfully."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        response = self.app.get(
            "/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Create message".encode() in response.data)
@requests_mock.mock()
@patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
def test_create_message_post_success(self, mock_request, message_count):
mock_request.get(url_banner_api, status_code=404)
message_count.return_value = 0
sent_message_response = {
"msg_id": "d43b6609-0875-4ef8-a34e-f7df1bcc8029",
"status": "201",
"thread_id": "8caeff79-6067-4f2a-96e0-08617fdeb496",
}
mock_request.post(url_send_message, json=sent_message_response)
mock_request.get(url_get_threads, json=conversation_list_json)
response = self.app.post(
"/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
data=self.message_form,
headers=self.headers,
follow_redirects=True,
)
self.assertEqual(response.status_code, 200)
self.assertTrue("ONS Business Surveys Team".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_success_api_failure(self, mock_request, message_count):
        """A 500 from the send-message API surfaces as the generic error page."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        mock_request.post(url_send_message, status_code=500)
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 500)
        self.assertTrue("An error has occurred".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_bad_gateway(self, mock_request, message_count):
        """A 502 from the send-message API also surfaces as the generic error page."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        mock_request.post(url_send_message, status_code=502)
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 500)
        self.assertTrue("An error has occurred".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_no_body(self, mock_request, message_count):
        """Omitting the message body re-renders the form with a validation error."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        # NOTE(review): mutates the shared form fixture in place; safe only if
        # setUp rebuilds self.message_form per test — confirm.
        del self.message_form["body"]
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Message is required".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_body_too_long(self, mock_request, message_count):
        """A body over the 50000-character limit triggers a validation error."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        self.message_form["body"] = "a" * 50100  # just over the 50000 limit
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&ru_ref=456&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("Message must be less than 50000 characters".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_no_case_id(self, mock_request, message_count):
        """Posting without a case_id still succeeds — case id is optional."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        sent_message_response = {
            "msg_id": "d43b6609-0875-4ef8-a34e-f7df1bcc8029",
            "status": "201",
            "thread_id": "8caeff79-6067-4f2a-96e0-08617fdeb496",
        }
        mock_request.post(url_send_message, json=sent_message_response)
        mock_request.get(url_get_threads, json=conversation_list_json)
        response = self.app.post(
            "/secure-message/create-message/?ru_ref=456&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        # case id is optional
        self.assertEqual(response.status_code, 200)
        self.assertTrue("ONS Business Surveys Team".encode() in response.data)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_no_survey_id(self, mock_request, message_count):
        """Posting without the survey query parameter is rejected with a 400."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&ru_ref=456",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 400)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_create_message_post_no_ru_ref(self, mock_request, message_count):
        """Posting without the ru_ref query parameter is rejected with a 400."""
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        response = self.app.post(
            "/secure-message/create-message/?case_id=123&survey=789",
            data=self.message_form,
            headers=self.headers,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 400)
    @requests_mock.mock()
    @patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
    def test_get_thread_wrong_account(self, mock_request, message_count):
        """A 404 thread fetch is expected to raise IncorrectAccountAccessError.

        NOTE(review): `self.assertRaises(IncorrectAccountAccessError)` is
        called without a callable and not used as a `with` context manager,
        so it asserts nothing — this test passes vacuously. It should wrap
        the request/controller call expected to raise, e.g.
        `with self.assertRaises(IncorrectAccountAccessError): ...`. The
        intended call site is not visible here, so flagging rather than
        guessing a fix.
        """
        mock_request.get(url_banner_api, status_code=404)
        message_count.return_value = 0
        mock_request.get(url_get_thread, status_code=404, json={"messages": [message_json], "is_closed": False})
        self.assertRaises(IncorrectAccountAccessError)
@requests_mock.mock()
@patch("frontstage.controllers.conversation_controller._create_get_conversation_headers")
@patch("frontstage.controllers.conversation_controller.try_message_count_from_session")
def test_secure_message_unauthorized_return(self, mock_request, authorization, message_count):
mock_request.get(url_banner_api, status_code=404)
message_count.return_value = 0
authorization.return_value = {"Authorization": "wrong authorization"}
mock_request.get(url_get_thread, status_code=403)
response = self.app.get(
"secure-message/threads/9e3465c0-9172-4974-a7d1-3a01592d1594", headers=self.headers, follow_redirects=True
)
self.assertTrue("The page you are trying to view is not for this account.".encode() in response.data)
| mit |
ychfan/tensorflow | tensorflow/python/keras/_impl/keras/engine/topology_test.py | 17 | 26480 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#,============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.keras._impl import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TopologyConstructionTest(test.TestCase):
  """Tests for functional-API graph construction, node bookkeeping,
  layer sharing, masking, weight handling and (de)serialization."""

  def test_get_updates_for(self):
    """Updates are retrievable per conditioning input (None = unconditional)."""
    a = keras.layers.Input(shape=(2,))
    dense_layer = keras.layers.Dense(1)
    dense_layer.add_update(0, inputs=a)
    dense_layer.add_update(1, inputs=None)

    self.assertListEqual(dense_layer.get_updates_for(a), [0])
    self.assertListEqual(dense_layer.get_updates_for(None), [1])

  def test_get_losses_for(self):
    """Losses are retrievable per conditioning input (None = unconditional)."""
    a = keras.layers.Input(shape=(2,))
    dense_layer = keras.layers.Dense(1)
    dense_layer.add_loss(0, inputs=a)
    dense_layer.add_loss(1, inputs=None)

    self.assertListEqual(dense_layer.get_losses_for(a), [0])
    self.assertListEqual(dense_layer.get_losses_for(None), [1])

  def test_trainable_weights(self):
    """Toggling `trainable` on a model or a layer moves its weights between
    the trainable and non-trainable collections, for both APIs."""
    a = keras.layers.Input(shape=(2,))
    b = keras.layers.Dense(1)(a)
    model = keras.models.Model(a, b)

    weights = model.weights
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    model.trainable = True
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.layers[1].trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    # sequential model
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(1, input_dim=2))
    weights = model.weights

    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

    model.trainable = True
    self.assertListEqual(model.trainable_weights, weights)
    self.assertListEqual(model.non_trainable_weights, [])

    model.layers[0].trainable = False
    self.assertListEqual(model.trainable_weights, [])
    self.assertListEqual(model.non_trainable_weights, weights)

  def test_weight_loading(self):
    """get/set_weights round-trips, rejects mismatched weight lists, and
    HDF5 save/load (topological and by-name) preserves predictions."""
    with self.test_session():
      a = keras.layers.Input(shape=(2,))
      x = keras.layers.Dense(3)(a)
      b = keras.layers.Dense(1)(x)
      model = keras.models.Model(a, b)

      x = np.random.random((3, 2))
      ref_y = model.predict(x)
      weights = model.get_weights()
      model.set_weights(weights)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

      with self.assertRaises(ValueError):
        model.set_weights(weights[1:])
      with self.assertRaises(ValueError):
        model.set_weights(weights[::-1])

      if h5py is None:
        return  # Skip rest of test if H5py isn't available.

      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir)

      h5_path = os.path.join(temp_dir, 'test.h5')
      model.save_weights(h5_path)
      model.load_weights(h5_path)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

      model.load_weights(h5_path, by_name=True)
      y = model.predict(x)
      self.assertAllClose(ref_y, y)

  def test_learning_phase(self):
    """_uses_learning_phase propagates through merges and nested model
    calls; dropout changes outputs only with learning phase = 1."""
    with self.test_session():
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      a_2 = keras.layers.Dense(16, name='dense_1')(a)
      dp = keras.layers.Dropout(0.5, name='dropout')
      b_2 = dp(b)

      self.assertFalse(a_2._uses_learning_phase)
      self.assertTrue(b_2._uses_learning_phase)

      # test merge
      m = keras.layers.concatenate([a_2, b_2])
      self.assertTrue(m._uses_learning_phase)

      # Test recursion
      model = keras.models.Model([a, b], [a_2, b_2])
      self.assertTrue(model.uses_learning_phase)

      c = keras.layers.Input(shape=(32,), name='input_c')
      d = keras.layers.Input(shape=(32,), name='input_d')

      c_2, b_2 = model([c, d])
      self.assertTrue(c_2._uses_learning_phase)
      self.assertTrue(b_2._uses_learning_phase)

      # try actually running graph
      fn = keras.backend.function(
          model.inputs + [keras.backend.learning_phase()], model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
      fn_outputs_dp = fn([input_a_np, input_b_np, 1])
      # output a: nothing changes
      self.assertEqual(fn_outputs_no_dp[0].sum(), fn_outputs_dp[0].sum())
      # output b: dropout applied
      self.assertNotEqual(fn_outputs_no_dp[1].sum(), fn_outputs_dp[1].sum())

  def test_layer_call_arguments(self):
    # Test the ability to pass and serialize arguments to `call`.
    inp = keras.layers.Input(shape=(2,))
    x = keras.layers.Dense(3)(inp)
    x = keras.layers.Dropout(0.5)(x, training=True)
    model = keras.models.Model(inp, x)
    self.assertFalse(model.uses_learning_phase)

    # Test that argument is kept when applying the model
    inp2 = keras.layers.Input(shape=(2,))
    out2 = model(inp2)
    self.assertFalse(out2._uses_learning_phase)

    # Test that argument is kept after loading a model
    config = model.get_config()
    model = keras.models.Model.from_config(config)
    self.assertFalse(model.uses_learning_phase)

  def test_node_construction(self):
    """Layers record inbound/outbound nodes with correct tensors, shapes
    and masks; shared layers get one node per call."""
    # test basics
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')

    with self.assertRaises(ValueError):
      _ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
    with self.assertRaises(ValueError):
      _ = keras.layers.Input(shape=(32,), unknown_kwarg=None)

    self.assertListEqual(a.get_shape().as_list(), [None, 32])
    a_layer, a_node_index, a_tensor_index = a._keras_history
    b_layer, _, _ = b._keras_history
    self.assertEqual(len(a_layer._inbound_nodes), 1)
    self.assertEqual(a_tensor_index, 0)
    node = a_layer._inbound_nodes[a_node_index]
    self.assertEqual(node.outbound_layer, a_layer)

    self.assertListEqual(node.inbound_layers, [])
    self.assertListEqual(node.input_tensors, [a])
    self.assertListEqual(node.input_shapes, [(None, 32)])
    self.assertListEqual(node.output_tensors, [a])
    self.assertListEqual(node.output_shapes, [(None, 32)])

    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)

    self.assertEqual(len(dense._inbound_nodes), 2)
    self.assertEqual(len(dense._outbound_nodes), 0)
    self.assertListEqual(dense._inbound_nodes[0].inbound_layers, [a_layer])
    self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
    self.assertListEqual(dense._inbound_nodes[1].inbound_layers, [b_layer])
    self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
    self.assertListEqual(dense._inbound_nodes[0].input_tensors, [a])
    self.assertListEqual(dense._inbound_nodes[1].input_tensors, [b])

    # test layer properties
    test_layer = keras.layers.Dense(16, name='test_layer')
    a_test = test_layer(a)
    self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])
    self.assertEqual(test_layer.input, a)
    self.assertEqual(test_layer.output, a_test)
    self.assertEqual(test_layer.input_shape, (None, 32))
    self.assertEqual(test_layer.output_shape, (None, 16))

    self.assertEqual(dense.get_input_at(0), a)
    self.assertEqual(dense.get_input_at(1), b)
    self.assertEqual(dense.get_output_at(0), a_2)
    self.assertEqual(dense.get_output_at(1), b_2)
    self.assertEqual(dense.get_input_shape_at(0), (None, 32))
    self.assertEqual(dense.get_input_shape_at(1), (None, 32))
    self.assertEqual(dense.get_output_shape_at(0), (None, 16))
    self.assertEqual(dense.get_output_shape_at(1), (None, 16))
    self.assertEqual(dense.get_input_mask_at(0), None)
    self.assertEqual(dense.get_input_mask_at(1), None)
    self.assertEqual(dense.get_output_mask_at(0), None)
    self.assertEqual(dense.get_output_mask_at(1), None)

  def test_multi_input_layer(self):
    """A shared layer feeding a concatenate merge builds, runs, computes
    shapes/masks, and survives JSON round-trip."""
    with self.test_session():
      # test multi-input layer
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      dense = keras.layers.Dense(16, name='dense_1')
      a_2 = dense(a)
      b_2 = dense(b)

      merged = keras.layers.concatenate([a_2, b_2], name='merge')
      self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])
      merge_layer, merge_node_index, merge_tensor_index = merged._keras_history

      self.assertEqual(merge_node_index, 0)
      self.assertEqual(merge_tensor_index, 0)

      self.assertEqual(len(merge_layer._inbound_nodes), 1)
      self.assertEqual(len(merge_layer._outbound_nodes), 0)

      self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
      self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)

      c = keras.layers.Dense(64, name='dense_2')(merged)
      d = keras.layers.Dense(5, name='dense_3')(c)

      model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
      self.assertEqual(len(model.layers), 6)
      output_shapes = model._compute_output_shape([(None, 32), (None, 32)])
      self.assertListEqual(output_shapes[0].as_list(), [None, 64])
      self.assertListEqual(output_shapes[1].as_list(), [None, 5])
      self.assertListEqual(
          model.compute_mask([a, b], [None, None]), [None, None])

      # we don't check names of first 2 layers (inputs) because
      # ordering of same-level layers is not fixed
      self.assertListEqual([l.name for l in model.layers][2:],
                           ['dense_1', 'merge', 'dense_2', 'dense_3'])
      self.assertListEqual([l.name for l in model._input_layers],
                           ['input_a', 'input_b'])
      self.assertListEqual([l.name for l in model._output_layers],
                           ['dense_2', 'dense_3'])

      # actually run model
      fn = keras.backend.function(model.inputs, model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])

      # test get_source_inputs
      self.assertListEqual(keras.engine.topology.get_source_inputs(c), [a, b])

      # serialization / deserialization
      json_config = model.to_json()
      recreated_model = keras.models.model_from_json(json_config)
      recreated_model.compile('rmsprop', 'mse')

      self.assertListEqual([l.name for l in recreated_model.layers][2:],
                           ['dense_1', 'merge', 'dense_2', 'dense_3'])
      self.assertListEqual([l.name for l in recreated_model._input_layers],
                           ['input_a', 'input_b'])
      self.assertListEqual([l.name for l in recreated_model._output_layers],
                           ['dense_2', 'dense_3'])

      fn = keras.backend.function(recreated_model.inputs,
                                  recreated_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])

  def test_recursion(self):
    """A model applied as a layer inside another model keeps shapes, runs,
    and round-trips through get_config/from_config."""
    with self.test_session():
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      dense = keras.layers.Dense(16, name='dense_1')
      a_2 = dense(a)
      b_2 = dense(b)
      merged = keras.layers.concatenate([a_2, b_2], name='merge')
      c = keras.layers.Dense(64, name='dense_2')(merged)
      d = keras.layers.Dense(5, name='dense_3')(c)

      model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

      e = keras.layers.Input(shape=(32,), name='input_e')
      f = keras.layers.Input(shape=(32,), name='input_f')
      g, h = model([e, f])

      self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())
      self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())

      # test separate manipulation of different layer outputs
      i = keras.layers.Dense(7, name='dense_4')(h)

      final_model = keras.models.Model(
          inputs=[e, f], outputs=[i, g], name='final')
      self.assertEqual(len(final_model.inputs), 2)
      self.assertEqual(len(final_model.outputs), 2)
      self.assertEqual(len(final_model.layers), 4)

      # we don't check names of first 2 layers (inputs) because
      # ordering of same-level layers is not fixed
      self.assertListEqual([layer.name for layer in final_model.layers][2:],
                           ['model', 'dense_4'])
      self.assertListEqual(
          model.compute_mask([e, f], [None, None]), [None, None])
      self.assertListEqual(
          final_model._compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
                                                                    (10, 64)])

      # run recursive model
      fn = keras.backend.function(final_model.inputs, final_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])

      # test serialization
      model_config = final_model.get_config()
      recreated_model = keras.models.Model.from_config(model_config)

      fn = keras.backend.function(recreated_model.inputs,
                                  recreated_model.outputs)
      input_a_np = np.random.random((10, 32))
      input_b_np = np.random.random((10, 32))
      fn_outputs = fn([input_a_np, input_b_np])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])

  def test_multi_input_multi_output_recursion(self):
    """Partial reuse of a multi-output model's outputs composes and
    serializes correctly (config, JSON and optionally YAML)."""
    with self.test_session():
      # test multi-input multi-output
      a = keras.layers.Input(shape=(32,), name='input_a')
      b = keras.layers.Input(shape=(32,), name='input_b')

      dense = keras.layers.Dense(16, name='dense_1')
      a_2 = dense(a)
      b_2 = dense(b)
      merged = keras.layers.concatenate([a_2, b_2], name='merge')
      c = keras.layers.Dense(64, name='dense_2')(merged)
      d = keras.layers.Dense(5, name='dense_3')(c)

      model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

      j = keras.layers.Input(shape=(32,), name='input_j')
      k = keras.layers.Input(shape=(32,), name='input_k')
      _, n = model([j, k])

      o = keras.layers.Input(shape=(32,), name='input_o')
      p = keras.layers.Input(shape=(32,), name='input_p')
      q, _ = model([o, p])

      self.assertListEqual(n.get_shape().as_list(), [None, 5])
      self.assertListEqual(q.get_shape().as_list(), [None, 64])
      s = keras.layers.concatenate([n, q], name='merge_nq')
      self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])

      # test with single output as 1-elem list
      multi_io_model = keras.models.Model([j, k, o, p], [s])

      fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
      fn_outputs = fn([
          np.random.random((10, 32)), np.random.random((10, 32)),
          np.random.random((10, 32)), np.random.random((10, 32))
      ])
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

      # test with single output as tensor
      multi_io_model = keras.models.Model([j, k, o, p], s)

      fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
      fn_outputs = fn([
          np.random.random((10, 32)), np.random.random((10, 32)),
          np.random.random((10, 32)), np.random.random((10, 32))
      ])
      # note that the output of the function will still be a 1-elem list
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

      # test serialization
      model_config = multi_io_model.get_config()
      recreated_model = keras.models.Model.from_config(model_config)

      fn = keras.backend.function(recreated_model.inputs,
                                  recreated_model.outputs)
      fn_outputs = fn([
          np.random.random((10, 32)), np.random.random((10, 32)),
          np.random.random((10, 32)), np.random.random((10, 32))
      ])
      # note that the output of the function will still be a 1-elem list
      self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])

      config = model.get_config()
      keras.models.Model.from_config(config)

      model.summary()
      json_str = model.to_json()
      keras.models.model_from_json(json_str)

      if yaml is not None:
        yaml_str = model.to_yaml()
        keras.models.model_from_yaml(yaml_str)

  def test_invalid_graphs(self):
    """Ill-formed graphs (non-Input inputs, disconnected graphs, redundant
    inputs, garbage outputs) raise; redundant outputs are tolerated."""
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')

    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)

    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

    # input is not an Input tensor
    j = keras.layers.Input(shape=(32,), name='input_j')
    j = keras.layers.Dense(32)(j)
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])

    with self.assertRaises(Exception):
      keras.models.Model([j, k], [m, n])

    # disconnected graph
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j], [m, n])

    # redundant outputs
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])

    keras.models.Model([j, k], [m, n, n])

    # redundant inputs
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j, k, j], [m, n])

    # no idea what I'm doing: garbage as inputs/outputs
    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    with self.assertRaises(Exception):
      keras.models.Model([j, k], [m, n, 0])

  def test_raw_tf_compatibility(self):
    # test calling layers/models on TF tensors
    a = keras.layers.Input(shape=(32,), name='input_a')
    b = keras.layers.Input(shape=(32,), name='input_b')

    dense = keras.layers.Dense(16, name='dense_1')
    a_2 = dense(a)
    b_2 = dense(b)
    merged = keras.layers.concatenate([a_2, b_2], name='merge')
    c = keras.layers.Dense(64, name='dense_2')(merged)
    d = keras.layers.Dense(5, name='dense_3')(c)

    model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')

    j = keras.layers.Input(shape=(32,), name='input_j')
    k = keras.layers.Input(shape=(32,), name='input_k')
    m, n = model([j, k])
    tf_model = keras.models.Model([j, k], [m, n])

    j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
    k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
    m_tf, n_tf = tf_model([j_tf, k_tf])
    self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
    self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])

    # test merge
    keras.layers.concatenate([j_tf, k_tf], axis=1)
    keras.layers.add([j_tf, k_tf])

    # test tensor input
    x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
    keras.layers.InputLayer(input_tensor=x)

    x = keras.layers.Input(tensor=x)
    keras.layers.Dense(2)(x)

  def test_basic_masking(self):
    """A Masking layer produces an output mask of shape (batch, timesteps)."""
    a = keras.layers.Input(shape=(10, 32), name='input_a')
    b = keras.layers.Masking()(a)
    model = keras.models.Model(a, b)
    self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])

  def test_weight_preprocessing(self):
    """preprocess_weights_for_loading accepts Keras-1-format weights for a
    range of layer types without erroring."""
    input_dim = 3
    output_dim = 3
    size = 2
    cases = [
        [
            (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
            [np.random.random((2, 1)), np.random.random((2, 1))],
            (None, 3, 2),
        ],
        [
            (keras.layers.TimeDistributed(keras.layers.Dense(1))),
            [np.random.random((2, 1)), np.random.random((1,))],
            (None, 3, 2),
        ],
        [
            (keras.layers.Conv1D(output_dim, size, use_bias=False)),
            [np.random.random((output_dim, input_dim, size, 1))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.Conv2D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size))],
            (None, input_dim, 4, 4),
        ],
        [
            (keras.layers.Conv2DTranspose(output_dim, size,
                                          use_bias=False,
                                          data_format='channels_last')),
            [np.random.random((size, size, input_dim, output_dim))],
            (None, 4, 4, input_dim),
        ],
        [
            (keras.layers.Conv3D(output_dim, size,
                                 use_bias=False, data_format='channels_first')),
            [np.random.random((output_dim, input_dim, size, size, size))],
            (None, input_dim, 4, 4, 4),
        ],
        [
            (keras.layers.GRU(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
        [
            (keras.layers.LSTM(output_dim)),
            [np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,)),
             np.random.random((input_dim, output_dim)),
             np.random.random((output_dim, output_dim)),
             np.random.random((output_dim,))],
            (None, 4, input_dim),
        ],
    ]
    for layer, weights, input_shape in cases:
      layer.build(input_shape)
      _ = keras.engine.topology.preprocess_weights_for_loading(
          layer, weights, original_keras_version='1')

    model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
    _ = keras.engine.topology.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')

    x = keras.Input((2,))
    y = keras.layers.Dense(2)(x)
    model = keras.models.Model(x, y)
    _ = keras.engine.topology.preprocess_weights_for_loading(
        model, model.weights, original_keras_version='1')

  def test_layer_sharing_at_heterogenous_depth(self):
    """A layer reused at different graph depths survives config round-trip
    with identical predictions."""
    with self.test_session():
      x_val = np.random.random((10, 5))

      x = keras.Input(shape=(5,))
      a = keras.layers.Dense(5, name='A')
      b = keras.layers.Dense(5, name='B')
      output = a(b(a(b(x))))
      m = keras.models.Model(x, output)

      output_val = m.predict(x_val)

      config = m.get_config()
      weights = m.get_weights()

      m2 = keras.models.Model.from_config(config)
      m2.set_weights(weights)

      output_val_2 = m2.predict(x_val)
      self.assertAllClose(output_val, output_val_2, atol=1e-6)

  def test_layer_sharing_at_heterogenous_depth_with_concat(self):
    """Shared layers at different depths feeding a concat survive config
    round-trip with identical predictions."""
    with self.test_session():
      input_shape = (16, 9, 3)
      input_layer = keras.Input(shape=input_shape)

      a = keras.layers.Dense(3, name='dense_A')
      b = keras.layers.Dense(3, name='dense_B')
      c = keras.layers.Dense(3, name='dense_C')

      x1 = b(a(input_layer))
      x2 = a(c(input_layer))
      output = keras.layers.concatenate([x1, x2])

      m = keras.models.Model(inputs=input_layer, outputs=output)

      x_val = np.random.random((10, 16, 9, 3))
      output_val = m.predict(x_val)

      config = m.get_config()
      weights = m.get_weights()

      m2 = keras.models.Model.from_config(config)
      m2.set_weights(weights)

      output_val_2 = m2.predict(x_val)
      self.assertAllClose(output_val, output_val_2, atol=1e-6)
# Run the test suite when executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
alilotfi/django | django/conf/locale/pt/formats.py | 504 | 1717 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = r'j \d\e F \d\e Y'               # e.g. "25 de Outubro de 2006"
TIME_FORMAT = 'H:i'                            # e.g. "14:30"
DATETIME_FORMAT = r'j \d\e F \d\e Y à\s H:i'   # e.g. "25 de Outubro de 2006 às 14:30"
YEAR_MONTH_FORMAT = r'F \d\e Y'                # e.g. "Outubro de 2006"
MONTH_DAY_FORMAT = r'j \d\e F'                 # e.g. "25 de Outubro"
SHORT_DATE_FORMAT = 'd/m/Y'                    # e.g. "25/10/2006"
SHORT_DATETIME_FORMAT = 'd/m/Y H:i'            # e.g. "25/10/2006 14:30"
FIRST_DAY_OF_WEEK = 0  # Sunday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%d/%m/%Y', '%d/%m/%y',  # '2006-10-25', '25/10/2006', '25/10/06'
    # '%d de %b de %Y', '%d de %b, %Y',  # '25 de Out de 2006', '25 Out, 2006'
    # '%d de %B de %Y', '%d de %B, %Y',  # '25 de Outubro de 2006', '25 de Outubro, 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d/%m/%Y %H:%M:%S',     # '25/10/2006 14:30:59'
    '%d/%m/%Y %H:%M:%S.%f',  # '25/10/2006 14:30:59.000200'
    '%d/%m/%Y %H:%M',        # '25/10/2006 14:30'
    '%d/%m/%Y',              # '25/10/2006'
    '%d/%m/%y %H:%M:%S',     # '25/10/06 14:30:59'
    '%d/%m/%y %H:%M:%S.%f',  # '25/10/06 14:30:59.000200'
    '%d/%m/%y %H:%M',        # '25/10/06 14:30'
    '%d/%m/%y',              # '25/10/06'
]
# Number formatting: "1.234.567,89" style.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
qizenguf/MLC-STT | src/arch/mips/MipsISA.py | 61 | 2521 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
class MipsISA(SimObject):
    """gem5 configuration object for the MIPS ISA state.

    Maps onto the C++ class ``MipsISA::ISA`` declared in arch/mips/isa.hh;
    the attributes below are consumed by gem5's SimObject machinery.
    """
    type = 'MipsISA'
    cxx_class = 'MipsISA::ISA'
    cxx_header = "arch/mips/isa.hh"
    # Resolved via the configuration hierarchy (Parent.any walks upward).
    system = Param.System(Parent.any, "System this ISA object belongs to")
    # NOTE(review): the description string omits what is being counted --
    # presumably threads; confirm against the C++ side before changing it.
    num_threads = Param.UInt8(1, "Maximum number this ISA can handle")
    num_vpes = Param.UInt8(1, "Maximum number of vpes this ISA can handle")
| bsd-3-clause |
benchisell/photostream-bc | flask/lib/python2.7/site-packages/pip-1.5.6-py2.7.egg/pip/_vendor/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
# XML 1.0 "Extender" characters (Appendix B).  Bug fix: the data previously
# had a stray '#' before "[#x3031-#x3035]", which neither reChar nor
# reCharRange can parse, so charStringToList() would hit its
# ``assert len(item) == 1`` if this table were ever regenerated.
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
# Letter := BaseChar | Ideographic (XML 1.0, production [84]).
letter = " | ".join([baseChar, ideographic])
# NameChar without the leading-character restriction: letters, digits,
# '.', '-', '_', combining characters and extenders (XML 1.0, production [4]).
# ':' is omitted because it is reserved as the namespace separator.
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
# Characters permitted as the first character of a Name: letters and '_'.
nameFirst = " | ".join([letter, "_"])
# A single character reference such as "#x0041" (exactly four hex digits).
# Bug fix: the original class "[\d|A-F]" also matched a literal '|', so
# malformed tokens like "#x0|41" were silently accepted; "[\dA-F]" is the
# intended alternation-free character class.
reChar = re.compile(r"#x([\dA-F]{4})")
# An inclusive character range such as "[#x0041-#x005A]".
reCharRange = re.compile(r"\[#x([\dA-F]{4})-#x([\dA-F]{4})\]")
def charStringToList(chars):
    """Parse a ' | '-separated spec of "#xNNNN" codes, "[#xAAAA-#xBBBB]"
    ranges and bare single characters into a normalised list of inclusive
    [low, high] integer ranges."""
    ranges = []
    for token in (piece.strip() for piece in chars.split(" | ")):
        matched = False
        for pattern in (reChar, reCharRange):
            m = pattern.match(token)
            if m is None:
                continue
            bounds = [hexToInt(group) for group in m.groups()]
            if len(bounds) == 1:
                # A single code point becomes a degenerate [n, n] range.
                bounds = bounds * 2
            ranges.append(bounds)
            matched = True
            break
        if not matched:
            # Anything else must be a literal single character.
            assert len(token) == 1
            ranges.append([ord(token)] * 2)
    return normaliseCharList(ranges)
def normaliseCharList(charList):
    """Sort a list of inclusive [low, high] ranges and merge any that
    overlap or are adjacent, returning the minimal equivalent list.

    Note: merged entries reuse (and mutate) the input's inner lists, as the
    original implementation did.
    """
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        rv.append(charList[i])
        while i + j < len(charList) and charList[i + j][0] <= rv[-1][1] + 1:
            # Bug fix: the original assigned the next range's upper bound
            # unconditionally, so a fully-contained range (e.g. [2, 3]
            # inside [1, 10]) *shrank* the merged range.  Keep the maximum.
            rv[-1][1] = max(rv[-1][1], charList[i + j][1])
            j += 1
        i += j
    return rv
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)


def missingRanges(charList):
    """Return the complement of *charList* within [0, max_unicode].

    *charList* must be a non-empty, normalised (sorted and merged) list of
    inclusive [low, high] ranges; the result lists the gaps around and
    between them.
    """
    rv = []
    # Bug fix: the original tested ``charList[0] != 0`` -- comparing a
    # *range* (a list) against the int 0, which is always true -- so a list
    # starting at code point 0 produced an invalid leading range [0, -1].
    # Compare the first range's lower bound instead.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render a list of inclusive [low, high] code-point ranges as a regex
    character-class string, e.g. ``[a-z0]``."""
    parts = []
    for low, high in charList:
        if low == high:
            # Degenerate range: a single escaped character.
            parts.append(escapeRegexp(chr(low)))
        else:
            parts.append("%s-%s" % (escapeRegexp(chr(low)),
                                    escapeRegexp(chr(high))))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Convert a bare (prefix-less) hexadecimal string to an integer."""
    return int(hex_str, base=16)
def escapeRegexp(string):
    """Backslash-escape every character of *string* that is special inside
    a regular expression (including inside character classes)."""
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    # Each character is considered exactly once, so a single pass is
    # equivalent to the chain of str.replace() calls it replaces.
    return "".join("\\" + char if char in specialCharacters else char
                   for char in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerce names and character data from the (X)HTML infoset into data
    that is legal in an XML 1.0 document.

    Every lossy coercion emits a ``DataLossWarning``.  Characters illegal in
    a given position are replaced by the reversible escape ``U`` plus five
    uppercase hex digits (see ``escapeChar``/``unescapeChar``).
    """

    # Matches the escape sequences produced by escapeChar().
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): ``replaceChars`` is accepted but unused, exactly as
        # before; kept for interface compatibility.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        self.preventSingleQuotePubid = preventSingleQuotePubid
        # char -> "UXXXXX" escape, shared by escapeChar/getReplacementCharacter.
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return an XML-legal attribute name, or None if the attribute
        must be dropped entirely."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        """Return an XML-legal element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Return comment text that is legal inside an XML comment."""
        if self.preventDoubleDashComments:
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        # Bug fix: preventDashAtCommentEnd was stored by __init__ but never
        # consulted anywhere.  A trailing '-' would serialize as the illegal
        # sequence "--->", so pad it with a space when the option is on.
        if self.preventDashAtCommentEnd and data.endswith("-"):
            warnings.warn("Comments cannot end in a dash", DataLossWarning)
            data += " "
        return data

    def coerceCharacters(self, data):
        """Return character data with form feeds (illegal in XML) replaced
        by spaces, warning once per occurrence."""
        if self.replaceFormFeedCharacters:
            for i in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Return a public identifier containing only legal pubid characters."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Escape every character of *name* that is illegal in an XML Name
        (first character and rest are checked against different classes)."""
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst
        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        """Return the (cached) escape sequence for *char*."""
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Invert toXmlName(): expand every UXXXXX escape in *name* back to
        the character it encodes."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        """Escape *char* as 'U' + five hex digits and record it in the cache."""
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        """Invert escapeChar(): e.g. 'U0007A' -> 'z'."""
        return chr(int(charcode[1:], 16))
| bsd-3-clause |
xamfoo/thumbor-docker | setup/thumbor/vows/meta_vows.py | 3 | 3362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from os.path import abspath, join, dirname
from pyvows import Vows, expect
from tornado_pyvows.context import TornadoHTTPContext
from thumbor.app import ThumborServiceApp
from thumbor.importer import Importer
from thumbor.config import Config
from thumbor.context import Context, ServerParameters
# Directory holding the fixture images served by thumbor's file loader below.
storage_path = abspath(join(dirname(__file__), 'fixtures/'))
class BaseContext(TornadoHTTPContext):
    """Shared base for the vows batches below.

    The __init__ override adds nothing beyond delegating to
    TornadoHTTPContext; it is kept as an extension point.
    """
    def __init__(self, *args, **kw):
        super(BaseContext, self).__init__(*args, **kw)
@Vows.batch
class GetMeta(BaseContext):
    """Vows batch: /meta requests with a result storage that stores
    unsafe URLs, so the second request can be served from cache."""

    def get_app(self):
        # Build a full thumbor application with a file loader rooted at the
        # fixture directory and a file-based result storage.
        cfg = Config(
            SECURITY_KEY='ACME-SEC',
            LOADER='thumbor.loaders.file_loader',
            RESULT_STORAGE='thumbor.result_storages.file_storage',
            RESULT_STORAGE_STORES_UNSAFE=True,
            RESULT_STORAGE_EXPIRATION_SECONDS=2592000,
            FILE_LOADER_ROOT_PATH=storage_path,
            OPTIMIZERS=[
                'thumbor.optimizers.jpegtran'
            ]
        )
        importer = Importer(cfg)
        importer.import_modules()
        server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
        server.security_key = 'ACME-SEC'
        ctx = Context(server, cfg, importer)
        application = ThumborServiceApp(ctx)
        return application

    class WithMetadata(TornadoHTTPContext):
        # First request: metadata is computed and stored.
        def topic(self):
            response = self.get('/unsafe/meta/800x400/image.jpg')
            return (response.code, response.headers)

        def should_be_200(self, response):
            code, _ = response
            expect(code).to_equal(200)

    class FromCacheWithMetadata(TornadoHTTPContext):
        # Same URL again: should be satisfied from the result storage.
        def topic(self):
            response = self.get('/unsafe/meta/800x400/image.jpg')
            return (response.code, response.headers)

        def should_be_200(self, response):
            code, _ = response
            expect(code).to_equal(200)
@Vows.batch
class GetMetaWithoutStorage(BaseContext):
    """Vows batch: /meta requests with result storage disabled for unsafe
    URLs and no source storage, i.e. nothing is cached."""

    def get_app(self):
        # Same application as GetMeta except RESULT_STORAGE_STORES_UNSAFE is
        # off and the no-op source storage is used.
        cfg = Config(
            SECURITY_KEY='ACME-SEC',
            LOADER='thumbor.loaders.file_loader',
            RESULT_STORAGE='thumbor.result_storages.file_storage',
            RESULT_STORAGE_STORES_UNSAFE=False,
            RESULT_STORAGE_EXPIRATION_SECONDS=2592000,
            FILE_LOADER_ROOT_PATH=storage_path,
            STORAGE='thumbor.storages.no_storage',
            OPTIMIZERS=[
                'thumbor.optimizers.jpegtran'
            ]
        )
        importer = Importer(cfg)
        importer.import_modules()
        server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
        server.security_key = 'ACME-SEC'
        ctx = Context(server, cfg, importer)
        application = ThumborServiceApp(ctx)
        return application

    class WithMetadata(TornadoHTTPContext):
        # Request must still succeed even though nothing is stored.
        def topic(self):
            response = self.get('/unsafe/meta/800x400/image.jpg')
            return (response.code, response.headers)

        def should_be_200(self, response):
            code, _ = response
            expect(code).to_equal(200)
| mit |
eduNEXT/edunext-platform | openedx/core/lib/api/tests/test_exceptions.py | 4 | 1463 | """
Test Custom Exceptions
"""
import ddt
from django.test import TestCase
from rest_framework import exceptions as drf_exceptions
import six
@ddt.ddt
class TestDictExceptionsAllowDictDetails(TestCase):
    """
    Test that standard DRF exceptions can return dictionaries in error details.
    """

    def test_drf_errors_are_not_coerced_to_strings(self):
        # Demonstrate that dictionaries in exceptions are not coerced to strings.
        exc = drf_exceptions.AuthenticationFailed({u'error_code': -1})
        self.assertNotIsInstance(exc.detail, six.string_types)

    @ddt.data(
        drf_exceptions.AuthenticationFailed,
        drf_exceptions.NotAuthenticated,
        drf_exceptions.NotFound,
        drf_exceptions.ParseError,
        drf_exceptions.PermissionDenied,
    )
    def test_exceptions_allows_dict_detail(self, exception_class):
        # DRF preserves the dict structure but stringifies the leaf values.
        exc = exception_class({u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': u'-1'})

    def test_method_not_allowed_allows_dict_detail(self):
        # MethodNotAllowed takes the offending method as its first argument.
        exc = drf_exceptions.MethodNotAllowed(u'POST', {u'error_code': -1})
        self.assertEqual(exc.detail, {u'error_code': u'-1'})

    def test_not_acceptable_allows_dict_detail(self):
        # NotAcceptable additionally carries the renderers that *are* available.
        exc = drf_exceptions.NotAcceptable({u'error_code': -1}, available_renderers=['application/json'])
        self.assertEqual(exc.detail, {u'error_code': u'-1'})
        self.assertEqual(exc.available_renderers, ['application/json'])
| agpl-3.0 |
tanmaythakur/django | django/db/backends/postgresql_psycopg2/introspection.py | 326 | 10060 | from __future__ import unicode_literals
from collections import namedtuple
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
# Deliberately shadows the imported FieldInfo: the PostgreSQL backend extends
# it with the column default expression, which get_field_type() needs in
# order to recognise serial columns as AutoField.
FieldInfo = namedtuple('FieldInfo', FieldInfo._fields + ('default',))
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """PostgreSQL implementation of Django's database introspection,
    driven by queries against pg_catalog and information_schema."""

    # Maps type codes to Django Field types.
    data_types_reverse = {
        16: 'BooleanField',
        17: 'BinaryField',
        20: 'BigIntegerField',
        21: 'SmallIntegerField',
        23: 'IntegerField',
        25: 'TextField',
        700: 'FloatField',
        701: 'FloatField',
        869: 'GenericIPAddressField',
        1042: 'CharField',  # blank-padded
        1043: 'CharField',
        1082: 'DateField',
        1083: 'TimeField',
        1114: 'DateTimeField',
        1184: 'DateTimeField',
        1266: 'TimeField',
        1700: 'DecimalField',
    }

    ignored_tables = []

    # Lists every index on a table along with the (first) indexed column;
    # idx.indkey is a PostgreSQL int2vector of indexed attribute numbers.
    _get_indexes_query = """
        SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
        FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
            pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
        WHERE c.oid = idx.indrelid
            AND idx.indexrelid = c2.oid
            AND attr.attrelid = c.oid
            AND attr.attnum = idx.indkey[0]
            AND c.relname = %s"""

    def get_field_type(self, data_type, description):
        """Map a type OID to a Django field name; an integer column whose
        default is a nextval() sequence call is reported as AutoField."""
        field_type = super(DatabaseIntrospection, self).get_field_type(data_type, description)
        if field_type == 'IntegerField' and description.default and 'nextval' in description.default:
            return 'AutoField'
        return field_type

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # relkind 'r' = ordinary table, 'v' = view; system schemas and
        # invisible relations are excluded.
        cursor.execute("""
            SELECT c.relname, c.relkind
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE c.relkind IN ('r', 'v')
                AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
                AND pg_catalog.pg_table_is_visible(c.oid)""")
        return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
                for row in cursor.fetchall()
                if row[0] not in self.ignored_tables]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        # As cursor.description does not return reliably the nullable property,
        # we have to query the information_schema (#7783)
        cursor.execute("""
            SELECT column_name, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = %s""", [table_name])
        field_map = {line[0]: line[1:] for line in cursor.fetchall()}
        # SELECT ... LIMIT 1 is only issued to populate cursor.description.
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return [FieldInfo(*((force_text(line[0]),) + line[1:6]
                            + (field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
                for line in cursor.description]

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (field_name_other_table, other_table)}
        representing all relationships to the given table.
        """
        # contype 'f' restricts the scan to foreign-key constraints; only the
        # first column of each key (conkey[1]/confkey[1]) is considered.
        cursor.execute("""
            SELECT c2.relname, a1.attname, a2.attname
            FROM pg_constraint con
            LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
            LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
            LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
            LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
            WHERE c1.relname = %s
                AND con.contype = 'f'""", [table_name])
        relations = {}
        for row in cursor.fetchall():
            relations[row[1]] = (row[2], row[0])
        return relations

    def get_key_columns(self, cursor, table_name):
        """Return (column, referenced_table, referenced_column) triples for
        every foreign key on the table."""
        key_columns = []
        cursor.execute("""
            SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
            FROM information_schema.constraint_column_usage ccu
            LEFT JOIN information_schema.key_column_usage kcu
                ON ccu.constraint_catalog = kcu.constraint_catalog
                    AND ccu.constraint_schema = kcu.constraint_schema
                    AND ccu.constraint_name = kcu.constraint_name
            LEFT JOIN information_schema.table_constraints tc
                ON ccu.constraint_catalog = tc.constraint_catalog
                    AND ccu.constraint_schema = tc.constraint_schema
                    AND ccu.constraint_name = tc.constraint_name
            WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY'""", [table_name])
        key_columns.extend(cursor.fetchall())
        return key_columns

    def get_indexes(self, cursor, table_name):
        """Return {column: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the table."""
        # This query retrieves each index on the given table, including the
        # first associated field name
        cursor.execute(self._get_indexes_query, [table_name])
        indexes = {}
        for row in cursor.fetchall():
            # row[1] (idx.indkey) is stored in the DB as an array. It comes out as
            # a string of space-separated integers. This designates the field
            # indexes (1-based) of the fields that have indexes on the table.
            # Here, we skip any indexes across multiple fields.
            if ' ' in row[1]:
                continue
            if row[0] not in indexes:
                indexes[row[0]] = {'primary_key': False, 'unique': False}
            # It's possible to have the unique and PK constraints in separate indexes.
            if row[3]:
                indexes[row[0]]['primary_key'] = True
            if row[2]:
                indexes[row[0]]['unique'] = True
        return indexes

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Loop over the key table, collecting things as constraints
        # This will get PKs, FKs, and uniques, but not CHECK
        cursor.execute("""
            SELECT
                kc.constraint_name,
                kc.column_name,
                c.constraint_type,
                array(SELECT table_name::text || '.' || column_name::text
                      FROM information_schema.constraint_column_usage
                      WHERE constraint_name = kc.constraint_name)
            FROM information_schema.key_column_usage AS kc
            JOIN information_schema.table_constraints AS c ON
                kc.table_schema = c.table_schema AND
                kc.table_name = c.table_name AND
                kc.constraint_name = c.constraint_name
            WHERE
                kc.table_schema = %s AND
                kc.table_name = %s
            ORDER BY kc.ordinal_position ASC
        """, ["public", table_name])
        for constraint, column, kind, used_cols in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": kind.lower() == "primary key",
                    "unique": kind.lower() in ["primary key", "unique"],
                    "foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
                    "check": False,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Now get CHECK constraint columns
        cursor.execute("""
            SELECT kc.constraint_name, kc.column_name
            FROM information_schema.constraint_column_usage AS kc
            JOIN information_schema.table_constraints AS c ON
                kc.table_schema = c.table_schema AND
                kc.table_name = c.table_name AND
                kc.constraint_name = c.constraint_name
            WHERE
                c.constraint_type = 'CHECK' AND
                kc.table_schema = %s AND
                kc.table_name = %s
        """, ["public", table_name])
        for constraint, column in cursor.fetchall():
            # If we're the first column, make the record
            if constraint not in constraints:
                constraints[constraint] = {
                    "columns": [],
                    "primary_key": False,
                    "unique": False,
                    "foreign_key": None,
                    "check": True,
                    "index": False,
                }
            # Record the details
            constraints[constraint]['columns'].append(column)
        # Now get indexes
        cursor.execute("""
            SELECT
                c2.relname,
                ARRAY(
                    SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
                    FROM unnest(idx.indkey) i
                ),
                idx.indisunique,
                idx.indisprimary
            FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
                pg_catalog.pg_index idx
            WHERE c.oid = idx.indrelid
                AND idx.indexrelid = c2.oid
                AND c.relname = %s
        """, [table_name])
        for index, columns, unique, primary in cursor.fetchall():
            if index not in constraints:
                constraints[index] = {
                    "columns": list(columns),
                    "primary_key": primary,
                    "unique": unique,
                    "foreign_key": None,
                    "check": False,
                    "index": True,
                }
        return constraints
| bsd-3-clause |
marcel-dancak/QGIS | python/plugins/processing/gui/RangePanel.py | 12 | 3088 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RangePanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import pyqtSignal
from qgis.PyQt.QtWidgets import QDialog
from qgis.core import QgsProcessingParameterNumber
pluginPath = os.path.split(os.path.dirname(__file__))[0]

# Generate the widget and base classes from the .ui file at import time;
# DeprecationWarnings emitted by uic during loading are deliberately silenced.
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    WIDGET, BASE = uic.loadUiType(
        os.path.join(pluginPath, 'ui', 'widgetRangeSelector.ui'))
class RangePanel(BASE, WIDGET):
    """Processing panel with two spin boxes for entering a numeric
    "min,max" range for a QgsProcessingParameterNumber."""

    hasChanged = pyqtSignal()

    def __init__(self, param):
        super(RangePanel, self).__init__(None)
        self.setupUi(self)
        self.param = param
        # Integer or Double range: integer parameters hide the decimals.
        if self.param.dataType() == QgsProcessingParameterNumber.Integer:
            self.spnMin.setDecimals(0)
            self.spnMax.setDecimals(0)
        if param.defaultValue() is not None:
            self.setValue(param.defaultValue())
        # (A dead ``values = self.getValues()`` call was removed here.)
        # Spin range logic: keep min <= max whenever either box changes.
        self.spnMin.valueChanged.connect(lambda: self.setMinMax())
        self.spnMax.valueChanged.connect(lambda: self.setMaxMin())

    def setMinMax(self):
        """Push the max spin box up if the min value overtakes it."""
        values = self.getValues()
        if values[0] >= values[1]:
            self.spnMax.setValue(values[0])
        self.hasChanged.emit()

    def setMaxMin(self):
        """Pull the min spin box down if the max value drops below it."""
        values = self.getValues()
        if values[0] >= values[1]:
            self.spnMin.setValue(values[1])
        self.hasChanged.emit()

    def getValue(self):
        """Return the current range serialized as "min,max"."""
        return '{},{}'.format(self.spnMin.value(), self.spnMax.value())

    def getValues(self):
        """Return the current range as a [min, max] list of floats."""
        value = self.getValue()
        if value:
            return [float(a) for a in value.split(',')]

    def setValue(self, value):
        """Populate the spin boxes from a "min,max" string.

        Malformed or non-string values are silently ignored, preserving the
        previous best-effort behaviour, but the former bare ``except:`` is
        narrowed to the exceptions this body can actually raise.
        """
        try:
            values = value.split(',')
            minVal = float(values[0])
            maxVal = float(values[1])
            self.spnMin.setValue(minVal)
            self.spnMax.setValue(maxVal)
        except (AttributeError, IndexError, ValueError):
            return
coffenbacher/askbot-devel | askbot/views/readers.py | 4 | 30365 | # encoding:utf-8
"""
:synopsis: views "read-only" for main textual content
By main textual content is meant - text of Questions, Answers and Comments.
The "read-only" requirement here is not 100% strict, as for example "question" view does
allow adding new comments via Ajax form post.
"""
import datetime
import logging
import urllib
import operator
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponse
from django.http import Http404
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseBadRequest
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.template.loader import get_template
from django.template import RequestContext
from django.utils import simplejson
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils import translation
from django.views.decorators import csrf
from django.core.urlresolvers import reverse
from django.core import exceptions as django_exceptions
from django.contrib.humanize.templatetags import humanize
from django.http import QueryDict
from django.conf import settings as django_settings
import askbot
from askbot import exceptions
from askbot.utils.diff import textDiff as htmldiff
from askbot.forms import AnswerForm
from askbot.forms import ShowQuestionForm
from askbot.forms import GetUserItemsForm
from askbot.forms import GetDataForPostForm
from askbot.utils.loading import load_module
from askbot import conf
from askbot import models
from askbot.models.tag import Tag
from askbot import const
from askbot.startup_procedures import domain_is_bad
from askbot.utils import functions
from askbot.utils.html import sanitize_html
from askbot.utils.decorators import anonymous_forbidden, ajax_only, get_only
from askbot.search.state_manager import SearchState, DummySearchState
from askbot.templatetags import extra_tags
from askbot.conf import settings as askbot_settings
from askbot.views import context
# used in index page
#todo: - take these out of const or settings
from askbot.models import Post, Vote
INDEX_PAGE_SIZE = 30
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 25
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in questions
# used in answers
#refactor? - we have these
#views that generate a listing of questions in one way or another:
#index, unanswered, questions, search, tag
#should we dry them up?
#related topics - information drill-down, search refinement
def index(request):
    """Front page of the Q&A site: send visitors to the questions listing."""
    questions_url = reverse('questions')
    return HttpResponseRedirect(questions_url)
def questions(request, **kwargs):
    """
    List of Questions, Tagged questions, and Unanswered questions.
    matching search query or user selection

    Serves both regular GET requests (full page render) and ajax
    requests (JSON payload with pre-rendered html snippets).
    """
    #before = datetime.datetime.now()
    if request.method != 'GET':
        return HttpResponseNotAllowed(['GET'])

    # build search state (scope/tags/query/sort/page) from url kwargs
    search_state = SearchState(
        user_logged_in=request.user.is_authenticated(),
        **kwargs
    )

    qs, meta_data = models.Thread.objects.run_advanced_search(
        request_user=request.user, search_state=search_state
    )
    if meta_data['non_existing_tags']:
        # drop tags that matched nothing so the ui does not advertise them
        search_state = search_state.remove_tags(meta_data['non_existing_tags'])

    paginator = Paginator(qs, search_state.page_size)
    # fall back to page 1 when the requested page is out of range
    if paginator.num_pages < search_state.page:
        search_state.page = 1
    page = paginator.page(search_state.page)
    page.object_list = list(page.object_list) # evaluate the queryset

    # INFO: Because for the time being we need question posts and thread authors
    #       down the pipeline, we have to precache them in thread objects
    models.Thread.objects.precache_view_data_hack(threads=page.object_list)

    related_tags = Tag.objects.get_related_to_search(
        threads=page.object_list,
        ignored_tag_names=meta_data.get('ignored_tag_names',[])
    )
    tag_list_type = askbot_settings.TAG_LIST_FORMAT
    if tag_list_type == 'cloud': #force cloud to sort by name
        related_tags = sorted(related_tags, key = operator.attrgetter('name'))

    contributors = list(
        models.Thread.objects.get_thread_contributors(
            thread_list=page.object_list
        ).only('id', 'username', 'gravatar')
    )

    paginator_context = {
        'is_paginated' : (paginator.count > search_state.page_size),
        'pages': paginator.num_pages,
        'current_page_number': search_state.page,
        'page_object': page,
        'base_url' : search_state.query_string(),
        'page_size' : search_state.page_size,
    }

    # We need to pass the rss feed url based
    # on the search state to the template.
    # We use QueryDict to get a querystring
    # from dicts and arrays. Much cleaner
    # than parsing and string formating.
    rss_query_dict = QueryDict("").copy()
    if search_state.query:
        # We have search string in session - pass it to
        # the QueryDict
        rss_query_dict.update({"q": search_state.query})
    if search_state.tags:
        # We have tags in session - pass it to the
        # QueryDict but as a list - we want tags+
        rss_query_dict.setlist("tags", search_state.tags)
    context_feed_url = '/%sfeeds/rss/?%s' % (
        django_settings.ASKBOT_URL,
        rss_query_dict.urlencode()
    ) # Format the url with the QueryDict

    # number of active search refinements (query, tags, author) -
    # used by templates to render the search "reset" controls
    reset_method_count = len(filter(None, [search_state.query, search_state.tags, meta_data.get('author_name', None)]))

    if request.is_ajax():
        q_count = paginator.count

        #todo: words
        question_counter = ungettext('%(q_num)s question', '%(q_num)s questions', q_count)
        question_counter = question_counter % {'q_num': humanize.intcomma(q_count),}

        if q_count > search_state.page_size:
            paginator_tpl = get_template('main_page/paginator.html')
            paginator_html = paginator_tpl.render(
                RequestContext(
                    request, {
                        'context': paginator_context,
                        'questions_count': q_count,
                        'page_size' : search_state.page_size,
                        'search_state': search_state,
                    }
                )
            )
        else:
            paginator_html = ''

        questions_tpl = get_template('main_page/questions_loop.html')
        questions_html = questions_tpl.render(
            RequestContext(
                request, {
                    'threads': page,
                    'search_state': search_state,
                    'reset_method_count': reset_method_count,
                    'request': request
                }
            )
        )

        ajax_data = {
            'query_data': {
                'tags': search_state.tags,
                'sort_order': search_state.sort,
                'ask_query_string': search_state.ask_query_string(),
            },
            'paginator': paginator_html,
            'question_counter': question_counter,
            'faces': [],#[extra_tags.gravatar(contributor, 48) for contributor in contributors],
            'feed_url': context_feed_url,
            'query_string': search_state.query_string(),
            'page_size' : search_state.page_size,
            'questions': questions_html.replace('\n',''),
            'non_existing_tags': meta_data['non_existing_tags'],
        }
        ajax_data['related_tags'] = [{
            'name': escape(tag.name),
            'used_count': humanize.intcomma(tag.local_used_count)
        } for tag in related_tags]

        #here we add and then delete some items
        #to allow extra context processor to work
        ajax_data['tags'] = related_tags
        ajax_data['interesting_tag_names'] = None
        ajax_data['threads'] = page
        extra_context = context.get_extra(
            'ASKBOT_QUESTIONS_PAGE_EXTRA_CONTEXT',
            request,
            ajax_data
        )
        del ajax_data['tags']
        del ajax_data['interesting_tag_names']
        del ajax_data['threads']
        ajax_data.update(extra_context)

        return HttpResponse(simplejson.dumps(ajax_data), mimetype = 'application/json')

    else: # non-AJAX branch

        template_data = {
            'active_tab': 'questions',
            'author_name' : meta_data.get('author_name',None),
            'contributors' : contributors,
            'context' : paginator_context,
            'is_unanswered' : False,#remove this from template
            'interesting_tag_names': meta_data.get('interesting_tag_names', None),
            'ignored_tag_names': meta_data.get('ignored_tag_names', None),
            'subscribed_tag_names': meta_data.get('subscribed_tag_names', None),
            'language_code': translation.get_language(),
            'name_of_anonymous_user' : models.get_name_of_anonymous_user(),
            'page_class': 'main-page',
            'page_size': search_state.page_size,
            'query': search_state.query,
            'threads' : page,
            'questions_count' : paginator.count,
            'reset_method_count': reset_method_count,
            'scope': search_state.scope,
            'show_sort_by_relevance': conf.should_show_sort_by_relevance(),
            'search_tags' : search_state.tags,
            'sort': search_state.sort,
            'tab_id' : search_state.sort,
            'tags' : related_tags,
            'tag_list_type' : tag_list_type,
            'font_size' : extra_tags.get_tag_font_size(related_tags),
            'display_tag_filter_strategy_choices': conf.get_tag_display_filter_strategy_choices(),
            'email_tag_filter_strategy_choices': conf.get_tag_email_filter_strategy_choices(),
            'query_string': search_state.query_string(),
            'search_state': search_state,
            'feed_url': context_feed_url,
        }
        extra_context = context.get_extra(
            'ASKBOT_QUESTIONS_PAGE_EXTRA_CONTEXT',
            request,
            template_data
        )
        template_data.update(extra_context)
        template_data.update(context.get_for_tag_editor())

        #and one more thing:) give admin user heads up about
        #setting the domain name if they have not done that yet
        #todo: move this out to a separate middleware
        if request.user.is_authenticated() and request.user.is_administrator():
            if domain_is_bad():
                url = reverse(
                    'group_settings',
                    kwargs = {'group': 'QA_SITE_SETTINGS'}
                )
                url = url + '#id_QA_SITE_SETTINGS__APP_URL'
                msg = _(
                    'Please go to '
                    '<a href="%s">"settings->URLs, keywords and greetings"</a> '
                    'and set the base url for your site to function properly'
                ) % url
                request.user.message_set.create(message=msg)

        return render(request, 'main_page.html', template_data)
def get_top_answers(request):
    """Return a JSON payload with an html snippet of a user's top answers."""
    form = GetUserItemsForm(request.GET)
    if not form.is_valid():
        return HttpResponseBadRequest()

    user_id = form.cleaned_data['user_id']
    page_number = form.cleaned_data['page_number']
    owner = models.User.objects.get(id=user_id)
    paginator = owner.get_top_answers_paginator(visitor=request.user)
    top_answers = paginator.page(page_number).object_list
    answers_html = get_template('user_profile/user_answers_list.html').render(
        {'top_answers': top_answers}
    )
    payload = simplejson.dumps({
        'html': answers_html,
        'num_answers': paginator.count
    })
    return HttpResponse(payload, content_type='application/json')
def tags(request):#view showing a listing of available tags - plain list
    """Tags page: renders either a paginated plain list or a tag cloud
    (per the TAG_LIST_FORMAT setting); also answers ajax refreshes with
    a JSON-wrapped html snippet.
    """
    #1) Get parameters. This normally belongs to form cleaning.
    post_data = request.GET
    sortby = post_data.get('sort', 'used')
    try:
        page = int(post_data.get('page', '1'))
    except ValueError:
        # non-numeric page parameter: fall back to the first page
        page = 1

    if sortby == 'name':
        order_by = 'name'
    else:
        order_by = '-used_count'

    query = post_data.get('query', '').strip()
    tag_list_type = askbot_settings.TAG_LIST_FORMAT

    #2) Get query set for the tags.
    query_params = {
        'deleted': False,
        'language_code': translation.get_language()
    }
    if query != '':
        query_params['name__icontains'] = query

    # tags that were never used are hidden from the listing
    tags_qs = Tag.objects.filter(**query_params).exclude(used_count=0)

    tags_qs = tags_qs.order_by(order_by)

    #3) Start populating the template context.
    data = {
        'active_tab': 'tags',
        'page_class': 'tags-page',
        'tag_list_type' : tag_list_type,
        'stag' : query,
        'tab_id' : sortby,
        'keywords' : query,
        # blank search state - this page is not a search results page
        'search_state': SearchState(*[None for x in range(8)])
    }

    if tag_list_type == 'list':
        #plain listing is paginated
        objects_list = Paginator(tags_qs, DEFAULT_PAGE_SIZE)
        try:
            tags = objects_list.page(page)
        except (EmptyPage, InvalidPage):
            # out-of-range page: show the last available page instead
            tags = objects_list.page(objects_list.num_pages)

        paginator_data = {
            'is_paginated' : (objects_list.num_pages > 1),
            'pages': objects_list.num_pages,
            'current_page_number': page,
            'page_object': tags,
            'base_url' : reverse('tags') + '?sort=%s&' % sortby
        }
        paginator_context = functions.setup_paginator(paginator_data)
        data['paginator_context'] = paginator_context
    else:
        #tags for the tag cloud are given without pagination
        tags = tags_qs
        font_size = extra_tags.get_tag_font_size(tags)
        data['font_size'] = font_size

    data['tags'] = tags
    data.update(context.get_extra('ASKBOT_TAGS_PAGE_EXTRA_CONTEXT', request, data))

    if request.is_ajax():
        template = get_template('tags/content.html')
        template_context = RequestContext(request, data)
        json_data = {'success': True, 'html': template.render(template_context)}
        json_string = simplejson.dumps(json_data)
        return HttpResponse(json_string, content_type='application/json')
    else:
        return render(request, 'tags.html', data)
@csrf.csrf_protect
def question(request, id):#refactor - long subroutine. display question body, answers and comments
    """view that displays body of the question and
    all answers to it

    Also resolves permalinks to individual answers/comments (including
    ids from the pre-migration url scheme), counts page views, and
    prepares the new-answer form.

    todo: convert this view into class
    """
    #process url parameters
    #todo: fix inheritance of sort method from questions
    #before = datetime.datetime.now()
    form = ShowQuestionForm(request.GET)
    form.full_clean()#always valid
    show_answer = form.cleaned_data['show_answer']
    show_comment = form.cleaned_data['show_comment']
    show_page = form.cleaned_data['show_page']
    answer_sort_method = form.cleaned_data['answer_sort_method']

    #load question and maybe refuse showing deleted question
    #if the question does not exist - try mapping to old questions
    #and and if it is not found again - then give up
    try:
        question_post = models.Post.objects.filter(
            post_type = 'question',
            id = id
        ).select_related('thread')[0]
    except IndexError:
        # Handle URL mapping - from old Q/A/C/ URLs to the new one
        try:
            question_post = models.Post.objects.filter(
                post_type='question',
                old_question_id = id
            ).select_related('thread')[0]
        except IndexError:
            raise Http404

        # old-scheme answer/comment ids: redirect to the new permalink
        if show_answer:
            try:
                old_answer = models.Post.objects.get_answers().get(old_answer_id=show_answer)
                return HttpResponseRedirect(old_answer.get_absolute_url())
            except models.Post.DoesNotExist:
                pass
        elif show_comment:
            try:
                old_comment = models.Post.objects.get_comments().get(old_comment_id=show_comment)
                return HttpResponseRedirect(old_comment.get_absolute_url())
            except models.Post.DoesNotExist:
                pass

    try:
        question_post.assert_is_visible_to(request.user)
    except exceptions.QuestionHidden, error:
        request.user.message_set.create(message = unicode(error))
        return HttpResponseRedirect(reverse('index'))

    #redirect if slug in the url is wrong
    if request.path.split('/')[-2] != question_post.slug:
        logging.debug('no slug match!')
        question_url = '?'.join((
            question_post.get_absolute_url(),
            urllib.urlencode(request.GET)
        ))
        return HttpResponseRedirect(question_url)

    #resolve comment and answer permalinks
    #they go first because in theory both can be moved to another question
    #this block "returns" show_post and assigns actual comment and answer
    #to show_comment and show_answer variables
    #in the case if the permalinked items or their parents are gone - redirect
    #redirect also happens if id of the object's origin post != requested id
    show_post = None #used for permalinks
    if show_comment:
        #if url calls for display of a specific comment,
        #check that comment exists, that it belongs to
        #the current question
        #if it is an answer comment and the answer is hidden -
        #redirect to the default view of the question
        #if the question is hidden - redirect to the main page
        #in addition - if url points to a comment and the comment
        #is for the answer - we need the answer object
        try:
            show_comment = models.Post.objects.get_comments().get(id=show_comment)
        except models.Post.DoesNotExist:
            error_message = _(
                'Sorry, the comment you are looking for has been '
                'deleted and is no longer accessible'
            )
            request.user.message_set.create(message = error_message)
            return HttpResponseRedirect(question_post.thread.get_absolute_url())

        if str(show_comment.thread._question_post().id) != str(id):
            return HttpResponseRedirect(show_comment.get_absolute_url())
        show_post = show_comment.parent

        try:
            show_comment.assert_is_visible_to(request.user)
        except exceptions.AnswerHidden, error:
            request.user.message_set.create(message = unicode(error))
            #use reverse function here because question is not yet loaded
            return HttpResponseRedirect(reverse('question', kwargs = {'id': id}))
        except exceptions.QuestionHidden, error:
            request.user.message_set.create(message = unicode(error))
            return HttpResponseRedirect(reverse('index'))

    elif show_answer:
        #if the url calls to view a particular answer to
        #question - we must check whether the question exists
        #whether answer is actually corresponding to the current question
        #and that the visitor is allowed to see it
        show_post = get_object_or_404(models.Post, post_type='answer', id=show_answer)
        if str(show_post.thread._question_post().id) != str(id):
            return HttpResponseRedirect(show_post.get_absolute_url())

        try:
            show_post.assert_is_visible_to(request.user)
        except django_exceptions.PermissionDenied, error:
            request.user.message_set.create(message = unicode(error))
            return HttpResponseRedirect(reverse('question', kwargs = {'id': id}))

    thread = question_post.thread

    # in multilingual mode the thread is only shown under its own language
    if getattr(django_settings, 'ASKBOT_MULTILINGUAL', False):
        if thread.language_code != translation.get_language():
            return HttpResponseRedirect(thread.get_absolute_url())

    logging.debug('answer_sort_method=' + unicode(answer_sort_method))

    #load answers and post id's->athor_id mapping
    #posts are pre-stuffed with the correctly ordered comments
    question_post, answers, post_to_author, published_answer_ids = thread.get_post_data_for_question_view(
        sort_method=answer_sort_method,
        user=request.user
    )
    user_votes = {}
    user_post_id_list = list()
    #todo: cache this query set, but again takes only 3ms!
    if request.user.is_authenticated():
        user_votes = Vote.objects.filter(
            user=request.user,
            voted_post__id__in = post_to_author.keys()
        ).values_list('voted_post_id', 'vote')
        user_votes = dict(user_votes)
        #we can avoid making this query by iterating through
        #already loaded posts
        user_post_id_list = [
            post_id for post_id in post_to_author if post_to_author[post_id] == request.user.id
        ]

    #resolve page number and comment number for permalinks
    show_comment_position = None
    if show_comment:
        show_page = show_comment.get_page_number(answer_posts=answers)
        show_comment_position = show_comment.get_order_number()
    elif show_answer:
        show_page = show_post.get_page_number(answer_posts=answers)

    objects_list = Paginator(answers, const.ANSWERS_PAGE_SIZE)
    if show_page > objects_list.num_pages:
        return HttpResponseRedirect(question_post.get_absolute_url())
    page_objects = objects_list.page(show_page)

    #count visits
    #import ipdb; ipdb.set_trace()
    if functions.not_a_robot_request(request):
        #todo: split this out into a subroutine
        #todo: merge view counts per user and per session
        #1) view count per session
        update_view_count = False
        if 'question_view_times' not in request.session:
            request.session['question_view_times'] = {}

        last_seen = request.session['question_view_times'].get(question_post.id, None)

        if thread.last_activity_by_id != request.user.id:
            if last_seen:
                if last_seen < thread.last_activity_at:
                    update_view_count = True
            else:
                update_view_count = True

        request.session['question_view_times'][question_post.id] = \
                                                    datetime.datetime.now()
        #2) run the slower jobs in a celery task
        from askbot import tasks
        tasks.record_question_visit.delay(
            question_post = question_post,
            user_id = request.user.id,
            update_view_count = update_view_count
        )

    paginator_data = {
        'is_paginated' : (objects_list.count > const.ANSWERS_PAGE_SIZE),
        'pages': objects_list.num_pages,
        'current_page_number': show_page,
        'page_object': page_objects,
        'base_url' : request.path + '?sort=%s&' % answer_sort_method,
    }
    paginator_context = functions.setup_paginator(paginator_data)

    #todo: maybe consolidate all activity in the thread
    #for the user into just one query?
    favorited = thread.has_favorite_by_user(request.user)

    is_cacheable = True
    if show_page != 1:
        is_cacheable = False
    elif show_comment_position > askbot_settings.MAX_COMMENTS_TO_SHOW:
        is_cacheable = False

    #maybe load draft
    initial = {}
    if request.user.is_authenticated():
        #todo: refactor into methor on thread
        drafts = models.DraftAnswer.objects.filter(
            author=request.user,
            thread=thread
        )
        if drafts.count() > 0:
            initial['text'] = drafts[0].text

    # deployments may substitute their own answer form class
    custom_answer_form_path = getattr(django_settings, 'ASKBOT_NEW_ANSWER_FORM', None)
    if custom_answer_form_path:
        answer_form_class = load_module(custom_answer_form_path)
    else:
        answer_form_class = AnswerForm

    answer_form = answer_form_class(initial=initial, user=request.user)

    user_can_post_comment = (
        request.user.is_authenticated() and request.user.can_post_comment()
    )

    new_answer_allowed = True
    previous_answer = None
    if request.user.is_authenticated():
        if askbot_settings.LIMIT_ONE_ANSWER_PER_USER:
            for answer in answers:
                if answer.author == request.user:
                    new_answer_allowed = False
                    previous_answer = answer
                    break

    if request.user.is_authenticated() and askbot_settings.GROUPS_ENABLED:
        group_read_only = request.user.is_read_only()
    else:
        group_read_only = False

    data = {
        'is_cacheable': False,#is_cacheable, #temporary, until invalidation fix
        'long_time': const.LONG_TIME,#"forever" caching
        'page_class': 'question-page',
        'active_tab': 'questions',
        'question' : question_post,
        'thread': thread,
        'thread_is_moderated': thread.is_moderated(),
        'user_is_thread_moderator': thread.has_moderator(request.user),
        'published_answer_ids': published_answer_ids,
        'answer' : answer_form,
        'editor_is_unfolded': answer_form.has_data(),
        'answers' : page_objects.object_list,
        'answer_count': thread.get_answer_count(request.user),
        'category_tree_data': askbot_settings.CATEGORY_TREE,
        'user_votes': user_votes,
        'user_post_id_list': user_post_id_list,
        'user_can_post_comment': user_can_post_comment,#in general
        'new_answer_allowed': new_answer_allowed,
        'oldest_answer_id': thread.get_oldest_answer_id(request.user),
        'previous_answer': previous_answer,
        'tab_id' : answer_sort_method,
        'favorited' : favorited,
        'similar_threads' : thread.get_similar_threads(),
        'language_code': translation.get_language(),
        'paginator_context' : paginator_context,
        'show_post': show_post,
        'show_comment': show_comment,
        'show_comment_position': show_comment_position,
        'group_read_only': group_read_only,
    }
    #shared with ...
    if askbot_settings.GROUPS_ENABLED:
        data['sharing_info'] = thread.get_sharing_info()

    data.update(context.get_for_tag_editor())

    extra = context.get_extra('ASKBOT_QUESTION_PAGE_EXTRA_CONTEXT', request, data)
    data.update(extra)

    #print 'generated in ', datetime.datetime.now() - before
    return render(request, 'question.html', data)
def revisions(request, id, post_type=None):
    """Render the revision history of a question or an answer.

    Revisions are shown oldest-first; each entry carries an html diff
    against the preceding revision (the first one shows its full html).
    """
    assert post_type in ('question', 'answer')
    post = get_object_or_404(models.Post, post_type=post_type, id=id)
    post_revisions = list(models.PostRevision.objects.filter(post=post))
    post_revisions.reverse()

    previous = None
    for revision in post_revisions:
        if previous is None:
            revision.diff = sanitize_html(revision.html)
            revision.summary = _('initial version')
        else:
            revision.diff = htmldiff(
                sanitize_html(previous.html),
                sanitize_html(revision.html)
            )
        previous = revision

    data = {
        'page_class':'revisions-page',
        'active_tab':'questions',
        'post': post,
        'revisions': post_revisions,
    }
    return render(request, 'revisions.html', data)
@csrf.csrf_exempt
@ajax_only
@anonymous_forbidden
@get_only
def get_comment(request):
    """Ajax GET endpoint: return the editable text of a comment by id.

    Prefers the original (revision 0) text when available, otherwise
    falls back to the latest revision.
    """
    comment_id = int(request.GET['id'])
    comment = models.Post.objects.get(post_type='comment', id=comment_id)
    request.user.assert_can_edit_comment(comment)
    try:
        revision = comment.revisions.get(revision=0)
    except models.PostRevision.DoesNotExist:
        revision = comment.get_latest_revision()
    return {'text': revision.text}
@csrf.csrf_exempt
@ajax_only
@anonymous_forbidden
@get_only
def get_perms_data(request):
    """Return an html snippet listing which activities are permitted
    at each minimum-reputation threshold, per the site settings.
    """
    items = [
        'MIN_REP_TO_VOTE_UP',
        'MIN_REP_TO_VOTE_DOWN',
    ]
    if askbot_settings.MIN_DAYS_TO_ANSWER_OWN_QUESTION > 0:
        items.append('MIN_REP_TO_ANSWER_OWN_QUESTION')
    if askbot_settings.ACCEPTING_ANSWERS_ENABLED:
        items.extend([
            'MIN_REP_TO_ACCEPT_OWN_ANSWER',
            'MIN_REP_TO_ACCEPT_ANY_ANSWER',
        ])
    items.extend([
        'MIN_REP_TO_FLAG_OFFENSIVE',
        'MIN_REP_TO_DELETE_OTHERS_COMMENTS',
        'MIN_REP_TO_DELETE_OTHERS_POSTS',
        'MIN_REP_TO_UPLOAD_FILES',
        'MIN_REP_TO_INSERT_LINK',
        'MIN_REP_TO_SUGGEST_LINK',
        'MIN_REP_TO_CLOSE_OTHERS_QUESTIONS',
        'MIN_REP_TO_RETAG_OTHERS_QUESTIONS',
        'MIN_REP_TO_EDIT_WIKI',
        'MIN_REP_TO_EDIT_OTHERS_POSTS',
        'MIN_REP_TO_VIEW_OFFENSIVE_FLAGS',
    ])
    if askbot_settings.ALLOW_ASKING_BY_EMAIL or askbot_settings.REPLY_BY_EMAIL:
        items.extend([
            'MIN_REP_TO_POST_BY_EMAIL',
            'MIN_REP_TO_TWEET_ON_OTHERS_ACCOUNTS',
        ])

    # (human readable description, threshold value) pairs, in display order
    data = [
        (askbot_settings.get_description(item), getattr(askbot_settings, item))
        for item in items
    ]

    html = get_template('widgets/user_perms.html').render({
        'user': request.user,
        'perms_data': data
    })
    return {'html': html}
@ajax_only
@get_only
def get_post_html(request):
    """Ajax GET endpoint: return the rendered html of a single post."""
    post_id = request.GET['post_id']
    post = models.Post.objects.get(id=post_id)
    post.assert_is_visible_to(request.user)
    return {'post_html': post.html}
| gpl-3.0 |
vainotuisk/icecreamratings | ENV/lib/python2.7/site-packages/pip/req/req_file.py | 11 | 8550 | """
Requirements file parsing
"""
from __future__ import absolute_import
import os
import re
import shlex
import optparse
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves import filterfalse
import pip
from pip.download import get_file_content
from pip.req.req_install import InstallRequirement
from pip.exceptions import (RequirementsFileParseError)
from pip.utils import normalize_name
from pip import cmdoptions
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s)+#.*$')
SUPPORTED_OPTIONS = [
cmdoptions.editable,
cmdoptions.requirements,
cmdoptions.no_index,
cmdoptions.index_url,
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
cmdoptions.use_wheel,
cmdoptions.no_use_wheel,
cmdoptions.always_unzip,
cmdoptions.no_binary,
cmdoptions.only_binary,
]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options
]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
def parse_requirements(filename, finder=None, comes_from=None, options=None,
                       session=None, wheel_cache=None):
    """
    Parse a requirements file, yielding an InstallRequirement per line.

    :param filename: Path or url of requirements file.
    :param finder: Instance of pip.index.PackageFinder.
    :param comes_from: Origin description of requirements.
    :param options: Global options.
    :param session: Instance of pip.download.PipSession.
    :param wheel_cache: Instance of pip.wheel.WheelCache
    """
    if session is None:
        raise TypeError(
            "parse_requirements() missing 1 required keyword argument: "
            "'session'"
        )

    _, content = get_file_content(
        filename, comes_from=comes_from, session=session
    )

    # preprocessing pipeline: strip comments, glue continuations,
    # then apply the optional skip regex
    preprocessed = skip_regex(
        join_lines(ignore_comments(content.splitlines())),
        options
    )

    for line_number, line in enumerate(preprocessed, 1):
        for req in process_line(line, filename, line_number, finder,
                                comes_from, options, session, wheel_cache):
            yield req
def process_line(line, filename, line_number, finder=None, comes_from=None,
                 options=None, session=None, wheel_cache=None):
    """Process a single requirements line; This can result in creating/yielding
    requirements, or updating the finder.

    For lines that contain requirements, the only options that have an effect
    are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
    requirement. Other options from SUPPORTED_OPTIONS may be present, but are
    ignored.

    For lines that do not contain requirements, the only options that have an
    effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
    be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all our parsed and
    affect the finder.
    """
    parser = build_parser()
    defaults = parser.get_default_values()
    defaults.index_url = None
    if finder:
        # `finder.format_control` will be updated during parsing
        defaults.format_control = finder.format_control
    # shlex splits the line shell-style so quoted values survive
    opts, args = parser.parse_args(shlex.split(line), defaults)

    # yield a line requirement
    if args:
        args_line = ' '.join(args)
        comes_from = '-r %s (line %s)' % (filename, line_number)
        isolated = options.isolated_mode if options else False
        if options:
            cmdoptions.check_install_build_global(options, opts)
        # get the options that apply to requirements
        req_options = {}
        for dest in SUPPORTED_OPTIONS_REQ_DEST:
            if dest in opts.__dict__ and opts.__dict__[dest]:
                req_options[dest] = opts.__dict__[dest]
        yield InstallRequirement.from_line(
            args_line, comes_from, isolated=isolated, options=req_options,
            wheel_cache=wheel_cache
        )

    # yield an editable requirement
    elif opts.editables:
        comes_from = '-r %s (line %s)' % (filename, line_number)
        isolated = options.isolated_mode if options else False
        default_vcs = options.default_vcs if options else None
        yield InstallRequirement.from_editable(
            opts.editables[0], comes_from=comes_from,
            default_vcs=default_vcs, isolated=isolated,
            wheel_cache=wheel_cache
        )

    # parse a nested requirements file (-r another-file), recursively
    elif opts.requirements:
        req_path = opts.requirements[0]

        # original file is over http
        if SCHEME_RE.search(filename):
            # do a url join so relative paths work
            req_path = urllib_parse.urljoin(filename, req_path)
        # original file and nested file are paths
        elif not SCHEME_RE.search(req_path):
            # do a join so relative paths work
            req_dir = os.path.dirname(filename)
            req_path = os.path.join(os.path.dirname(filename), req_path)
        # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
        parser = parse_requirements(
            req_path, finder, comes_from, options, session,
            wheel_cache=wheel_cache
        )
        for req in parser:
            yield req

    # set finder options (option-only line: mutates the finder, yields nothing)
    elif finder:
        if opts.index_url:
            finder.index_urls = [opts.index_url]
        if opts.use_wheel is False:
            finder.use_wheel = False
            pip.index.fmt_ctl_no_use_wheel(finder.format_control)
        if opts.no_index is True:
            finder.index_urls = []
        if opts.allow_all_external:
            finder.allow_all_external = opts.allow_all_external
        if opts.extra_index_urls:
            finder.index_urls.extend(opts.extra_index_urls)
        if opts.allow_external:
            finder.allow_external |= set(
                [normalize_name(v).lower() for v in opts.allow_external])
        if opts.allow_unverified:
            # Remove after 7.0
            finder.allow_unverified |= set(
                [normalize_name(v).lower() for v in opts.allow_unverified])
        if opts.find_links:
            # FIXME: it would be nice to keep track of the source
            # of the find_links: support a find-links local path
            # relative to a requirements file.
            value = opts.find_links[0]
            req_dir = os.path.dirname(os.path.abspath(filename))
            relative_to_reqs_file = os.path.join(req_dir, value)
            if os.path.exists(relative_to_reqs_file):
                value = relative_to_reqs_file
            finder.find_links.append(value)
def build_parser():
    """
    Return an optparse parser configured for requirement lines.
    """
    parser = optparse.OptionParser(add_help_option=False)

    for make_option in SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ:
        parser.add_option(make_option())

    # By default optparse sys.exits on parsing errors. We want to wrap
    # that in our own exception.
    def parser_exit(self, msg):
        raise RequirementsFileParseError(msg)
    parser.exit = parser_exit

    return parser
def join_lines(iterator):
    """
    Join a line ending in a backslash with the following line(s).

    Continued lines are buffered until a line without a trailing
    backslash arrives; the buffered pieces are then emitted as one
    joined line. A dangling backslash on the final input line is
    flushed rather than silently discarded (this fixes the old
    behavior flagged by the "handle '\\' on last line" TODO).
    """
    buffered = []
    for line in iterator:
        if line.endswith('\\'):
            buffered.append(line.strip('\\'))
        else:
            if buffered:
                buffered.append(line)
                yield ''.join(buffered)
                buffered = []
            else:
                yield line
    # Bug fix: emit a pending continuation at end of input instead of
    # dropping it on the floor.
    if buffered:
        yield ''.join(buffered)
    # TODO: handle space after '\'.
def ignore_comments(iterator):
    """
    Strip comments and drop lines that end up empty.
    """
    for raw_line in iterator:
        stripped = COMMENT_RE.sub('', raw_line).strip()
        if stripped:
            yield stripped
def skip_regex(lines, options):
    """
    Optionally exclude lines that match '--skip-requirements-regex'
    """
    if options and options.skip_requirements_regex:
        matcher = re.compile(options.skip_requirements_regex)
        return filterfalse(matcher.search, lines)
    return lines
| bsd-3-clause |
import os, sys

from distutils import log, dir_util
# DistutilsOptionError is raised in finalize_options but was never
# imported, which turned the intended option error into a NameError.
from distutils.errors import DistutilsOptionError

from setuptools import Command
from setuptools import namespaces
from setuptools.archive_util import unpack_archive
import pkg_resources
class install_egg_info(namespaces.Installer, Command):
    """Install an .egg-info directory for the package"""

    description = "Install an .egg-info directory for the package"

    user_options = [
        ('install-dir=', 'd', "directory to install to"),
    ]

    def initialize_options(self):
        """Declare options so distutils can fill them in later."""
        self.install_dir = None
        self.install_layout = None
        self.prefix_option = None

    def finalize_options(self):
        """Resolve source/target paths for the .egg-info tree.

        Inherits install_dir / install_layout / prefix_option from the
        parent install commands and computes the target directory name.
        Raises DistutilsOptionError for an unsupported --install-layout
        value (fixed: this previously crashed with NameError because the
        exception class was never imported).
        """
        self.set_undefined_options('install_lib',
                                   ('install_dir', 'install_dir'))
        self.set_undefined_options('install', ('install_layout', 'install_layout'))
        if sys.hexversion > 0x2060000:
            self.set_undefined_options('install', ('prefix_option', 'prefix_option'))
        ei_cmd = self.get_finalized_command("egg_info")
        basename = pkg_resources.Distribution(
            None, None, ei_cmd.egg_name, ei_cmd.egg_version
        ).egg_name() + '.egg-info'
        if self.install_layout:
            if self.install_layout.lower() not in ['deb']:
                raise DistutilsOptionError("unknown value for --install-layout")
            self.install_layout = self.install_layout.lower()
            # Debian layout uses an unversioned .egg-info directory name
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
        elif self.prefix_option or 'real_prefix' in sys.__dict__:
            # don't modify for virtualenv
            pass
        else:
            basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '')
        self.source = ei_cmd.egg_info
        self.target = os.path.join(self.install_dir, basename)
        self.outputs = []

    def run(self):
        """Regenerate egg_info and copy it into the install directory,
        replacing any pre-existing tree or file at the target path.
        """
        self.run_command('egg_info')
        if os.path.isdir(self.target) and not os.path.islink(self.target):
            dir_util.remove_tree(self.target, dry_run=self.dry_run)
        elif os.path.exists(self.target):
            self.execute(os.unlink, (self.target,), "Removing " + self.target)
        if not self.dry_run:
            pkg_resources.ensure_directory(self.target)
        self.execute(
            self.copytree, (), "Copying %s to %s" % (self.source, self.target)
        )
        self.install_namespaces()

    def get_outputs(self):
        """Return the list of files created by this command."""
        return self.outputs

    def copytree(self):
        # Copy the .egg-info tree to site-packages
        def skimmer(src, dst):
            # filter out source-control directories; note that 'src' is always
            # a '/'-separated path, regardless of platform. 'dst' is a
            # platform-specific path.
            for skip in '.svn/', 'CVS/':
                if src.startswith(skip) or '/' + skip in src:
                    return None
            # SOURCES.txt is not shipped under the Debian layout
            if self.install_layout and self.install_layout in ['deb'] and src.startswith('SOURCES.txt'):
                log.info("Skipping SOURCES.txt")
                return None
            self.outputs.append(dst)
            log.debug("Copying %s to %s", src, dst)
            return dst

        unpack_archive(self.source, self.target, skimmer)
| mit |
WiReD-/metagoofil | hachoir_parser/audio/midi.py | 84 | 9168 | """
Musical Instrument Digital Interface (MIDI) audio file parser.
Documentation:
- Standard MIDI File Format, Dustin Caldwell (downloaded on wotsit.org)
Author: Victor Stinner
Creation: 27 december 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, Bits, ParserError,
String, UInt32, UInt24, UInt16, UInt8, Enum, RawBits, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import createDict, humanDurationNanosec
from hachoir_parser.common.tracker import NOTE_NAME
MAX_FILESIZE = 10 * 1024 * 1024
class Integer(Bits):
    """MIDI variable-length integer.

    Each byte contributes 7 data bits (accumulated big-endian); bit 7 of
    a byte is the continuation flag, set while more bytes follow.
    """
    def __init__(self, parent, name, description=None):
        # Start as an 8-bit field; grow by 8 bits per continuation byte.
        Bits.__init__(self, parent, name, 8, description)
        stream = parent.stream
        addr = self.absolute_address
        value = 0
        while True:
            bits = stream.readBits(addr, 8, parent.endian)
            # Shift in the low 7 data bits of this byte.
            value = (value << 7) + (bits & 127)
            if not(bits & 128):
                break
            addr += 8
            self._size += 8
            # The format caps variable-length quantities at 4 bytes.
            if 32 < self._size:
                raise ParserError("Integer size is bigger than 32-bit")
        # hachoir calls createValue() lazily; close over the decoded value.
        self.createValue = lambda: value
# --- Payload parsers ------------------------------------------------------
# Each helper is a generator yielding the fields that make up the payload
# of one event type; Command.createFields() dispatches to them via the
# COMMAND / META_COMMAND tables below.

def parseNote(parser):
    # Note on/off and key after-touch: note number plus velocity/pressure.
    yield Enum(UInt8(parser, "note", "Note number"), NOTE_NAME)
    yield UInt8(parser, "velocity")

def parseControl(parser):
    yield UInt8(parser, "control", "Controller number")
    yield UInt8(parser, "value", "New value")

def parsePatch(parser):
    yield UInt8(parser, "program", "New program number")

def parseChannel(parser, size=1):
    yield UInt8(parser, "channel", "Channel number")

def parsePitch(parser):
    # Pitch wheel value split over two 7-bit bytes, least significant first.
    yield UInt8(parser, "bottom", "(least sig) 7 bits of value")
    yield UInt8(parser, "top", "(most sig) 7 bits of value")

def parseText(parser, size):
    yield String(parser, "text", size)

def parseSMPTEOffset(parser, size):
    yield RawBits(parser, "padding", 1)
    yield Enum(Bits(parser, "frame_rate", 2),
        {0:"24 fps", 1:"25 fps", 2:"30 fps (drop frame)", 3:"30 fps"})
    yield Bits(parser, "hour", 5)
    yield UInt8(parser, "minute")
    yield UInt8(parser, "second")
    yield UInt8(parser, "frame")
    yield UInt8(parser, "subframe", "100 subframes per frame")

def formatTempo(field):
    # Tempo is stored in microseconds; humanDurationNanosec wants ns.
    return humanDurationNanosec(field.value*1000)

def parseTempo(parser, size):
    yield textHandler(UInt24(parser, "microsec_quarter", "Microseconds per quarter note"), formatTempo)

def parseTimeSignature(parser, size):
    yield UInt8(parser, "numerator", "Numerator of time signature")
    yield UInt8(parser, "denominator", "denominator of time signature 2=quarter 3=eighth, etc.")
    yield UInt8(parser, "nb_tick", "Number of ticks in metronome click")
    yield UInt8(parser, "nb_32nd_note", "Number of 32nd notes to the quarter note")
class Command(FieldSet):
    """One timed MIDI event: a variable-length delta time followed by a
    channel-voice or meta command and its payload."""

    # Channel voice messages: one (description, payload parser) entry per
    # status byte, for each of the 16 MIDI channels.
    COMMAND = {}
    for channel in xrange(16):
        COMMAND[0x80+channel] = ("Note off (channel %u)" % channel, parseNote)
        COMMAND[0x90+channel] = ("Note on (channel %u)" % channel, parseNote)
        COMMAND[0xA0+channel] = ("Key after-touch (channel %u)" % channel, parseNote)
        COMMAND[0xB0+channel] = ("Control change (channel %u)" % channel, parseControl)
        COMMAND[0xC0+channel] = ("Program (patch) change (channel %u)" % channel, parsePatch)
        COMMAND[0xD0+channel] = ("Channel after-touch (channel %u)" % channel, parseChannel)
        COMMAND[0xE0+channel] = ("Pitch wheel change (channel %u)" % channel, parsePitch)
    COMMAND_DESC = createDict(COMMAND, 0)
    COMMAND_PARSER = createDict(COMMAND, 1)

    META_COMMAND_TEXT = 1
    META_COMMAND_NAME = 3
    # Meta events (status 0xFF): (description, payload parser or None).
    META_COMMAND = {
        0x00: ("Sets the track's sequence number", None),
        0x01: ("Text event", parseText),
        0x02: ("Copyright info", parseText),
        0x03: ("Sequence or Track name", parseText),
        0x04: ("Track instrument name", parseText),
        0x05: ("Lyric", parseText),
        0x06: ("Marker", parseText),
        0x07: ("Cue point", parseText),
        0x20: ("MIDI Channel Prefix", parseChannel),
        0x2F: ("End of the track", None),
        0x51: ("Set tempo", parseTempo),
        0x54: ("SMPTE offset", parseSMPTEOffset),
        0x58: ("Time Signature", parseTimeSignature),
        0x59: ("Key signature", None),
        0x7F: ("Sequencer specific information", None),
    }
    META_COMMAND_DESC = createDict(META_COMMAND, 0)
    META_COMMAND_PARSER = createDict(META_COMMAND, 1)

    def __init__(self, *args, **kwargs):
        # prev_command links to the preceding Command so "Running Status"
        # events (no status byte of their own) can reuse its status byte.
        if 'prev_command' in kwargs:
            self.prev_command = kwargs['prev_command']
            del kwargs['prev_command']
        else:
            self.prev_command = None
        self.command = None
        FieldSet.__init__(self, *args, **kwargs)

    def createFields(self):
        yield Integer(self, "time", "Delta time in ticks")
        # Peek at the next byte without consuming it.
        # NOTE(review): 'next' shadows the builtin of the same name.
        next = self.stream.readBits(self.absolute_address+self.current_size, 8, self.root.endian)
        if next & 0x80 == 0:
            # "Running Status" command
            if self.prev_command is None:
                raise ParserError("Running Status command not preceded by another command.")
            self.command = self.prev_command.command
        else:
            yield Enum(textHandler(UInt8(self, "command"), hexadecimal), self.COMMAND_DESC)
            self.command = self["command"].value
        if self.command == 0xFF:
            # Meta event: sub-command byte, explicit length, then payload.
            yield Enum(textHandler(UInt8(self, "meta_command"), hexadecimal), self.META_COMMAND_DESC)
            yield UInt8(self, "data_len")
            size = self["data_len"].value
            if size:
                command = self["meta_command"].value
                if command in self.META_COMMAND_PARSER:
                    parser = self.META_COMMAND_PARSER[command]
                else:
                    parser = None
                if parser:
                    for field in parser(self, size):
                        yield field
                else:
                    # Unknown / opaque meta payload: keep it as raw bytes.
                    yield RawBytes(self, "data", size)
        else:
            if self.command not in self.COMMAND_PARSER:
                raise ParserError("Unknown command: %s" % self["command"].display)
            parser = self.COMMAND_PARSER[self.command]
            for field in parser(self):
                yield field

    def createDescription(self):
        if "meta_command" in self:
            return self["meta_command"].display
        else:
            return self.COMMAND_DESC[self.command]
class Track(FieldSet):
    """One MTrk chunk: 4-byte marker, 4-byte size, then timed commands."""

    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # The size field counts only the payload; add the 8-byte
        # marker+size header, then convert bytes to bits.
        self._size = (8 + self["size"].value) * 8

    def createFields(self):
        yield String(self, "marker", 4, "Track marker (MTrk)", charset="ASCII")
        yield UInt32(self, "size")
        # Thread each Command to the next so "Running Status" events can
        # reuse the previous status byte.
        # (Cleanup: removed an always-true "if True:" scaffold whose 'else'
        # branch -- dumping the payload as raw bytes -- was unreachable
        # dead code; behavior is unchanged.)
        cur = None
        while not self.eof:
            cur = Command(self, "command[]", prev_command=cur)
            yield cur

    def createDescription(self):
        # Use the first command's text (track/sequence name or text event)
        # as the human-readable track description, if present.
        command = self["command[0]"]
        if "meta_command" in command \
        and command["meta_command"].value in (Command.META_COMMAND_TEXT, Command.META_COMMAND_NAME) \
        and "text" in command:
            return command["text"].value.strip("\r\n")
        else:
            return ""
class Header(FieldSet):
    """Payload of the MThd chunk: size, format, track count, time base."""
    # Fixed 10 bytes: size(4) + file_format(2) + nb_track(2) + delta_time(2).
    static_size = 10*8
    FILE_FORMAT = {
        0: "Single track",
        1: "Multiple tracks, synchronous",
        2: "Multiple tracks, asynchronous",
    }

    def createFields(self):
        yield UInt32(self, "size")
        yield Enum(UInt16(self, "file_format"), self.FILE_FORMAT)
        yield UInt16(self, "nb_track")
        yield UInt16(self, "delta_time", "Delta-time ticks per quarter note")

    def createDescription(self):
        return "%s; %s tracks" % (
            self["file_format"].display, self["nb_track"].value)
class MidiFile(Parser):
    """Top-level MIDI file parser: MThd header followed by MTrk tracks."""
    MAGIC = "MThd"
    PARSER_TAGS = {
        "id": "midi",
        "category": "audio",
        "file_ext": ["mid", "midi"],
        # NOTE(review): "audio/mime" looks like a typo for "audio/midi" --
        # confirm against upstream before changing this runtime value.
        "mime": (u"audio/mime", ),
        "magic": ((MAGIC, 0),),
        "min_size": 64,
        "description": "MIDI audio"
    }
    endian = BIG_ENDIAN

    def validate(self):
        # Cheap sanity checks only: magic bytes and the fixed header size.
        if self.stream.readBytes(0, 4) != self.MAGIC:
            return "Invalid signature"
        if self["header/size"].value != 6:
            return "Invalid header size"
        return True

    def createFields(self):
        yield String(self, "signature", 4, r"MIDI signature (MThd)", charset="ASCII")
        yield Header(self, "header")
        while not self.eof:
            yield Track(self, "track[]")

    def createDescription(self):
        return "MIDI audio: %s" % self["header"].description

    def createContentSize(self):
        # Bound the content by locating the last track, then searching for
        # its mandatory "End of track" meta event (FF 2F 00).
        count = self["/header/nb_track"].value - 1
        start = self["track[%u]" % count].absolute_address
        # Search "End of track" of last track
        end = self.stream.searchBytes("\xff\x2f\x00", start, MAX_FILESIZE*8)
        if end is not None:
            # Include the three event bytes themselves (sizes are in bits).
            return end + 3*8
        return None
| gpl-2.0 |
pyjs/pyjs | pyjswidgets/pyjamas/chart/Annotation.py | 7 | 6234 | """
* Copyright 2007,2008,2009 John C. Gunther
* Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
*
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http:#www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*
"""
import math
from pyjamas.ui.HTML import HTML
from pyjamas.chart.GChartUtil import htmlWidth, htmlHeight
from pyjamas.chart.GChartConsts import NAI
from pyjamas.chart.GChartConsts import DEFAULT_FONT_COLOR
from pyjamas.chart.GChartConsts import DEFAULT_ANNOTATION_FONTSIZE
from pyjamas.chart.GChartConsts import DEFAULT_WIDGET_WIDTH_UPPERBOUND
from pyjamas.chart.GChartConsts import DEFAULT_WIDGET_HEIGHT_UPPERBOUND
from pyjamas.chart.GChartConsts import CHARHEIGHT_TO_FONTSIZE_UPPERBOUND
from pyjamas.chart.GChartConsts import CHARWIDTH_TO_FONTSIZE_UPPERBOUND
"""
* Annotates (labels) a chart symbol. Users access this class via
* wrapper methods of the Point class, and via various tick-label
* related methods of the Axis class.
*
"""
HTML_LEN = len("<html>")
BR_LEN = len("<br>")
def getNumberOfCharsWide(s):
    """Crudely estimate the width of *s* in characters.

    Plain text is simply measured by length; strings carrying the
    "<html>" flag prefix are delegated to the HTML width heuristic.
    Callers can supply explicit widths when this estimate fails them.
    """
    if s.startswith("<html>"):
        return htmlWidth(s)
    return len(s)
class Annotation:
    """Annotation (label) attached to a chart symbol; see module docstring."""

    def __init__(self):
        self.fontColor = DEFAULT_FONT_COLOR
        self.fontSize = DEFAULT_ANNOTATION_FONTSIZE
        self.fontStyle = "normal"
        self.fontWeight = "normal"
        self.location = None
        self.text = None
        self.widget = None # may be used in lieu of text or HTML
        self.visible = True
        # Pixel offsets applied when positioning the annotation.
        self.xShift = 0
        self.yShift = 0
        self._isHTML = False; # no break tags ==> plain text
        # Estimated number of lines, width in chars, of annotation
        # text (not used by Widgets)
        self.numberOfLinesHigh = 0
        self.numberOfCharsWide = 0
        # NAI sentinel (imported constant) == "no explicit bound given";
        # getWidthUpperBound/getHeightUpperBound then estimate from font size.
        self.widthUpperBound = NAI
        self.heightUpperBound = NAI

    """
    * Computes parameters used to estimate the width and height
    * of the (invisible) enclosing 1x1 Grid of an annotation
    * (used to align, center, etc. the annotation) <p>
    *
    """
    def analyzeHTML(self, s):
        # Returns the display text (with any leading "<html>" flag removed)
        # and updates _isHTML / numberOfLinesHigh / numberOfCharsWide.
        result = None
        if None == s:
            self._isHTML = False
            self.numberOfLinesHigh = 0
            self.numberOfCharsWide = 0
        elif hasattr(s, "startswith") and not s.startswith("<html>"):
            # no html==>plain text
            self._isHTML = False
            self.numberOfLinesHigh = 1
            self.numberOfCharsWide = len(s)
            result = s
        else:
            # HTML
            self._isHTML = True
            # <html> is just a flag, not a tag, so strip it out.
            result = s[HTML_LEN:]
            # Only estimate dimensions the caller did not pin explicitly.
            if self.widthUpperBound == NAI:
                self.numberOfCharsWide = htmlWidth(result)
            if self.heightUpperBound == NAI:
                self.numberOfLinesHigh = htmlHeight(result)
        return result

    def getFontColor(self):
        return self.fontColor

    def getFontSize(self):
        return self.fontSize

    def getLocation(self):
        return self.location

    def isHTML(self):
        return self._isHTML

    def getText(self):
        # Re-attach the "<html>" flag that analyzeHTML() stripped off.
        if self._isHTML:
            return "<html>" + (self.text or "")
        return self.text

    def getVisible(self):
        return self.visible

    def getXShift(self):
        return self.xShift

    def getYShift(self):
        return self.yShift

    def setFontColor(self, cssColor):
        self.fontColor = cssColor

    def setFontSize(self, fontSize):
        self.fontSize = fontSize

    def setFontWeight(self, cssWeight):
        self.fontWeight = cssWeight

    def setFontStyle(self, cssStyle):
        self.fontStyle = cssStyle

    def getFontWeight(self):
        return self.fontWeight

    def getFontStyle(self):
        return self.fontStyle

    def setLocation(self, location):
        self.location = location

    def setText(self, text, widthUpperBound=NAI, heightUpperBound=NAI):
        # Text and widget are mutually exclusive: setting one clears the other.
        self.widthUpperBound = widthUpperBound
        self.heightUpperBound = heightUpperBound
        self.text = self.analyzeHTML(text)
        self.widget = None

    def setVisible(self, visible):
        self.visible = visible

    def setWidget(self, widget,
                  widthUpperBound=DEFAULT_WIDGET_WIDTH_UPPERBOUND,
                  heightUpperBound=DEFAULT_WIDGET_HEIGHT_UPPERBOUND):
        # A plain string is wrapped into an HTML widget for convenience.
        if isinstance(widget, basestring):
            widget = HTML(widget)
        self.widthUpperBound = widthUpperBound
        self.heightUpperBound = heightUpperBound
        self.text = None
        self.widget = widget

    def getWidget(self):
        return self.widget

    def setXShift(self, xShift):
        self.xShift = xShift

    def setYShift(self, yShift):
        self.yShift = yShift

    def getHeightUpperBound(self):
        # Explicit bound wins; otherwise estimate from font size and the
        # line count computed by analyzeHTML().
        result = 0
        if self.heightUpperBound != NAI:
            result = self.heightUpperBound
        else:
            result = int (math.ceil(self.fontSize *
                                    self.numberOfLinesHigh *
                                    CHARHEIGHT_TO_FONTSIZE_UPPERBOUND))
        return result

    def getWidthUpperBound(self):
        # Same scheme as getHeightUpperBound(), using the char-width estimate.
        result = 0
        if self.widthUpperBound != NAI:
            result = self.widthUpperBound
        else:
            result = int ( math.ceil(self.fontSize *
                                     self.numberOfCharsWide *
                                     CHARWIDTH_TO_FONTSIZE_UPPERBOUND))
        return result
# end of class Annotation
| apache-2.0 |
themurph/openshift-tools | ansible/roles/lib_git/build/ansible/git_clone.py | 13 | 1587 | # pylint: skip-file
def main():
    '''
    ansible git module for cloning

    Builds a GitClone from the module parameters and, for state=present,
    performs (or skips) the clone, reporting changed/unchanged/failed
    back to Ansible via exit_json/fail_json.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str', choices=['present']),
            dest=dict(default=None, required=True, type='str'),
            repo=dict(default=None, required=True, type='str'),
            branch=dict(default=None, required=False, type='str'),
            bare=dict(default=False, required=False, type='bool'),
            ssh_key=dict(default=None, required=False, type='str'),
        ),
        supports_check_mode=False,
    )

    git = GitClone(module.params['dest'],
                   module.params['repo'],
                   module.params['branch'],
                   module.params['bare'],
                   module.params['ssh_key'])

    state = module.params['state']

    if state == 'present':
        results = git.clone()
        if results['returncode'] != 0:
            module.fail_json(msg=results)

        # FIX: use truthiness instead of "== True" -- the flag may be any
        # truthy value, and comparing to True with == is unidiomatic.
        if results['no_clone_needed']:
            module.exit_json(changed=False, results=results, state="present")

        module.exit_json(changed=True, results=results, state="present")

    # Unreachable while 'present' is the only allowed choice, but kept as a
    # safety net should more states be added to the spec.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
if __name__ == '__main__':
from ansible.module_utils.basic import *
main()
| apache-2.0 |
minorua/QGIS | python/core/additions/qgsfunction.py | 17 | 6610 | # -*- coding: utf-8 -*-
"""
***************************************************************************
qgsfunction.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Denis Rouzaud
Email : denis@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
import inspect
import string
from builtins import str
from qgis.PyQt.QtCore import QCoreApplication
from qgis._core import QgsExpressionFunction, QgsExpression, QgsMessageLog, QgsFeatureRequest, Qgis
def register_function(function, arg_count, group, usesgeometry=False,
                      referenced_columns=[QgsFeatureRequest.ALL_ATTRIBUTES], handlesnull=False, **kwargs):
    """
    Register a Python function to be used as a expression function.

    Functions should take (values, feature, parent) as args:

    Example:
        def myfunc(values, feature, parent):
            pass

    They can also shortcut naming feature and parent args by using *args
    if they are not needed in the function.

    Example:
        def myfunc(values, *args):
            pass

    Functions should return a value compatible with QVariant

    Eval errors can be raised using parent.setEvalErrorString("Error message")

    :param function:
    :param arg_count:
    :param group:
    :param usesgeometry:
    :param handlesnull: Needs to be set to True if this function does not always return NULL if any parameter is NULL. Default False.
    :return:
    """

    class QgsPyExpressionFunction(QgsExpressionFunction):

        def __init__(self, func, name, args, group, helptext='', usesGeometry=True,
                     referencedColumns=QgsFeatureRequest.ALL_ATTRIBUTES, expandargs=False, handlesNull=False):
            QgsExpressionFunction.__init__(self, name, args, group, helptext)
            self.function = func
            self.expandargs = expandargs
            self.uses_geometry = usesGeometry
            self.referenced_columns = referencedColumns
            self.handles_null = handlesNull

        def func(self, values, context, parent, node):
            # Bridge from the QGIS expression engine to the Python callable.
            feature = None
            if context:
                feature = context.feature()
            try:
                # NOTE: inspect.getfullargspec replaces getargspec, which was
                # deprecated and removed in Python 3.11; .args is compatible.
                if self.expandargs:
                    values.append(feature)
                    values.append(parent)
                    if inspect.getfullargspec(self.function).args[-1] == 'context':
                        values.append(context)
                    return self.function(*values)
                else:
                    if inspect.getfullargspec(self.function).args[-1] == 'context':
                        # BUG FIX: the result must be returned here.
                        # Previously the function was called with the context,
                        # its result discarded, and then called a SECOND time
                        # below without the context.
                        return self.function(values, feature, parent, context)
                    return self.function(values, feature, parent)
            except Exception as ex:
                parent.setEvalErrorString(str(ex))
                return None

        def usesGeometry(self, node):
            return self.uses_geometry

        def referencedColumns(self, node):
            return self.referenced_columns

        def handlesNull(self):
            return self.handles_null

    helptemplate = string.Template("""<h3>$name function</h3><br>$doc""")
    name = kwargs.get('name', function.__name__)
    helptext = kwargs.get('helpText') or function.__doc__ or ''
    helptext = helptext.strip()
    expandargs = False

    if arg_count == "auto":
        # Work out the number of args we need.
        # Number of function args - 2.  The last two args are always feature, parent.
        args = inspect.getfullargspec(function).args
        number = len(args)
        arg_count = number - 2
        if args[-1] == 'context':
            arg_count -= 1
        expandargs = True

    register = kwargs.get('register', True)
    if register and QgsExpression.isFunctionName(name):
        # Re-registering: the old function must be removed first.
        if not QgsExpression.unregisterFunction(name):
            msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
            msg = QCoreApplication.translate("UserExpressions",
                                             "The user expression {0} already exists and could not be unregistered.").format(
                name)
            QgsMessageLog.logMessage(msg + "\n", msgtitle, Qgis.Warning)
            return None

    function.__name__ = name
    helptext = helptemplate.safe_substitute(name=name, doc=helptext)
    f = QgsPyExpressionFunction(function, name, arg_count, group, helptext, usesgeometry, referenced_columns,
                                expandargs, handlesnull)

    # This doesn't really make any sense here but does when used from a decorator context
    # so it can stay.
    if register:
        QgsExpression.registerFunction(f)
    return f
def qgsfunction(args='auto', group='custom', **kwargs):
    """
    Decorator used to define a user expression function.

    :param args: Number of parameters; 'auto' accepts a variable number.
    :param group: The expression group to which this expression is added.
    :param \**kwargs:
        See below

    :Keyword Arguments:
        * *referenced_columns* (``list``) --
          Field names on which this expression works.  May be set to
          ``[QgsFeatureRequest.ALL_ATTRIBUTES]``.  Empty by default.
        * *usesgeometry* (``bool``) --
          True if this expression requires the geometry.  Default False.
        * *handlesnull* (``bool``) --
          True if this expression has custom NULL handling; otherwise the
          result is NULL as soon as any parameter is NULL.  Default False.

    Example (with feature and parent)::

        @qgsfunction(2, 'test'):
        def add(values, feature, parent):
            pass

    Example (shortcutting feature/parent via *args)::

        @qgsfunction(2, 'test'):
        def add(values, *args):
            pass

    Both create and register a two-argument function named 'add' in the
    'test' group.
    """
    def decorator(func):
        # All of the real work happens in register_function().
        return register_function(func, args, group, **kwargs)

    return decorator
| gpl-2.0 |
barma1309/Kalista | .virtualenvs/Kalista/lib/python3.4/site-packages/django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
    """
    Class decorator that allow the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # Record the constructor arguments so deconstruct() can hand
            # them back verbatim.
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Resolve the module/name pair either from the explicit path
            # or from the object's own class (Python 2/fallback version).
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            ctor_args, ctor_kwargs = obj._constructor_args
            import_path = path or '%s.%s' % (obj.__class__.__module__, name)
            return (import_path, ctor_args, ctor_kwargs)

        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct
        return klass

    # Support both @deconstructible and @deconstructible(path=...) usage.
    if args:
        return decorator(*args, **kwargs)
    return decorator
| gpl-3.0 |
joshuairl/ccpd-platform | lms/_scripts/fckeditor/_samples/py/sampleposteddata.py | 22 | 1984 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This page lists the data posted by a form.
"""
import cgi
import os

# Tell the browser to render html
print "Content-Type: text/html"
print ""

try:
    # Create a cgi object
    form = cgi.FieldStorage()
except Exception, e:
    print e

# Document header
print """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>FCKeditor - Samples - Posted Data</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="robots" content="noindex, nofollow">
<link href="../sample.css" rel="stylesheet" type="text/css" />
</head>
<body>
"""

# This is the real work
print """
<h1>FCKeditor - Samples - Posted Data</h1>
This page lists all data posted by the form.
<hr>
<table border="1" cellspacing="0" id="outputSample">
<colgroup><col width="80"><col></colgroup>
<thead>
<tr>
<th>Field Name</th>
<th>Value</th>
</tr>
</thead>
"""

# Echo every posted field as a table row.
# NOTE(review): the posted value is interpolated into the HTML without
# escaping (no cgi.escape) -- looks like an XSS vector if this sample is
# ever exposed; confirm whether that is acceptable for a local demo.
for key in form.keys():
    try:
        value = form[key].value
        print """
<tr>
<th>%s</th>
<td><pre>%s</pre></td>
</tr>
""" % (key, value)
    except Exception, e:
        print e

print "</table>"

# For testing your environments
print "<hr>"
for key in os.environ.keys():
    print "%s: %s<br>" % (key, os.environ.get(key, ""))
print "<hr>"

# Document footer
print """
</body>
</html>
"""
| mit |
fajoy/nova | nova/scheduler/driver.py | 1 | 12001 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 OpenStack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler base class that all Schedulers should inherit from
"""
import sys
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import db
from nova import exception
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common.notifier import api as notifier
from nova.openstack.common import timeutils
from nova import servicegroup
LOG = logging.getLogger(__name__)
scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='nova.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
cfg.IntOpt('scheduler_max_attempts',
default=3,
help='Maximum number of attempts to schedule an instance'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
def handle_schedule_error(context, ex, instance_uuid, request_spec):
    """Record a scheduling failure: log it, fault the instance, move it to
    ERROR state, and emit an error notification."""
    # NoValidHost is an expected outcome; anything else gets a traceback.
    if not isinstance(ex, exception.NoValidHost):
        LOG.exception(_("Exception during scheduler.run_instance"))
    compute_utils.add_instance_fault_from_exc(context,
            instance_uuid, ex, sys.exc_info())

    state = vm_states.ERROR.upper()
    LOG.warning(_('Setting instance to %(state)s state.'),
                locals(), instance_uuid=instance_uuid)

    # update instance state and notify on the transition
    (old_ref, new_ref) = db.instance_update_and_get_original(context,
            instance_uuid, {'vm_state': vm_states.ERROR,
                            'task_state': None})
    notifications.send_update(context, old_ref, new_ref,
            service="scheduler")

    # Publish a scheduler.run_instance ERROR event with the failed spec.
    properties = request_spec.get('instance_properties', {})
    payload = dict(request_spec=request_spec,
                   instance_properties=properties,
                   instance_id=instance_uuid,
                   state=vm_states.ERROR,
                   method='run_instance',
                   reason=ex)

    notifier.notify(context, notifier.publisher_id("scheduler"),
                    'scheduler.run_instance', notifier.ERROR, payload)
def instance_update_db(context, instance_uuid):
    '''Clear the host and node - set the scheduled_at field of an Instance.

    :returns: An Instance with the updated fields set properly.
    '''
    return db.instance_update(context, instance_uuid, {
        'host': None,
        'node': None,
        'scheduled_at': timeutils.utcnow(),
    })
def encode_instance(instance, local=True):
    """Encode locally created instance for return via RPC"""
    # Full instance dicts contain datetimes the RPC json encoding cannot
    # yet handle, so a locally created instance is reduced to its id and
    # the caller performs a DB lookup; remote ("precooked") instances are
    # passed through whole with a marker flag.
    if not local:
        encoded = dict(instance)
        encoded['_is_precooked'] = True
        return encoded
    return {'id': instance['id'], '_is_precooked': False}
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
def __init__(self):
self.host_manager = importutils.import_object(
CONF.scheduler_host_manager)
self.compute_api = compute_api.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
def update_service_capabilities(self, service_name, host, capabilities):
"""Process a capability update from a service node."""
self.host_manager.update_service_capabilities(service_name,
host, capabilities)
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
services = db.service_get_all_by_topic(context, topic)
return [service['host']
for service in services
if self.servicegroup_api.service_is_up(service)]
def schedule_prep_resize(self, context, image, request_spec,
filter_properties, instance, instance_type,
reservations):
"""Must override schedule_prep_resize method for scheduler to work."""
msg = _("Driver must implement schedule_prep_resize")
raise NotImplementedError(msg)
def schedule_run_instance(self, context, request_spec,
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
"""Must override schedule_run_instance method for scheduler to work."""
msg = _("Driver must implement schedule_run_instance")
raise NotImplementedError(msg)
def schedule_live_migration(self, context, instance, dest,
block_migration, disk_over_commit):
"""Live migration scheduling method.
:param context:
:param instance: instance dict
:param dest: destination host
:param block_migration: if true, block_migration.
:param disk_over_commit: if True, consider real(not virtual)
disk size.
:return:
The host where instance is running currently.
Then scheduler send request that host.
"""
# Check we can do live migration
self._live_migration_src_check(context, instance)
self._live_migration_dest_check(context, instance, dest)
self._live_migration_common_check(context, instance, dest)
migrate_data = self.compute_rpcapi.check_can_live_migrate_destination(
context, instance, dest, block_migration, disk_over_commit)
# Perform migration
src = instance['host']
self.compute_rpcapi.live_migration(context, host=src,
instance=instance, dest=dest,
block_migration=block_migration,
migrate_data=migrate_data)
def _live_migration_src_check(self, context, instance_ref):
"""Live migration check routine (for src host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(johngar) why is this not in the API layer?
# Checking instance is running.
if instance_ref['power_state'] != power_state.RUNNING:
raise exception.InstanceNotRunning(
instance_id=instance_ref['uuid'])
# Checking src host exists and compute node
src = instance_ref['host']
try:
services = db.service_get_all_compute_by_host(context, src)
except exception.NotFound:
raise exception.ComputeServiceUnavailable(host=src)
# Checking src host is alive.
if not self.servicegroup_api.service_is_up(services[0]):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
# Checking dest exists and compute node.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]
# Checking dest host is alive.
if not self.servicegroup_api.service_is_up(dservice_ref):
raise exception.ComputeServiceUnavailable(host=dest)
# Checking whether The host where instance is running
# and dest is not same.
src = instance_ref['host']
if dest == src:
raise exception.UnableToMigrateToSelf(
instance_id=instance_ref['uuid'], host=dest)
# Check memory requirements
self._assert_compute_node_has_enough_memory(context,
instance_ref, dest)
def _live_migration_common_check(self, context, instance_ref, dest):
"""Live migration common check routine.
The following checks are based on
http://wiki.libvirt.org/page/TodoPreMigrationChecks
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
"""
dservice_ref = self._get_compute_info(context, dest)
src = instance_ref['host']
oservice_ref = self._get_compute_info(context, src)
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
raise exception.InvalidHypervisorType()
# Checking hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
raise exception.DestinationHypervisorTooOld()
def _assert_compute_node_has_enough_memory(self, context,
                                           instance_ref, dest):
    """Check that *dest* has enough free memory for a live migration.

    Free memory is the destination's total memory minus the sum of the
    memory assigned to every instance already placed on it.

    :param context: security context
    :param instance_ref: nova.db.sqlalchemy.models.Instance object
    :param dest: destination host
    :raises: exception.MigrationError when the instance has no memory
             size recorded or the destination lacks the room
    """
    # Total memory of the destination host.
    avail = self._get_compute_info(context, dest)['memory_mb']

    # Sum the *assigned* (maximum) memory of instances already on the
    # host rather than their actual usage, because overcommitting is
    # risky for live migration.
    instance_refs = db.instance_get_all_by_host(context, dest)
    used = sum(i['memory_mb'] for i in instance_refs)

    mem_inst = instance_ref['memory_mb']
    avail = avail - used
    if not mem_inst or avail <= mem_inst:
        # Build the substitution mapping explicitly instead of using
        # ``locals()``, which silently couples the message to the
        # incidental names of local variables.
        reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                   "Lack of memory(host:%(avail)s <= "
                   "instance:%(mem_inst)s)")
        raise exception.MigrationError(reason=reason % {
            'instance_uuid': instance_ref['uuid'],
            'dest': dest,
            'avail': avail,
            'mem_inst': mem_inst,
        })
def _get_compute_info(self, context, host):
    """Return the compute node record for *host*.

    :param context: security context
    :param host: hostname (must be a compute node)
    :return: the first compute_node entry of the host's first compute
             service record
    """
    services = db.service_get_all_compute_by_host(context, host)
    first_service = services[0]
    return first_service['compute_node'][0]
| apache-2.0 |
westinedu/newertrends | zinnia/tests/sitemaps.py | 3 | 3381 | """Test cases for Zinnia's sitemaps"""
from django.test import TestCase
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from tagging.models import Tag
from zinnia.models import Entry
from zinnia.models import Author
from zinnia.models import Category
from zinnia.managers import PUBLISHED
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
from zinnia.sitemaps import TagSitemap
class ZinniaSitemapsTestCase(TestCase):
    """Exercise the sitemap classes bundled with Zinnia."""
    urls = 'zinnia.tests.urls'

    def _publish_entry(self, index, tags):
        """Create one published entry wired to the test author, category
        and site, and return it."""
        entry = Entry.objects.create(
            title='My entry %d' % index,
            content='My content %d' % index,
            tags=tags,
            slug='my-entry-%d' % index,
            status=PUBLISHED)
        entry.authors.add(self.author)
        entry.categories.add(self.category)
        entry.sites.add(self.site)
        return entry

    def setUp(self):
        self.site = Site.objects.get_current()
        self.author = User.objects.create(username='admin',
                                          email='admin@example.com')
        self.category = Category.objects.create(title='Tests', slug='tests')
        # Two published entries: both tagged 'zinnia', only the first 'test'.
        self.entry_1 = self._publish_entry(1, 'zinnia, test')
        self.entry_2 = self._publish_entry(2, 'zinnia')

    def test_entry_sitemap(self):
        entry_map = EntrySitemap()
        self.assertEquals(len(entry_map.items()), 2)
        self.assertEquals(entry_map.lastmod(self.entry_1),
                          self.entry_1.last_update)

    def test_category_sitemap(self):
        category_map = CategorySitemap()
        self.assertEquals(len(category_map.items()), 1)
        self.assertEquals(category_map.lastmod(self.category),
                          self.entry_2.creation_date)
        # A category with no entries has no last-modification date.
        empty_category = Category.objects.create(title='New', slug='new')
        self.assertEquals(category_map.lastmod(empty_category), None)
        self.assertEquals(category_map.priority(self.category), '1.0')

    def test_author_sitemap(self):
        author_map = AuthorSitemap()
        authors = author_map.items()
        self.assertEquals(len(authors), 1)
        self.assertEquals(author_map.lastmod(authors[0]),
                          self.entry_2.creation_date)
        # An author with no entries has no last-modification date.
        idle_author = Author.objects.create(username='New',
                                            email='new@example.com')
        self.assertEquals(author_map.lastmod(idle_author), None)
        self.assertEquals(author_map.location(self.author), '/authors/admin/')

    def test_tag_sitemap(self):
        tag_map = TagSitemap()
        zinnia_tag = Tag.objects.get(name='zinnia')
        self.assertEquals(len(tag_map.items()), 2)
        self.assertEquals(tag_map.lastmod(zinnia_tag),
                          self.entry_2.creation_date)
        self.assertEquals(tag_map.priority(zinnia_tag), '1.0')
        self.assertEquals(tag_map.location(zinnia_tag), '/tags/zinnia/')

    def test_category_sitemap_zero_division_error(self):
        # With no entries left, priority computation must not divide by
        # zero and falls back to the default '0.5'.
        Entry.objects.all().delete()
        category_sitemap = CategorySitemap()
        category_sitemap.items()
        self.assertEquals(category_sitemap.priority(self.category), '0.5')
| bsd-3-clause |
ecosoft-odoo/odoo | addons/marketing_campaign/__openerp__.py | 260 | 3127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing Campaigns',
'version': '1.1',
'depends': ['marketing',
'document',
'email_template',
'decimal_precision'
],
'author': 'OpenERP SA',
'category': 'Marketing',
'description': """
This module provides leads automation through marketing campaigns (campaigns can in fact be defined on any resource, not just CRM Leads).
=========================================================================================================================================
The campaigns are dynamic and multi-channels. The process is as follows:
------------------------------------------------------------------------
* Design marketing campaigns like workflows, including email templates to
send, reports to print and send by email, custom actions
* Define input segments that will select the items that should enter the
campaign (e.g leads from certain countries.)
* Run your campaign in simulation mode to test it real-time or accelerated,
and fine-tune it
* You may also start the real campaign in manual mode, where each action
requires manual validation
* Finally launch your campaign live, and watch the statistics as the
campaign does everything fully automatically.
While the campaign runs you can of course continue to fine-tune the parameters,
input segments, workflow.
**Note:** If you need demo data, you can install the marketing_campaign_crm_demo
module, but this will also install the CRM application as it depends on
CRM Leads.
""",
'website': 'https://www.odoo.com/page/lead-automation',
'data': [
'marketing_campaign_view.xml',
'marketing_campaign_data.xml',
'marketing_campaign_workflow.xml',
'report/campaign_analysis_view.xml',
'security/marketing_campaign_security.xml',
'security/ir.model.access.csv'
],
'demo': ['marketing_campaign_demo.xml'],
'test': ['test/marketing_campaign.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
undefinedv/Jingubang | sqlmap/thirdparty/beautifulsoup/__init__.py | 30 | 1672 | #!/usr/bin/env python
#
# Copyright (c) 2004-2010, Leonard Richardson
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the the Beautiful Soup Consortium and All
# Night Kosher Bakery nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
#
pass
| gpl-3.0 |
crr0004/taiga-back | taiga/projects/tasks/admin.py | 13 | 2415 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from taiga.projects.attachments.admin import AttachmentInline
from . import models
class TaskAdmin(admin.ModelAdmin):
    """Admin configuration for Task objects.

    Foreign-key and many-to-many choice lists are narrowed to the
    project of the task currently being edited (cached by
    ``get_object``).
    """
    list_display = ["project", "milestone", "user_story", "ref", "subject",]
    list_display_links = ["ref", "subject",]
    list_filter = ["project"]
    # inlines = [AttachmentInline]

    def get_object(self, *args, **kwargs):
        # Cache the task being edited so the formfield_* hooks below can
        # scope their querysets to its project.
        # NOTE(review): the cache lives on the shared ModelAdmin
        # instance, not on the request -- confirm this is safe under
        # concurrent admin use.
        self.obj = super().get_object(*args, **kwargs)
        return self.obj

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Status/milestone/user-story choices come from the task's project.
        if (db_field.name in ["status", "milestone", "user_story"]
                and getattr(self, 'obj', None)):
            kwargs["queryset"] = db_field.related.parent_model.objects.filter(
                project=self.obj.project)
        # Owner/assignee must have a membership in the project.
        elif (db_field.name in ["owner", "assigned_to"]
                and getattr(self, 'obj', None)):
            kwargs["queryset"] = db_field.related.parent_model.objects.filter(
                memberships__project=self.obj.project)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        # Watchers must have a membership in the project as well.
        if (db_field.name in ["watchers"]
                and getattr(self, 'obj', None)):
            kwargs["queryset"] = db_field.related.parent_model.objects.filter(
                memberships__project=self.obj.project)
        return super().formfield_for_manytomany(db_field, request, **kwargs)


admin.site.register(models.Task, TaskAdmin)
| agpl-3.0 |
AkhilHector/Rex.Inc | .env/lib/python2.7/site-packages/pip/_vendor/requests/__init__.py | 412 | 1861 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
# Distribution metadata, kept in-module so ``requests.__version__`` and
# friends are importable at runtime.
__title__ = 'requests'
__version__ = '2.7.0'
__build__ = 0x020700
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    # pyOpenSSL (and friends) not installed; HTTPS still works, just
    # without the pyOpenSSL-backed SNI support.
    pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Minimal stand-in for logging.NullHandler on Python < 2.7: a
    # handler that silently drops every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
| mit |
bitmaintech/p2pool | SOAPpy/Types.py | 289 | 52214 | from __future__ import nested_scopes
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Types.py 1496 2010-03-04 23:46:17Z pooryorick $'
from version import __version__
import UserList
import base64
import cgi
import urllib
import copy
import re
import time
from types import *
# SOAPpy modules
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """Return True if *name* is private (starts with an underscore).

    Uses ``startswith`` instead of ``name[0]`` so the empty string is
    safely reported as not private instead of raising IndexError.
    """
    return name.startswith('_')
def isPublic(name):
    """Return True if *name* is public (does not start with underscore).

    Uses ``startswith`` instead of ``name[0]`` so the empty string is
    safely treated as public instead of raising IndexError.
    """
    return not name.startswith('_')
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Base class for all SOAP type wrappers.

    Stores the wrapped Python value (``_data``), the element name and
    namespace, XML attributes (``_attrs``) and a cached marshalled
    string (``_cache``).
    """
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        # anyType is abstract; only subclasses may be instantiated.
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"

        # name may be a (namespace, localname) pair or just a localname.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name

        self._typed = typed
        self._attrs = {}

        self._cache = None
        self._type = self._typeName()

        # Subclasses validate/normalize the value in _checkValueSpace.
        self._data = self._checkValueSpace(data)

        if attrs != None:
            self._setAttrs(attrs)

    def __str__(self):
        if hasattr(self,'_name') and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))

    __repr__ = __str__

    def _checkValueSpace(self, data):
        # Default: accept any value unchanged; subclasses override.
        return data

    def _marshalData(self):
        # Default serialization: plain str() of the wrapped value.
        return str(self._data)

    def _marshalAttrs(self, ns_map, builder):
        # Serialize the stored XML attributes as ' ns:name="value"' text.
        a = ''

        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], cgi.escape(str(value), 1))

        return a

    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"

        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"

        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"

        return attr

    def _getAttr(self, attr):
        # Return the attribute's value, or None if absent or malformed.
        attr = self._fixAttr(attr)

        try:
            return self._attrs[attr]
        except:
            return None

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)

        # Store byte strings as unicode for consistent serialization.
        if type(value) is StringType:
            value = unicode(value)

        self._attrs[attr] = value

    def _setAttrs(self, attrs):
        # Accept a flat key/value sequence, a dict, or another anyType
        # (whose attributes are copied).
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])

            return

        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"

        for attr, value in d.items():
            self._setAttr(attr, value)

    def _setMustUnderstand(self, val):
        # SOAP envelope mustUnderstand attribute helper.
        self._setAttr((NS.ENV, "mustUnderstand"), val)

    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))

    def _setActor(self, val):
        # SOAP envelope actor attribute helper.
        self._setAttr((NS.ENV, "actor"), val)

    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))

    def _typeName(self):
        # Class names follow the "<xsdname>Type" convention; strip the
        # trailing "Type" to get the XSD type name.
        return self.__class__.__name__[:-4]

    def _validNamespaceURI(self, URI, strict):
        # Untyped values carry no namespace at all.
        if not hasattr(self, '_typed') or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
            "not a valid namespace for type %s" % self._type
class voidType(anyType):
    """Wrapper for an empty (void) SOAP value; inherits all behavior."""
    pass
class stringType(anyType):
    """xsd:string wrapper; the value must be a str or unicode object."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type:" % self._type

        return data

    def _marshalData(self):
        # Strings serialize as-is (no str() conversion needed).
        return self._data
class untypedType(stringType):
    """A string value serialized without an xsi:type attribute."""
    def __init__(self, data = None, name = None, attrs = None):
        # typed=0 suppresses type information in the output.
        stringType.__init__(self, data, name, 0, attrs)
# Simple XSD string-derived types; all share stringType's value space
# and differ only in the XSD type name derived from the class name.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    """xsd:token: a string without tabs/newlines, leading/trailing or
    doubled spaces (rejected via the regex below)."""
    _validURIs = (NS.XSD2, NS.XSD3)
    __invalidre = '[\n\t]|^ | $| '

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the class-level pattern lazily, on first use.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type

        return data
class normalizedStringType(anyType):
    """xsd:normalizedString: a string with no newline, carriage return
    or tab characters."""
    _validURIs = (NS.XSD3,)
    __invalidre = '[\n\r\t]'

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type

        # Compile the class-level pattern lazily, on first use.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)

        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type

        return data
class CDATAType(normalizedStringType):
    """Older (XSD2) name for normalizedString."""
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """xsd:boolean wrapper; stores 0 or 1, serializes 'false'/'true'."""
    def __int__(self):
        return self._data

    __nonzero__ = __int__

    def _marshalData(self):
        # _data is normalized to 0/1 below, so it indexes this pair.
        return ['false', 'true'][self._data]

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Accept both numeric and XML lexical forms.
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1
        raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
    """xsd:decimal wrapper; accepts int, long or float values."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        if type(data) not in (IntType, LongType, FloatType):
            # NOTE(review): raises Error here where sibling types raise
            # ValueError -- looks inconsistent; confirm before relying
            # on the exception type.
            raise Error, "invalid %s value" % self._type

        return data
class floatType(anyType):
    """xsd:float wrapper; value must fit in IEEE single precision."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Bounds are the largest finite IEEE-754 single-precision value.
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -3.4028234663852886E+38 or \
            data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))

        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class doubleType(anyType):
    """xsd:double wrapper; value must fit in IEEE double precision."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        # Bounds are the largest finite IEEE-754 double-precision value.
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -1.7976931348623158E+308 or \
            data > 1.7976931348623157E+308:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))

        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class durationType(anyType):
    """xsd:duration wrapper.

    The value is normalized to a 6-tuple (years, months, days, hours,
    minutes, seconds) and serialized in ISO-8601 duration form
    (e.g. 'P1Y2MT3.5S').
    """
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type

        try:
            # A tuple or a scalar is OK, but make them into a list

            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]

            if len(data) > 6:
                raise Exception, "too many values"

            # Now check the types of all the components, and find
            # the first nonzero element along the way.

            f = -1

            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue

                if type(data[i]) not in \
                    (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i

                if data[i] and f == -1:
                    f = i

            # If they're all 0, just use zero seconds.

            if f == -1:
                self._cache = 'PT0S'

                return (0,) * 6

            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.

            d = -1

            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i

            # Pad the list on the left if necessary.

            if len(data) < 6:
                n = 6 - len(data)

                f += n
                d += n

                data = [0] * n + data

            # Save index of the first nonzero element and the decimal
            # element for _marshalData.

            self.__firstnonzero = f
            self.__decimal = d

        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            t = 0

            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'

            t = 0

            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    # 'T' separates the date part from the time part.
                    if i > 2 and not t:
                        s += 'T'
                        t = 1

                    # Only the designated decimal element keeps its
                    # fractional part.
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))

                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]

            self._cache = s

        return self._cache
class timeDurationType(durationType):
    """Older (XSD/XSD2/ENC) name for xsd:duration."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """xsd:dateTime wrapper.

    Accepts an epoch timestamp (int/long/float), a time tuple/list, or
    None (meaning "now"); normalized to a (Y, M, D, h, m, s) tuple and
    serialized as 'YYYY-MM-DDThh:mm:ss[.frac]Z' (UTC).
    """
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.time()

            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Carry the sub-second fraction into the seconds field.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"

                data = list(data[:6])

                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            # Append fractional seconds without the leading '0'.
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'

            self._cache = s

        return self._cache
class recurringInstantType(anyType):
    """xsd:recurringInstant wrapper.

    Like dateTime, but leftmost components may be None to express a
    recurring instant; omitted parts serialize as '-' placeholders.
    """
    _validURIs = (NS.XSD,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Carry the sub-second fraction into the seconds field.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"

                data = list(data[:6])

                if len(data) < 6:
                    data += [0] * (6 - len(data))

                # Find the first non-None element; only a leftmost run
                # of None values is allowed.
                f = len(data)

                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                    else:
                        f = i
                        break

                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''

            # Omitted year serializes as '--'; otherwise pad/sign it.
            if not e[0]:
                e[0] = '--'
            else:
                if e[0] < 0:
                    neg = '-'
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = '-' + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]

            # Omitted components serialize as '-' placeholders.
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])

                    e[i] = "%02d" % e[i]

            # Append fractional seconds without the leading '0'.
            if d[5]:
                f = abs(d[5] - int(d[5]))

                if f:
                    e[5] += ("%g" % f)[1:]

            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))

            self._cache = s

        return self._cache
class timeInstantType(dateTimeType):
    """Older (XSD/XSD2/ENC) name for xsd:dateTime."""
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)

class timePeriodType(dateTimeType):
    """Older (XSD2/ENC) dateTime-valued period type."""
    _validURIs = (NS.XSD2, NS.ENC)
class timeType(anyType):
    """xsd:time wrapper.

    Normalized to an (hour, minute, second) tuple and serialized as
    'hh:mm:ss[.frac]Z' (UTC).
    """
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                # Carry the sub-second fraction into the seconds field.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception, "too many values"

                # Pad to a full date shape so cleanDate can validate,
                # then keep only the time components.
                data = [None, None, None] + list(data)

                if len(data) < 6:
                    data += [0] * (6 - len(data))

                cleanDate(data, 3)

                data = data[3:]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            #s = ''
            #
            #s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            s = "%02d:%02d:%02d" % d
            # Append fractional seconds without the leading '0'.
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'

            self._cache = s

        return self._cache
class dateType(anyType):
    """xsd:date wrapper.

    Normalized to a (year, month, day) tuple and serialized as
    'YYYY-MM-DDZ'.
    """
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"

                data = list(data)

                # Default missing month/day to 1, pad time to zero so
                # cleanDate can validate, then keep only the date part.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]

                data += [0, 0, 0]

                cleanDate(data)

                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class gYearMonthType(anyType):
    """xsd:gYearMonth wrapper.

    Normalized to a (year, month) tuple and serialized as 'YYYY-MMZ'.
    """
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"

                data = list(data)

                # Default a missing month to 1, pad day/time so
                # cleanDate can validate, then keep year and month.
                if len(data) < 2:
                    data += [1, 1][len(data):]

                data += [1, 0, 0, 0]

                cleanDate(data)

                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class gYearType(anyType):
    """xsd:gYear wrapper.

    Normalized to a single integral year and serialized as 'YYYYZ'.
    """
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]

            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"

                # A float year is accepted only if it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])

                    if s != data[0]:
                        raise Exception, "not integral"

                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return data[0]

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class centuryType(anyType):
    """xsd:century wrapper (XSD2/ENC); value is an integral century.

    NOTE(review): the default paths divide a *tuple* slice by 100
    (``time.gmtime(...)[0:1] / 100`` and ``data[0:1] / 100``), which
    raises TypeError -- the None/9-tuple defaults look broken upstream;
    confirm before relying on them.
    """
    _validURIs = (NS.XSD2, NS.ENC)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1] / 100
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]

            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1] / 100
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"

                # A float century is accepted only if it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])

                    if s != data[0]:
                        raise Exception, "not integral"

                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return data[0]

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%02dZ" % abs(d)
            if d < 0:
                s = '-' + s

            self._cache = s

        return self._cache
class yearType(gYearType):
    """Older (XSD2/ENC) name for xsd:gYear."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthDayType(anyType):
    """xsd:gMonthDay wrapper.

    Normalized to a (month, day) tuple and serialized as '--MM-DDZ'.
    """
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"

                data = list(data)

                # Default missing parts to 1, pad to a full date shape
                # for cleanDate, then keep month and day.
                if len(data) < 2:
                    data += [1, 1][len(data):]

                data = [0] + data + [0, 0, 0]

                cleanDate(data, 1)

                data = data[1:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data

        return self._cache
class recurringDateType(gMonthDayType):
    """Older (XSD2/ENC) name for xsd:gMonthDay."""
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """xsd:gMonth wrapper; an integral month in 1..12, serialized as
    '--MM--Z'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]

            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"

                # A float month is accepted only if it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])

                    if s != data[0]:
                        raise Exception, "not integral"

                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"

                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return data[0]

    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data

        return self._cache
class monthType(gMonthType):
    """Older (XSD2/ENC) name for xsd:gMonth."""
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """xsd:gDay wrapper; an integral day in 1..31, serialized as
    '---DDZ'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]

            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"

                # A float day is accepted only if it is integral.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])

                    if s != data[0]:
                        raise Exception, "not integral"

                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"

                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)

        return data[0]

    def _marshalData(self):
        if self._cache == None:
            self._cache = "---%02dZ" % self._data

        return self._cache
class recurringDayType(gDayType):
    # 1999-schema / SOAP-encoding alias for gDay.
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """XSD hexBinary: an opaque byte string marshalled as hex digits."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # A concrete string is required; there is no sensible default.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache
class base64BinaryType(anyType):
    """XSD base64Binary: an opaque byte string marshalled as base64."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # A concrete string is required; there is no sensible default.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache
class base64Type(base64BinaryType):
    # SOAP-encoding alias for base64Binary.
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """Pre-schema 'binary' type: bytes with a selectable wire encoding.

    The 'encoding' attribute chooses the representation and must be
    either 'base64' or 'hex'.
    """
    _validURIs = (NS.XSD, NS.ENC)
    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)
    def _marshalData(self):
        # Marshal according to the current encoding attribute.
        if self._cache == None:
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache
    def _checkValueSpace(self, data):
        # A concrete string is required; there is no sensible default.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            # Only an unqualified 'encoding' attribute with a known value
            # is accepted; changing it invalidates the marshalling cache.
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """XSD anyURI: a URI string, percent-quoted when marshalled."""
    _validURIs = (NS.XSD3,)
    def _checkValueSpace(self, data):
        # A concrete string is required; there is no sensible default.
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache
class uriType(anyURIType):
    # 1999-schema alias for anyURI.
    _validURIs = (NS.XSD,)
class uriReferenceType(anyURIType):
    # 2000-schema alias for anyURI.
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    """XSD NOTATION: abstract -- must be subclassed before instantiation."""
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error, "a NOTATION can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """XSD ENTITIES: a list of string tokens, marshalled space-separated."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # A single string becomes a one-element tuple.
        if type(data) in (StringType, UnicodeType):
            return (data,)
        # Otherwise require a sequence containing only strings.
        if type(data) not in (ListType, TupleType) or \
            filter (lambda x: type(x) not in (StringType, UnicodeType), data):
            raise AttributeError, "invalid %s type" % self._type
        return data
    def _marshalData(self):
        # Tokens are joined with single spaces on the wire.
        return ' '.join(self._data)
class IDREFSType(ENTITIESType): pass  # Same token-list value space as ENTITIES.
class NMTOKENSType(ENTITIESType): pass  # Same token-list value space as ENTITIES.
class integerType(anyType):
    """XSD integer: an unbounded integer value."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType):
            raise ValueError, "invalid %s value" % self._type
        return data
class nonPositiveIntegerType(anyType):
    """XSD nonPositiveInteger: integer value <= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data > 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Positive_IntegerType(nonPositiveIntegerType):
    # 1999-schema spelling: marshals under the hyphenated type name.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    """XSD negativeInteger: integer value < 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class negative_IntegerType(negativeIntegerType):
    # 1999-schema spelling: marshals under the hyphenated type name.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
    """XSD long: integer constrained to the signed 64-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [-2**63, 2**63 - 1].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -9223372036854775808L or \
            data > 9223372036854775807L:
            raise ValueError, "invalid %s value" % self._type
        return data
class intType(anyType):
    """XSD int: integer constrained to the signed 32-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [-2**31, 2**31 - 1].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -2147483648L or \
            data > 2147483647L:
            raise ValueError, "invalid %s value" % self._type
        return data
class shortType(anyType):
    """XSD short: integer constrained to the signed 16-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [-32768, 32767].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -32768 or \
            data > 32767:
            raise ValueError, "invalid %s value" % self._type
        return data
class byteType(anyType):
    """XSD byte: integer constrained to the signed 8-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [-128, 127].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -128 or \
            data > 127:
            raise ValueError, "invalid %s value" % self._type
        return data
class nonNegativeIntegerType(anyType):
    """XSD nonNegativeInteger: integer value >= 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class non_Negative_IntegerType(nonNegativeIntegerType):
    # 1999-schema spelling: marshals under the hyphenated type name.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'non-negative-integer'
class unsignedLongType(anyType):
    """XSD unsignedLong: integer in the unsigned 64-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [0, 2**64 - 1].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 18446744073709551615L:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedIntType(anyType):
    """XSD unsignedInt: integer in the unsigned 32-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [0, 2**32 - 1].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 4294967295L:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedShortType(anyType):
    """XSD unsignedShort: integer in the unsigned 16-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [0, 65535].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 65535:
            raise ValueError, "invalid %s value" % self._type
        return data
class unsignedByteType(anyType):
    """XSD unsignedByte: integer in the unsigned 8-bit range."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        # Range: [0, 255].
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 255:
            raise ValueError, "invalid %s value" % self._type
        return data
class positiveIntegerType(anyType):
    """XSD positiveInteger: integer value > 0."""
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data <= 0:
            raise ValueError, "invalid %s value" % self._type
        return data
class positive_IntegerType(positiveIntegerType):
    # 1999-schema spelling: marshals under the hyphenated type name.
    _validURIs = (NS.XSD,)
    def _typeName(self):
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
def __init__(self, data = None, name = None, typed = 1, attrs = None):
if self.__class__ == compoundType:
raise Error, "a compound can't be instantiated directly"
anyType.__init__(self, data, name, typed, attrs)
self._keyord = []
if type(data) == DictType:
self.__dict__.update(data)
def _aslist(self, item=None):
if item is not None:
return self.__dict__[self._keyord[item]]
else:
return map( lambda x: self.__dict__[x], self._keyord)
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.__dict__[item]
else:
retval = {}
def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
if hasattr(self, '_keyord'):
map( fun, self._keyord)
else:
for name in dir(self):
if isPublic(name):
retval[name] = getattr(self,name)
return retval
def __getitem__(self, item):
if type(item) == IntType:
return self.__dict__[self._keyord[item]]
else:
return getattr(self, item)
def __len__(self):
return len(self._keyord)
def __nonzero__(self):
return 1
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs = None):
if name in self._keyord:
if type(self.__dict__[name]) != ListType:
self.__dict__[name] = [self.__dict__[name]]
self.__dict__[name].append(value)
else:
self.__dict__[name] = value
self._keyord.append(name)
def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
if subpos == 0 and type(self.__dict__[name]) != ListType:
self.__dict__[name] = value
else:
self.__dict__[name][subpos] = value
# only add to key order list if it does not already
# exist in list
if not (name in self._keyord):
if pos < len(x):
self._keyord[pos] = name
else:
self._keyord.append(name)
def _getItemAsList(self, name, default = []):
try:
d = self.__dict__[name]
except:
return default
if type(d) == ListType:
return d
return [d]
def __str__(self):
return anyType.__str__(self) + ": " + str(self._asdict())
def __repr__(self):
return self.__str__()
class structType(compoundType):
    # A SOAP struct; all behavior is inherited from compoundType.
    pass
class headerType(structType):
    """SOAP Envelope Header element (element name fixed to "Header")."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)
class bodyType(structType):
    """SOAP Envelope Body element (element name fixed to "Body")."""
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
#FIXME: why is this occuring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
    """Array whose elements all share one declared element type."""
    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):
        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)
        # Record the per-element type and whether it is a complex type.
        self._typed = 1
        self._type = typed
        self._complexType = complexType
class faultType(structType, Error):
    """SOAP Fault: both a marshallable struct and a Python exception.

    Carries faultcode, faultstring and an optional detail payload.
    """
    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        # detail is only set when provided, so its mere presence is
        # meaningful (see _setDetail / __repr__).
        if detail != None:
            self.detail = detail
        structType.__init__(self, None, 0)
    def _setDetail(self, detail = None):
        # Set the detail attribute, or remove it when called with None.
        if detail != None:
            self.detail = detail
        else:
            try: del self.detail
            except AttributeError: pass
    def __repr__(self):
        if getattr(self, 'detail', None) != None:
            return "<Fault %s: %s: %s>" % (self.faultcode,
                                           self.faultstring,
                                           self.detail)
        else:
            return "<Fault %s: %s>" % (self.faultcode, self.faultstring)
    __str__ = __repr__
    def __call__(self):
        # NOTE(review): raises AttributeError when no detail was ever set;
        # presumably only called on faults carrying detail -- confirm.
        return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
    """Generic SOAP fault raised when no more specific mapping applies.

    Exposes the fault code, fault string and detail individually, and
    bundles them (with a fixed tag) in self.value.
    """
    def __init__(self, code="", string="", detail=None):
        # Keep the parts both as attributes and as one tuple.
        self.code, self.string, self.detail = code, string, detail
        self.value = ("SOAPpy SOAP Exception", code, string, detail)

    def __str__(self):
        return "%r" % (self.value,)
class RequiredHeaderMismatch(Exception):
    """Raised when a SOAP fault reports "Required Header Misunderstood"."""
    def __init__(self, value):
        # The fault's detail payload, kept verbatim.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodNotFound(Exception):
    """Raised when a SOAP fault reports "Method Not Found".

    The detail string is expected as "<message>:<detail>"; the halves
    are exposed as .value and .detail.
    """
    def __init__(self, value):
        # Split on the first colon only, so details containing further
        # colons survive (the original two-way unpack raised ValueError
        # on such input).
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail

    def __str__(self):
        # Bug fix: repr() takes a single argument; the original passed
        # two, raising TypeError whenever this exception was printed.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when a SOAP fault reports "Authorization Failed"."""
    def __init__(self, value):
        # The fault's detail payload, kept verbatim.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodFailed(Exception):
    """Raised when a SOAP fault reports "Method Failed"."""
    def __init__(self, value):
        # The fault's detail payload, kept verbatim.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
#######
# Convert complex SOAPpy objects to native python equivalents
#######

def simplify(object, level=0):
    """
    Convert the SOAPpy objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType --> raise python exception
    - arrayType --> array
    - compoundType --> dictionary
    """
    # Recursion guard: stop converting past 10 levels of nesting.
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Map well-known fault strings onto specific exception classes;
        # anything else becomes a generic SOAPException.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring,
                               object.detail)
            raise se
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level+1)
        return data
    elif isinstance( object, compoundType ) or isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                data[k] = simplify(data[k], level=level+1)
        return data
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth (the original
                # restarted at level 0 here, defeating the guard above).
                object[k] = simplify(object[k], level=level+1)
        return object
    elif type(object)==list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here too.
            object[k] = simplify(object[k], level=level+1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types.

    Conversions:
    - faultType --> raise python exception
    - arrayType --> array
    - compoundType --> dictionary
    """
    # Recursion guard: stop converting past 10 levels of nesting.
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Simplify the fault's public members in place, then raise it.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level+1))
        raise object
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level+1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level+1))
    elif isinstance( object, compoundType ) :
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                object[k] = simplify(data[k], level=level+1)
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # Bug fix: propagate the recursion depth (the original
                # restarted at level 0 here, defeating the guard above).
                object[k] = simplify(object[k], level=level+1)
    elif type(object)==list:
        for k in range(len(object)):
            # Bug fix: propagate the recursion depth here too.
            object[k] = simplify(object[k], level=level+1)

    return object
| gpl-3.0 |
fzheng/codejam | lib/python2.7/site-packages/IPython/lib/demo.py | 21 | 20690 | """Module for interactive demos using IPython.
This module implements a few classes for running Python scripts interactively
in IPython for demonstrations. With very simple markup (a few tags in
comments), you can control points where the script stops executing and returns
control to IPython.
Provided classes
----------------
The classes are (see their docstrings for further details):
- Demo: pure python demos
- IPythonDemo: demos with input to be processed by IPython as if it had been
typed interactively (so magics work, as well as any other special syntax you
may have added via input prefilters).
- LineDemo: single-line version of the Demo class. These demos are executed
one line at a time, and require no markup.
- IPythonLineDemo: IPython version of the LineDemo class (the demo is
executed a line at a time, but processed via IPython).
- ClearMixin: mixin to make Demo classes with less visual clutter. It
declares an empty marquee and a pre_cmd that clears the screen before each
block (see Subclassing below).
- ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
classes.
Inheritance diagram:
.. inheritance-diagram:: IPython.lib.demo
:parts: 3
Subclassing
-----------
The classes here all include a few methods meant to make customization by
subclassing more convenient. Their docstrings below have some more details:
- marquee(): generates a marquee to provide visible on-screen markers at each
block start and end.
- pre_cmd(): run right before the execution of each block.
- post_cmd(): run right after the execution of each block. If the block
raises an exception, this is NOT called.
Operation
---------
The file is run in its own empty namespace (though you can pass it a string of
arguments as if in a command line environment, and it will see those as
sys.argv). But at each stop, the global IPython namespace is updated with the
current internal demo namespace, so you can work interactively with the data
accumulated so far.
By default, each block of code is printed (with syntax highlighting) before
executing it and you have to confirm execution. This is intended to show the
code to an audience first so you can discuss it, and only proceed with
execution once you agree. There are a few tags which allow you to modify this
behavior.
The supported tags are:
# <demo> stop
Defines block boundaries, the points where IPython stops execution of the
file and returns to the interactive prompt.
You can optionally mark the stop tag with extra dashes before and after the
word 'stop', to help visually distinguish the blocks in a text editor:
# <demo> --- stop ---
# <demo> silent
Make a block execute silently (and hence automatically). Typically used in
cases where you have some boilerplate or initialization code which you need
executed but do not want to be seen in the demo.
# <demo> auto
Make a block execute automatically, but still being printed. Useful for
simple code which does not warrant discussion, since it avoids the extra
manual confirmation.
# <demo> auto_all
This tag can _only_ be in the first block, and if given it overrides the
individual auto tags to make the whole demo fully automatic (no block asks
for confirmation). It can also be given at creation time (or the attribute
set later) to override what's in the file.
While _any_ python file can be run as a Demo instance, if there are no stop
tags the whole file will run in a single block (no different than calling
first %pycat and then %run). The minimal markup to make this useful is to
place a set of stop tags; the other tags are only there to let you fine-tune
the execution.
This is probably best explained with the simple example file below. You can
copy this into a file named ex_demo.py, and try running it via::
from IPython.demo import Demo
d = Demo('ex_demo.py')
d()
Each time you call the demo object, it runs the next block. The demo object
has a few useful methods for navigation, like again(), edit(), jump(), seek()
and back(). It can be reset for a new run via reset() or reloaded from disk
(in case you've edited the source) via reload(). See their docstrings below.
Note: To make this simpler to explore, a file called "demo-exercizer.py" has
been added to the "docs/examples/core" directory. Just cd to this directory in
an IPython session, and type::
%run demo-exercizer.py
and then follow the directions.
Example
-------
The following is a very simple example of a valid demo file.
::
#################### EXAMPLE DEMO <ex_demo.py> ###############################
'''A simple interactive demo to illustrate the use of IPython's Demo class.'''
print 'Hello, welcome to an interactive IPython demo.'
# The mark below defines a block boundary, which is a point where IPython will
# stop execution and return to the interactive prompt. The dashes are actually
# optional and used only as a visual aid to clearly separate blocks while
# editing the demo code.
# <demo> stop
x = 1
y = 2
# <demo> stop
# the mark below makes this block as silent
# <demo> silent
print 'This is a silent block, which gets executed but not printed.'
# <demo> stop
# <demo> auto
print 'This is an automatic block.'
print 'It is executed without asking for confirmation, but printed.'
z = x+y
print 'z=',x
# <demo> stop
# This is just another normal block.
print 'z is now:', z
print 'bye!'
################### END EXAMPLE DEMO <ex_demo.py> ############################
"""
from __future__ import unicode_literals
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#
#*****************************************************************************
from __future__ import print_function
import os
import re
import shlex
import sys
from IPython.utils import io
from IPython.utils.text import marquee
from IPython.utils import openpy
from IPython.utils import py3compat
__all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
class DemoError(Exception): pass  # Raised for demo-specific error conditions.
def re_mark(mark):
    """Return a compiled multiline regex matching a '# <demo> <mark>' line."""
    pattern = r'^\s*#\s+<demo>\s+%s\s*$' % mark
    return re.compile(pattern, re.MULTILINE)
class Demo(object):
re_stop = re_mark('-*\s?stop\s?-*')
re_silent = re_mark('silent')
re_auto = re_mark('auto')
re_auto_all = re_mark('auto_all')
    def __init__(self,src,title='',arg_str='',auto_all=None):
        """Make a new demo object. To run the demo, simply call the object.

        See the module docstring for full details and an example (you can use
        IPython.Demo? in IPython to see it).

        Inputs:

        - src is either a file, or file-like object, or a
          string that can be resolved to a filename.

        Optional inputs:

        - title: a string to use as the demo name. Of most use when the demo
          you are making comes from an object that has no filename, or if you
          want an alternate denotation distinct from the filename.

        - arg_str(''): a string of arguments, internally converted to a list
          just like sys.argv, so the demo script can see a similar
          environment.

        - auto_all(None): global flag to run all blocks automatically without
          confirmation. This attribute overrides the block-level tags and
          applies to the whole demo. It is an attribute of the object, and
          can be changed at runtime simply by reassigning it to a boolean
          value.
        """
        if hasattr(src, "read"):
            # It seems to be a file or a file-like object
            self.fname = "from a file-like object"
            if title == '':
                self.title = "from a file-like object"
            else:
                self.title = title
        else:
            # Assume it's a string or something that can be converted to one
            self.fname = src
            if title == '':
                (filepath, filename) = os.path.split(src)
                self.title = filename
            else:
                self.title = title
        # Emulate a command line so the demo script sees a normal sys.argv.
        self.sys_argv = [src] + shlex.split(arg_str)
        self.auto_all = auto_all
        self.src = src
        # get a few things from ipython. While it's a bit ugly design-wise,
        # it ensures that things like color scheme and the like are always in
        # sync with the ipython mode being used. This class is only meant to
        # be used inside ipython anyways, so it's OK.
        ip = get_ipython() # this is in builtins whenever IPython is running
        self.ip_ns = ip.user_ns
        self.ip_colorize = ip.pycolorize
        self.ip_showtb = ip.showtraceback
        self.ip_run_cell = ip.run_cell
        self.shell = ip
        # load user data and initialize data structures
        self.reload()
def fload(self):
"""Load file object."""
# read data and parse into blocks
if hasattr(self, 'fobj') and self.fobj is not None:
self.fobj.close()
if hasattr(self.src, "read"):
# It seems to be a file or a file-like object
self.fobj = self.src
else:
# Assume it's a string or something that can be converted to one
self.fobj = openpy.open(self.fname)
    def reload(self):
        """Reload source from disk and initialize state."""
        self.fload()
        # Strip any PEP 263 coding cookie, then split the source into
        # blocks at the '# <demo> stop' markers.
        self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
        src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
        self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
        self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
        # if auto_all is not given (def. None), we read it from the file
        if self.auto_all is None:
            self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
        else:
            self.auto_all = bool(self.auto_all)
        # Clean the sources from all markup so it doesn't get displayed when
        # running the demo
        src_blocks = []
        auto_strip = lambda s: self.re_auto.sub('',s)
        for i,b in enumerate(src_b):
            if self._auto[i]:
                src_blocks.append(auto_strip(b))
            else:
                src_blocks.append(b)
        # remove the auto_all marker
        src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
        self.nblocks = len(src_blocks)
        self.src_blocks = src_blocks
        # also build syntax-highlighted source
        self.src_blocks_colored = list(map(self.ip_colorize,self.src_blocks))
        # ensure clean namespace and seek offset
        self.reset()
    def reset(self):
        """Reset the namespace and seek pointer to restart the demo"""
        self.user_ns = {}       # fresh namespace for the demo's code
        self.finished = False
        self.block_index = 0
def _validate_index(self,index):
if index<0 or index>=self.nblocks:
raise ValueError('invalid block index %s' % index)
    def _get_index(self,index):
        """Get the current block index, validating and checking status.
        Returns None if the demo is finished"""
        if index is None:
            # No explicit index: report completion or use current position.
            if self.finished:
                print('Demo finished. Use <demo_name>.reset() if you want to rerun it.', file=io.stdout)
                return None
            index = self.block_index
        else:
            self._validate_index(index)
        return index
def seek(self,index):
"""Move the current seek pointer to the given block.
You can use negative indices to seek from the end, with identical
semantics to those of Python lists."""
if index<0:
index = self.nblocks + index
self._validate_index(index)
self.block_index = index
self.finished = False
def back(self,num=1):
"""Move the seek pointer back num blocks (default is 1)."""
self.seek(self.block_index-num)
def jump(self,num=1):
"""Jump a given number of blocks relative to the current one.
The offset can be positive or negative, defaults to 1."""
self.seek(self.block_index+num)
def again(self):
"""Move the seek pointer back one block and re-execute."""
self.back(1)
self()
    def edit(self,index=None):
        """Edit a block.
        If no number is given, use the last block executed.
        This edits the in-memory copy of the demo, it does NOT modify the
        original source file. If you want to do that, simply open the file in
        an editor and use reload() when you make changes to the file. This
        method is meant to let you change a block during a demonstration for
        explanatory purposes, without damaging your original script."""
        index = self._get_index(index)
        if index is None:
            return
        # decrease the index by one (unless we're at the very beginning), so
        # that the default demo.edit() call opens up the sblock we've last run
        if index>0:
            index -= 1
        # Hand the block to the user's configured editor via a temp file.
        filename = self.shell.mktempfile(self.src_blocks[index])
        self.shell.hooks.editor(filename,1)
        with open(filename, 'r') as f:
            new_block = f.read()
        # update the source and colored block
        self.src_blocks[index] = new_block
        self.src_blocks_colored[index] = self.ip_colorize(new_block)
        self.block_index = index
        # call to run with the newly edited index
        self()
    def show(self,index=None):
        """Show a single block on screen"""
        index = self._get_index(index)
        if index is None:
            return
        # Marquee header identifying the block and how many remain.
        print(self.marquee('<%s> block # %s (%s remaining)' %
                           (self.title,index,self.nblocks-index-1)), file=io.stdout)
        print(self.src_blocks_colored[index], file=io.stdout)
        sys.stdout.flush()
def show_all(self):
    """Show entire demo on screen, block by block.

    Silent blocks are labelled as such in their marquee header.  Output
    goes to the IPython ``io.stdout`` stream.
    """
    # Fix: the original bound an unused local (fname = self.title); removed.
    title = self.title
    nblocks = self.nblocks
    silent = self._silent
    marquee = self.marquee
    for index, block in enumerate(self.src_blocks_colored):
        remaining = nblocks - index - 1
        if silent[index]:
            print(marquee('<%s> SILENT block # %s (%s remaining)' %
                          (title, index, remaining)), file=io.stdout)
        else:
            print(marquee('<%s> block # %s (%s remaining)' %
                          (title, index, remaining)), file=io.stdout)
        print(block, end=' ', file=io.stdout)
    sys.stdout.flush()
def run_cell(self, source):
    """Execute a string with one or more lines of code"""
    namespace = self.user_ns
    exec(source, namespace)
def __call__(self,index=None):
    """run a block of the demo.

    If index is given, it should be an integer >=1 and <= nblocks.  This
    means that the calling convention is one off from typical Python
    lists.  The reason for the inconsistency is that the demo always
    prints 'Block n/N, and N is the total, so it would be very odd to use
    zero-indexing here."""
    index = self._get_index(index)
    if index is None:
        return
    try:
        marquee = self.marquee
        next_block = self.src_blocks[index]
        self.block_index += 1
        if self._silent[index]:
            print(marquee('Executing silent block # %s (%s remaining)' %
                          (index,self.nblocks-index-1)), file=io.stdout)
        else:
            self.pre_cmd()
            self.show(index)
            if self.auto_all or self._auto[index]:
                print(marquee('output:'), file=io.stdout)
            else:
                # Interactive mode: any non-empty reply aborts this block.
                print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ', file=io.stdout)
                ans = py3compat.input().strip()
                if ans:
                    print(marquee('Block NOT executed'), file=io.stdout)
                    return
        try:
            # Temporarily install the demo's argv so the block sees the
            # command-line arguments it was configured with.
            save_argv = sys.argv
            sys.argv = self.sys_argv
            self.run_cell(next_block)
            self.post_cmd()
        finally:
            sys.argv = save_argv
    except:
        # Broad catch on purpose: show the block's traceback via IPython
        # instead of letting a demo error propagate to the caller.
        self.ip_showtb(filename=self.fname)
    else:
        # On success, reflect the block's bindings into the IPython ns.
        self.ip_ns.update(self.user_ns)
    if self.block_index == self.nblocks:
        mq1 = self.marquee('END OF DEMO')
        if mq1:
            # avoid spurious print >>io.stdout,s if empty marquees are used
            print(file=io.stdout)
            print(mq1, file=io.stdout)
        print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'), file=io.stdout)
        self.finished = True
# These methods are meant to be overridden by subclasses who may wish to
# customize the behavior of of their demos.
def marquee(self,txt='',width=78,mark='*'):
    """Return the input string centered in a 'marquee'.

    Delegates to the module-level marquee() helper; subclasses (e.g.
    ClearMixin) override this to change or suppress the framing.
    """
    return marquee(txt,width,mark)
def pre_cmd(self):
    """Method called before executing each block.

    No-op hook by default; subclasses override it (e.g. ClearMixin
    clears the terminal here).
    """
    pass
def post_cmd(self):
    """Method called after executing each block.

    No-op hook by default; provided for subclass customization.
    """
    pass
class IPythonDemo(Demo):
    """Interactive demo whose blocks run through IPython's input machinery.

    Unlike the base Demo class, which feeds each block straight to
    ``exec``, this variant hands the source to the active IPython shell,
    so any input filters that are in place get applied to each block.

    If your interactive environment exposes special input processing, use
    this class to write demo scripts that behave exactly as if the blocks
    had been typed at the interactive prompt.  The base Demo class, by
    contrast, requires the input to be valid, pure Python code.
    """

    def run_cell(self, source):
        """Execute a string with one or more lines of code"""
        # Route through the shell rather than exec() so IPython-specific
        # syntax (magics, shell escapes) works inside demo blocks.
        self.shell.run_cell(source)
class LineDemo(Demo):
    """Demo where each line is executed as a separate block.

    The input script should be valid Python code.

    This class doesn't require any markup at all, and it's meant for simple
    scripts (with no nesting or any kind of indentation) which consist of
    multiple lines of input to be executed, one at a time, as if they had been
    typed in the interactive prompt.

    Note: the input can not have *any* indentation, which means that only
    single-lines of input are accepted, not even function definitions are
    valid."""

    def reload(self):
        """Reload source from disk and initialize state."""
        # read data and parse into blocks
        self.fload()
        lines = self.fobj.readlines()
        # Every non-blank line becomes its own block.
        src_b = [l for l in lines if l.strip()]
        nblocks = len(src_b)
        self.src = ''.join(lines)
        self._silent = [False]*nblocks
        self._auto = [True]*nblocks
        self.auto_all = True
        self.nblocks = nblocks
        self.src_blocks = src_b
        # also build syntax-highlighted source
        # Fix: materialize the map() — on Python 3 it returns a lazy
        # iterator, and show()/show_all() index src_blocks_colored, which
        # would raise TypeError on a map object.
        self.src_blocks_colored = list(map(self.ip_colorize, self.src_blocks))
        # ensure clean namespace and seek offset
        self.reset()
class IPythonLineDemo(IPythonDemo,LineDemo):
    """Variant of the LineDemo class whose input is processed by IPython.

    Pure mixin combination: LineDemo's line-per-block parsing with
    IPythonDemo's run_cell() execution.
    """
    pass
class ClearMixin(object):
    """Use this mixin to make Demo classes with less visual clutter.

    Demos using this mixin will clear the screen before every block and use
    blank marquees.

    Note that in order for the methods defined here to actually override those
    of the classes it's mixed with, it must go /first/ in the inheritance
    tree.  For example:

        class ClearIPDemo(ClearMixin,IPythonDemo): pass

    will provide an IPythonDemo class with the mixin's features.
    """

    def marquee(self,txt='',width=78,mark='*'):
        """Blank marquee that returns '' no matter what the input."""
        # Callers guard on a falsy marquee (see Demo.__call__), so an
        # empty string suppresses the decorative framing entirely.
        return ''

    def pre_cmd(self):
        """Method called before executing each block.

        This one simply clears the screen."""
        # Imported lazily to avoid a hard dependency at module load time.
        from IPython.utils.terminal import term_clear
        term_clear()
class ClearDemo(ClearMixin,Demo):
    """Plain-exec Demo with screen clearing and blank marquees."""
    pass
class ClearIPDemo(ClearMixin,IPythonDemo):
    """IPython-processed Demo with screen clearing and blank marquees."""
    pass
| mit |
JioCloud/nova_test_latest | nova/paths.py | 72 | 2182 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo_config import cfg
# Configuration options describing where nova's code, binaries, and
# mutable state live.  '$pybasedir' in defaults is an oslo.config
# substitution resolved at lookup time, not at registration time.
path_opts = [
    cfg.StrOpt('pybasedir',
               # One directory above this module, i.e. the package root.
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the nova python module is installed'),
    cfg.StrOpt('bindir',
               default=os.path.join(sys.prefix, 'local', 'bin'),
               help='Directory where nova binaries are installed'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining nova's state"),
]

CONF = cfg.CONF
# Options are registered at import time so every consumer of this module
# can rely on CONF.pybasedir / CONF.bindir / CONF.state_path existing.
CONF.register_opts(path_opts)
def basedir_def(*args):
    """Return an uninterpolated path relative to $pybasedir.

    The '$pybasedir' prefix is left literal for oslo.config to substitute
    when the value is actually read.
    """
    segments = ('$pybasedir',) + args
    return os.path.join(*segments)
def bindir_def(*args):
    """Return an uninterpolated path relative to $bindir.

    The '$bindir' prefix is left literal for oslo.config substitution.
    """
    segments = ('$bindir',) + args
    return os.path.join(*segments)
def state_path_def(*args):
    """Return an uninterpolated path relative to $state_path.

    The '$state_path' prefix is left literal for oslo.config substitution.
    """
    segments = ('$state_path',) + args
    return os.path.join(*segments)
def basedir_rel(*args):
    """Return a path relative to $pybasedir.

    Unlike basedir_def(), this resolves the configured value immediately
    via CONF, so the result contains a real filesystem path.
    """
    return os.path.join(CONF.pybasedir, *args)
def bindir_rel(*args):
    """Return a path relative to $bindir, resolved via CONF immediately."""
    return os.path.join(CONF.bindir, *args)
def state_path_rel(*args):
    """Return a path relative to $state_path, resolved via CONF immediately."""
    return os.path.join(CONF.state_path, *args)
| apache-2.0 |
juddc/Dipper | dip/tests/test_compiler.py | 1 | 1176 | import sys
sys.path.insert(0, "../")
import unittest
from dip.typesystem import DNull, DBool, DInteger, DString, DList
from dip.parser import DipperParser
from dip.compiler import FrameCompiler
from dip.interpreter import VirtualMachine
from dip.namespace import Namespace
class TestCompiler(unittest.TestCase):
    """End-to-end harness: parse, compile and run Dipper source in the VM."""

    def _execute_simple(self, code):
        """Compile *code*, run it in a fresh VM, and return the callback value.

        NOTE(review): the value delivered to the callback appears to be the
        program's final result — confirm against VirtualMachine's cb contract.
        """
        # set up the global namespace
        globalns = Namespace("globals")
        # Register each top-level definition with the namespace; only
        # Function and Struct nodes are handled, others are ignored.
        for node in DipperParser().parse(code):
            if node.type == "Function":
                globalns.add_func(node.name, FrameCompiler(node).mkfunc())
            elif node.type == "Struct":
                globalns.add_struct(node.name, node.mkstruct())
        # set up a hacky way to extract data from the VM via a callback
        result = [None] # we need a mutable object we can put data in
        def getresult(val):
            result[0] = val
        # set up the VM
        vm = VirtualMachine([], cb=getresult, debug=False)
        vm.setglobals(globalns)
        vm.run()
        return result[0]

    def test_simple(self):
        """Placeholder test; real cases should go through _execute_simple."""
        pass
if __name__ == '__main__':
unittest.main() | mit |
hex108/docker-registry | docker_registry/toolkit.py | 10 | 11245 | # -*- coding: utf-8 -*-
import base64
import functools
import hashlib
import logging
import os
import random
import re
import string
import time
import urllib
import flask
from M2Crypto import RSA
import requests
from docker_registry.core import compat
json = compat.json
from . import storage
from .lib import config
# Module-wide registry configuration, loaded once at import time.
cfg = config.load()
logger = logging.getLogger(__name__)
# Extracts the version token from a Docker client User-Agent string.
_re_docker_version = re.compile('docker/([^\s]+)')
# Parses key=value / key:"value" pairs out of a Token authorization header.
_re_authorization = re.compile(r'(\w+)[:=][\s"]?([^",]+)"?')
# Valid image IDs are 16- or 64-char lowercase hex strings.
_re_hex_image_id = re.compile(r'^([a-f0-9]{16}|[a-f0-9]{64})$')
def valid_image_id(f):
    """Decorator: reject view calls whose ``image_id`` kwarg is malformed.

    The wrapped view runs only when image_id is a 16- or 64-character
    lowercase hex string; otherwise a 404 JSON error is returned.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        image_id = kwargs.get('image_id', '')
        if not _re_hex_image_id.match(image_id):
            return api_error("Invalid image ID", 404)
        return f(*args, **kwargs)
    return wrapper
def docker_client_version():
    """Try and extract the client version from the User-Agent string

    So we can warn older versions of the Docker engine/daemon about
    incompatible APIs. If we can't figure out the version (e.g. the
    client is not a Docker engine), just return None.

    Returns a tuple of ints such as (1, 3, 0), or None.
    """
    ua = flask.request.headers.get('user-agent', '')
    m = _re_docker_version.search(ua)
    if not m:
        return
    version = m.group(1)
    if '-' in version:
        # Drop any pre-release suffix, e.g. "1.3.0-dev" -> "1.3.0".
        version = version.split('-')[0]
    try:
        # Fix: split on '.' before converting.  Iterating the bare string
        # fed individual characters to int(), so any dotted version hit
        # int('.') -> ValueError and the function always returned None.
        return tuple(int(x) for x in version.split('.'))
    except ValueError:
        # Non-numeric version component: treat as unknown client.
        return
class SocketReader(object):
    """Wrap a file-like object (or requests.Response) for observed reads.

    Registered handler callbacks are invoked with every chunk that passes
    through, letting callers e.g. checksum a stream while it is consumed.
    """

    def __init__(self, fp):
        # fp is either a plain file-like object or a requests.Response.
        self._fp = fp
        self.handlers = []

    def __iter__(self):
        return self.iterate()

    def iterate(self, chunk_size=-1):
        """Yield chunks from the underlying source, notifying handlers.

        chunk_size == -1 means "source default" (1024 for a Response).
        """
        if isinstance(self._fp, requests.Response):
            if chunk_size == -1:
                chunk_size = 1024
            for chunk in self._fp.iter_content(chunk_size):
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
        else:
            # Plain file-like object: read until an empty (falsy) chunk.
            chunk = self._fp.read(chunk_size)
            while chunk:
                for handler in self.handlers:
                    handler(chunk)
                yield chunk
                chunk = self._fp.read(chunk_size)

    def add_handler(self, handler):
        """Register a callable invoked with each chunk read."""
        self.handlers.append(handler)

    def read(self, n=-1):
        """Read up to n bytes, feeding the data to all handlers first."""
        buf = self._fp.read(n)
        if not buf:
            return ''
        for handler in self.handlers:
            handler(buf)
        return buf
def response(data=None, code=200, headers=None, raw=False):
    """Build a Flask response with no-cache defaults and JSON encoding.

    *data* defaults to True; unless raw is False it is JSON-serialized
    (falling back to str() for unserializable objects).  Caller-supplied
    headers override the defaults; Pragma is added whenever Cache-Control
    remains 'no-cache'.
    """
    if data is None:
        data = True
    merged_headers = {
        'Cache-Control': 'no-cache',
        'Expires': '-1',
        'Content-Type': 'application/json',
    }
    if headers:
        merged_headers.update(headers)
    if merged_headers['Cache-Control'] == 'no-cache':
        merged_headers['Pragma'] = 'no-cache'
    if raw is False:
        try:
            data = json.dumps(data, sort_keys=True, skipkeys=True)
        except TypeError:
            data = str(data)
    return flask.current_app.make_response((data, code, merged_headers))
def validate_parent_access(parent_id):
    """Ask the index server whether the token grants access to a parent layer.

    Always True in standalone mode.  Returns False on any malformed token,
    non-200 index response, or unparsable reply.
    """
    if cfg.standalone:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    full_repos_name = auth.get('repository', '').split('/')
    # Only the two-segment "namespace/repository" form is accepted.
    if len(full_repos_name) != 2:
        logger.debug('validate_parent: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/layer/{3}/access'.format(
        cfg.index_endpoint, full_repos_name[0], full_repos_name[1], parent_id
    )
    # Forward the client's Authorization header verbatim to the index.
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    if resp.status_code != 200:
        logger.debug('validate_parent: index returns status {0}'.format(
            resp.status_code
        ))
        return False
    try:
        # Note(dmp): unicode patch XXX not applied! Assuming requests does it
        logger.debug('validate_parent: Content: {0}'.format(resp.text))
        return json.loads(resp.text).get('access', False)
    except ValueError:
        logger.debug('validate_parent: Wrong response format')
        return False
def validate_token(auth):
    """Validate a parsed token against the index server.

    On success, the repository's image list returned by the index is
    cached into local storage as a side effect.  Returns True/False.
    """
    full_repos_name = auth.get('repository', '').split('/')
    if len(full_repos_name) != 2:
        logger.debug('validate_token: Invalid repository field')
        return False
    url = '{0}/v1/repositories/{1}/{2}/images'.format(cfg.index_endpoint,
                                                      full_repos_name[0],
                                                      full_repos_name[1])
    # Forward the client's Authorization header verbatim to the index.
    headers = {'Authorization': flask.request.headers.get('authorization')}
    resp = requests.get(url, verify=True, headers=headers)
    logger.debug('validate_token: Index returned {0}'.format(resp.status_code))
    if resp.status_code != 200:
        return False
    store = storage.load()
    try:
        # Note(dmp): unicode patch XXX not applied (requests)
        images_list = [i['id'] for i in json.loads(resp.text)]
        # Cache the authoritative image list for this repository locally.
        store.put_content(store.images_list_path(*full_repos_name),
                          json.dumps(images_list))
    except ValueError:
        logger.debug('validate_token: Wrong format for images_list')
        return False
    return True
def get_remote_ip():
    """Best-effort client IP, honouring reverse-proxy headers first."""
    headers = flask.request.headers
    # Prefer proxy-supplied headers, in this precedence order.
    for name in ('X-Forwarded-For', 'X-Real-Ip'):
        if name in headers:
            return headers.getlist(name)[0]
    return flask.request.remote_addr
def is_ssl():
    """True when a reverse-proxy header says the request used TLS."""
    headers = flask.request.headers
    for name in ('X-Forwarded-Proto', 'X-Forwarded-Protocol'):
        value = headers.get(name)
        if value is not None and value.lower() in ('https', 'ssl'):
            return True
    return False
def _parse_auth_header():
    """Parse a 'Token ...' Authorization header into a dict.

    Returns None when the header is missing or does not use the Token
    scheme; otherwise a dict of the key=value pairs found in it.
    """
    auth = flask.request.headers.get('authorization', '')
    # Only the "Token" scheme is supported here.
    if auth.split(' ')[0].lower() != 'token':
        logger.debug('check_token: Invalid token format')
        return None
    logger.debug('Auth Token = {0}'.format(auth))
    auth = dict(_re_authorization.findall(auth))
    logger.debug('auth = {0}'.format(auth))
    return auth
def check_token(args):
    """Validate the request's auth token against the requested action.

    Checks, in order: the token's repository matches the route args, the
    token's access level matches the HTTP method, and the index server
    confirms the token.  Always True when token auth is disabled or the
    registry runs standalone.
    """
    logger.debug('args = {0}'.format(args))
    if cfg.disable_token_auth is True or cfg.standalone is True:
        return True
    auth = _parse_auth_header()
    if not auth:
        return False
    if 'namespace' in args and 'repository' in args:
        # We're authorizing an action on a repository,
        # let's check that it matches the repos name provided in the token
        full_repos_name = '{namespace}/{repository}'.format(**args)
        logger.debug('full_repos_name = {0}'.format(full_repos_name))
        if full_repos_name != auth.get('repository'):
            logger.debug('check_token: Wrong repository name in the token:'
                         '{0} != {1}'.format(full_repos_name,
                                             auth.get('repository')))
            return False
    # Check that the token `access' variable is aligned with the HTTP method
    access = auth.get('access')
    if access == 'write' and flask.request.method not in ['POST', 'PUT']:
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'read' and flask.request.method != 'GET':
        logger.debug('check_token: Wrong access value in the token')
        return False
    if access == 'delete' and flask.request.method != 'DELETE':
        logger.debug('check_token: Wrong access value in the token')
        return False
    # Finally, let the index server confirm the token itself.
    if validate_token(auth) is False:
        return False
    # Token is valid
    return True
def check_signature():
    """Verify the request's X-Signature header against the privileged key.

    The signed message is 'METHOD,PATH,Hdr:val,...' built from the sorted
    X-Docker* request headers.  Returns True only when RSA verification
    of its SHA-1 digest succeeds; False on any missing data or error.
    """
    pkey = cfg.privileged_key
    if not pkey:
        return False
    headers = flask.request.headers
    signature = headers.get('X-Signature')
    if not signature:
        logger.debug('No X-Signature header in request')
        return False
    sig = parse_content_signature(signature)
    logger.debug('Parsed signature: {}'.format(sig))
    # The actual signature bytes travel base64-encoded in the 'data' field.
    sigdata = base64.b64decode(sig['data'])
    # iterkeys(): this module targets Python 2.
    header_keys = sorted([
        x for x in headers.iterkeys() if x.startswith('X-Docker')
    ])
    message = ','.join([flask.request.method, flask.request.path] +
                       ['{}:{}'.format(k, headers[k]) for k in header_keys])
    logger.debug('Signed message: {}'.format(message))
    try:
        return pkey.verify(message_digest(message), sigdata, 'sha1')
    except RSA.RSAError as e:
        logger.exception(e)
        return False
def parse_content_signature(s):
    """Parse a 'key=value; key=value' signature header into a dict.

    Splitting on the first '=' only keeps '=' padding inside base64
    values intact.
    """
    pairs = (item.strip().split('=', 1) for item in s.split(';'))
    return {key: value for key, value in pairs}
def message_digest(s):
    """Return the raw (binary) SHA-1 digest of *s*."""
    return hashlib.sha1(s).digest()
def requires_auth(f):
    """Decorator gating a view behind signature- or token-based auth.

    Signature auth is tried first; either mechanism succeeding lets the
    view run, otherwise a 401 with a WWW-Authenticate challenge is sent.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        authorized = (check_signature() is True or
                      check_token(kwargs) is True)
        if not authorized:
            return api_error('Requires authorization', 401,
                             {'WWW-Authenticate': 'Token'})
        return f(*args, **kwargs)
    return wrapper
def api_error(message, code=400, headers=None):
    """Return a JSON error response: {"error": message} with the given code."""
    logger.debug('api_error: {0}'.format(message))
    return response({'error': message}, code, headers)
def gen_random_string(length=16):
    """Return a random string of uppercase letters and digits.

    NOTE(review): backed by the non-cryptographic `random` module; if
    these strings ever guard anything security-sensitive, switch to the
    `secrets` module.
    """
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def parse_repository_name(f):
    """Decorator: split a 'namespace/repository' route arg into kwargs.

    Single-segment names default to the 'library' namespace.  The
    repository part is URL-quoted before being passed on.
    """
    @functools.wraps(f)
    def wrapper(repository, *args, **kwargs):
        # Only split on the first '/', so nested names keep their tail.
        parts = repository.rstrip('/').split('/', 1)
        if len(parts) < 2:
            namespace = 'library'
            repository = parts[0]
        else:
            (namespace, repository) = parts
        # urllib.quote_plus: Python 2 API (urllib.parse.quote_plus on py3).
        repository = urllib.quote_plus(repository)
        return f(namespace=namespace, repository=repository, *args, **kwargs)
    return wrapper
def exclusive_lock(f):
    """Decorator serializing calls to *f* across processes via a lock file.

    If the lock file already exists, waits up to ~100 seconds for it to
    disappear.  NOTE(review): if the lock clears during the wait, the
    wrapper returns None WITHOUT calling f (the bare `return` below) —
    the assumption seems to be that the other process did the work;
    confirm that is intended.  The exists/create sequence is also not
    atomic, so two processes can race past the check.
    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        # f.func_name: Python 2 attribute (f.__name__ on py3).
        lock_path = os.path.join(
            './', 'registry.{0}.lock'.format(f.func_name)
        )
        if os.path.exists(lock_path):
            x = 0
            while os.path.exists(lock_path) and x < 100:
                logger.warn('Another process is creating the search database')
                x += 1
                time.sleep(1)
            if x == 100:
                raise Exception('Timedout waiting for db init')
            return
        # Create the (empty) lock file, run f, and always clean up.
        lock_file = open(lock_path, 'w')
        lock_file.close()
        try:
            result = f(*args, **kwargs)
        finally:
            os.remove(lock_path)
        return result
    return wrapper
def get_repository():
    """Return (namespace, repository) parsed from the Authorization header.

    Returns None when no header is present, ('', '') when the token has
    no repository field, and defaults the namespace to 'library' for
    single-segment repository names.
    """
    header = flask.request.headers.get('authorization', '')
    if not header:
        return
    token_fields = dict(_re_authorization.findall(header))
    repository = token_fields.get('repository')
    if repository is None:
        return ('', '')
    parts = repository.rstrip('/').split('/', 1)
    if len(parts) == 1:
        return ('library', parts[0])
    return (parts[0], parts[1])
def get_endpoints(overcfg=None):
    """Return the configured registry endpoints.

    *overcfg* optionally overrides the module-level config.  Falls back
    to the request's Host header when nothing is configured.
    """
    registry_endpoints = (overcfg or cfg).registry_endpoints
    if not registry_endpoints:
        # registry_endpoints = socket.gethostname()
        registry_endpoints = flask.request.environ['HTTP_HOST']
    return registry_endpoints
| apache-2.0 |
plamut/ggrc-core | src/ggrc_workflows/migrations/versions/20140903222805_4dd3191323da_add_response_options_to_task_group_tasks.py | 7 | 1959 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add response options to task group tasks
Revision ID: 4dd3191323da
Revises: 4c6ce142b434
Create Date: 2014-09-03 22:28:05.079477
"""
# revision identifiers, used by Alembic.
# This migration applies on top of 4c6ce142b434 in the version chain.
revision = '4dd3191323da'
down_revision = '4c6ce142b434'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import mysql
def upgrade():
    """Add response-option and task-type columns, then backfill '[]'.

    The sa.Column(default=...) values are client-side only, so existing
    rows are explicitly backfilled with the update statements below.
    NOTE(review): task_type is added NOT NULL with no server default —
    on strict-mode MySQL this relies on the DB's implicit '' default;
    confirm against the target database configuration.
    """
    op.add_column(
        'cycle_task_group_object_tasks',
        sa.Column('response_options', sa.Text(), default="[]", nullable=False))
    op.add_column(
        'cycle_task_group_object_tasks',
        sa.Column(
            'selected_response_options',
            sa.Text(), default="[]", nullable=False))
    op.add_column(
        'cycle_task_group_object_tasks',
        sa.Column('task_type', sa.String(length=250), nullable=False))
    op.add_column(
        'task_group_tasks',
        sa.Column('response_options', sa.Text(), default="[]", nullable=False))
    op.add_column(
        'task_group_tasks',
        sa.Column('task_type', sa.String(length=250), nullable=False))
    # Lightweight table constructs for issuing the backfill UPDATEs.
    ctgot_table = table('cycle_task_group_object_tasks',
                        column('id', sa.Integer),
                        column('response_options', sa.Text),
                        column('selected_response_options', sa.Text),
                        )
    tgt_table = table('task_group_tasks',
                      column('id', sa.Integer),
                      column('response_options', sa.Text),
                      )
    # Backfill existing rows with empty JSON arrays.
    op.execute(ctgot_table.update().values(
        response_options='[]',
        selected_response_options='[]',
    ))
    op.execute(tgt_table.update().values(
        response_options='[]',
    ))
def downgrade():
    """Drop every column added by upgrade(), in reverse order."""
    op.drop_column('task_group_tasks', 'task_type')
    op.drop_column('task_group_tasks', 'response_options')
    op.drop_column('cycle_task_group_object_tasks', 'task_type')
    op.drop_column('cycle_task_group_object_tasks', 'selected_response_options')
    op.drop_column('cycle_task_group_object_tasks', 'response_options')
| apache-2.0 |
40223249-1/0622W17 | static/Brython3.1.1-20150328-091302/Lib/collections/__init__.py | 625 | 25849 | #__all__ = ['deque', 'defaultdict', 'Counter']
from _collections import deque, defaultdict
#from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList',
'UserString', 'Counter', 'OrderedDict']
# For bootstrapping reasons, the collection ABCs are defined in _abcoll.py.
# They should however be considered an integral part of collections.py.
# fixme brython.. there is an issue with _abcoll
#from _abcoll import *
#from _abcoll import Set
from _abcoll import MutableMapping
#import _abcoll
#__all__ += _abcoll.__all__
from collections.abc import *
import collections.abc
__all__ += collections.abc.__all__
from _collections import deque, defaultdict, namedtuple
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
import heapq as _heapq
#fixme brython
#from weakref import proxy as _proxy
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
from reprlib import recursive_repr as _recursive_repr
class Set(set):
    # Stand-in for the ABC of the same name: Brython cannot import it
    # from _abcoll (see the commented-out imports above), so the builtin
    # set is aliased under the expected name — TODO confirm no ABC
    # behavior is relied upon by consumers.
    pass
class Sequence(list):
    # Stand-in for the Sequence ABC (unavailable via _abcoll in Brython);
    # the builtin list is aliased under the expected name.
    pass
def _proxy(obj):
    # Stand-in for weakref.proxy (weakref is unavailable here — see the
    # "fixme brython" note above): returns the object itself, so the
    # "weak" links in OrderedDict are actually strong references.
    return obj
################################################################################
### OrderedDict
################################################################################
class _Link(object):
    # Node of OrderedDict's circular doubly linked list; __slots__ keeps
    # per-key overhead small and enables weak references.
    __slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as regular dictionaries.

    # The internal self.__map dict maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # The sentinel is in self.__hardroot with a weakref proxy in self.__root.
    # The prev links are weakref proxies (to prevent circular references).
    # Individual links are kept alive by the hard reference in self.__map.
    # Those hard references disappear when a key is deleted from an OrderedDict.
    # NOTE(review): in this Brython port _proxy is the identity function,
    # so the "weak" references above are actually strong ones.

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  The signature is the same as
        regular dictionaries, but keyword arguments are not recommended because
        their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel node and key->link map.
            self.__hardroot = _Link()
            self.__root = root = _proxy(self.__hardroot)
            root.prev = root.next = root
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value,
                    dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            self.__map[key] = link = Link()
            root = self.__root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            last.next = link
            root.prev = proxy(link)
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which gets
        # removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link = self.__map.pop(key)
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        root = self.__root
        curr = root.next
        while curr is not root:
            yield curr.key
            curr = curr.next

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root.prev
        while curr is not root:
            yield curr.key
            curr = curr.prev

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        # Reset the sentinel to point at itself, then empty both dicts.
        root = self.__root
        root.prev = root.next = root
        self.__map.clear()
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node just before the sentinel (newest entry).
            link = root.prev
            link_prev = link.prev
            link_prev.next = root
            root.prev = link_prev
        else:
            # Unlink the node just after the sentinel (oldest entry).
            link = root.next
            link_next = link.next
            root.next = link_next
            link_next.prev = root
        key = link.key
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    def move_to_end(self, key, last=True):
        '''Move an existing element to the end (or beginning if last==False).

        Raises KeyError if the element does not exist.
        When last=True, acts like a fast version of self[key]=self.pop(key).

        '''
        # Unlink the node from its current position...
        link = self.__map[key]
        link_prev = link.prev
        link_next = link.next
        link_prev.next = link_next
        link_next.prev = link_prev
        root = self.__root
        # ...and re-insert it just before (last) or just after (first)
        # the sentinel.
        if last:
            last = root.prev
            link.prev = last
            link.next = root
            last.next = root.prev = link
        else:
            first = root.next
            link.prev = root
            link.next = first
            root.next = first.prev = link

    def __sizeof__(self):
        sizeof = _sys.getsizeof
        n = len(self) + 1                       # number of links including root
        size = sizeof(self.__dict__)            # instance dictionary
        size += sizeof(self.__map) * 2          # internal dict and inherited dict
        size += sizeof(self.__hardroot) * n     # link objects
        size += sizeof(self.__root) * n         # proxy objects
        return size

    #fixme brython.. Issue with _abcoll, which contains MutableMapping
    # Borrow the generic mixin implementations; __update is the
    # name-mangled alias used by __init__ above.
    update = __update = MutableMapping.update
    keys = MutableMapping.keys
    values = MutableMapping.values
    items = MutableMapping.items
    __ne__ = MutableMapping.__ne__

    # Sentinel distinguishing "no default given" from an explicit None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
        value.  If key is not found, d is returned if given, otherwise KeyError
        is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    #fixme, brython issue
    #@_recursive_repr()
    def __repr__(self):
        'od.__repr__() <==> repr(od)'
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Strip the bookkeeping attributes a fresh instance would recreate.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
        If not specified, the value defaults to None.

        '''
        self = cls()
        for key in iterable:
            self[key] = value
        return self

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and \
                all(p==q for p, q in zip(self.items(), other.items()))
        return dict.__eq__(self, other)
########################################################################
### Counter
########################################################################
def _count_elements(mapping, iterable):
'Tally elements from the iterable.'
mapping_get = mapping.get
for elem in iterable:
mapping[elem] = mapping_get(elem, 0) + 1
#try: # Load C helper function if available
# from _collections import _count_elements
#except ImportError:
# pass
class Counter(dict):
'''Dict subclass for counting hashable items. Sometimes called a bag
or multiset. Elements are stored as dictionary keys and their counts
are stored as dictionary values.
>>> c = Counter('abcdeabcdabcaba') # count elements from a string
>>> c.most_common(3) # three most common elements
[('a', 5), ('b', 4), ('c', 3)]
>>> sorted(c) # list all unique elements
['a', 'b', 'c', 'd', 'e']
>>> ''.join(sorted(c.elements())) # list elements with repetitions
'aaaaabbbbcccdde'
>>> sum(c.values()) # total of all counts
15
>>> c['a'] # count of letter 'a'
5
>>> for elem in 'shazam': # update counts from an iterable
... c[elem] += 1 # by adding 1 to each element's count
>>> c['a'] # now there are seven 'a'
7
>>> del c['b'] # remove all 'b'
>>> c['b'] # now there are zero 'b'
0
>>> d = Counter('simsalabim') # make another counter
>>> c.update(d) # add in the second counter
>>> c['a'] # now there are nine 'a'
9
>>> c.clear() # empty the counter
>>> c
Counter()
Note: If a count is set to zero or reduced to zero, it will remain
in the counter until the entry is deleted or the counter is cleared:
>>> c = Counter('aaabbc')
>>> c['b'] -= 2 # reduce the count of 'b' by two
>>> c.most_common() # 'b' is still in, but its count is zero
[('a', 3), ('c', 1), ('b', 0)]
'''
# References:
# http://en.wikipedia.org/wiki/Multiset
# http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html
# http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm
# http://code.activestate.com/recipes/259174/
# Knuth, TAOCP Vol. II section 4.6.3
def __init__(self, iterable=None, **kwds):
'''Create a new, empty Counter object. And if given, count elements
from an input iterable. Or, initialize the count from another mapping
of elements to their counts.
>>> c = Counter() # a new, empty counter
>>> c = Counter('gallahad') # a new counter from an iterable
>>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
>>> c = Counter(a=4, b=2) # a new counter from keyword args
'''
#super().__init__() #BE modified since super not supported
dict.__init__(self)
self.update(iterable, **kwds)
def __missing__(self, key):
'The count of elements not in the Counter is zero.'
# Needed so that self[missing_item] does not raise KeyError
return 0
def most_common(self, n=None):
'''List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
>>> Counter('abcdeabcdabcaba').most_common(3)
[('a', 5), ('b', 4), ('c', 3)]
'''
# Emulate Bag.sortedByCount from Smalltalk
if n is None:
return sorted(self.items(), key=_itemgetter(1), reverse=True)
return _heapq.nlargest(n, self.items(), key=_itemgetter(1))
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
# Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1
>>> prime_factors = Counter({2: 2, 3: 3, 17: 1})
>>> product = 1
>>> for factor in prime_factors.elements(): # loop over factors
... product *= factor # and multiply them
>>> product
1836
Note, if an element's count has been set to zero or is a negative
number, elements() will ignore it.
'''
# Emulate Bag.do from Smalltalk and Multiset.begin from C++.
return _chain.from_iterable(_starmap(_repeat, self.items()))
# Override dict methods where necessary
@classmethod
def fromkeys(cls, iterable, v=None):
# There is no equivalent method for counters because setting v=1
# means that no element can have a count greater than one.
raise NotImplementedError(
'Counter.fromkeys() is undefined. Use Counter(iterable) instead.')
def update(self, iterable=None, **kwds):
'''Like dict.update() but add counts instead of replacing them.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.update('witch') # add elements from another iterable
>>> d = Counter('watch')
>>> c.update(d) # add elements from another counter
>>> c['h'] # four 'h' in which, witch, and watch
4
'''
# The regular dict.update() operation makes no sense here because the
# replace behavior results in the some of original untouched counts
# being mixed-in with all of the other counts for a mismash that
# doesn't have a straight-forward interpretation in most counting
# contexts. Instead, we implement straight-addition. Both the inputs
# and outputs are allowed to contain zero and negative counts.
if iterable is not None:
if isinstance(iterable, Mapping):
if self:
self_get = self.get
for elem, count in iterable.items():
self[elem] = count + self_get(elem, 0)
else:
super().update(iterable) # fast path when counter is empty
else:
_count_elements(self, iterable)
if kwds:
self.update(kwds)
def subtract(self, iterable=None, **kwds):
'''Like dict.update() but subtracts counts instead of replacing them.
Counts can be reduced below zero. Both the inputs and outputs are
allowed to contain zero and negative counts.
Source can be an iterable, a dictionary, or another Counter instance.
>>> c = Counter('which')
>>> c.subtract('witch') # subtract elements from another iterable
>>> c.subtract(Counter('watch')) # subtract elements from another counter
>>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch
0
>>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch
-1
'''
if iterable is not None:
self_get = self.get
if isinstance(iterable, Mapping):
for elem, count in iterable.items():
self[elem] = self_get(elem, 0) - count
else:
for elem in iterable:
self[elem] = self_get(elem, 0) - 1
if kwds:
self.subtract(kwds)
def copy(self):
'Return a shallow copy.'
return self.__class__(self)
def __reduce__(self):
return self.__class__, (dict(self),)
def __delitem__(self, elem):
'Like dict.__delitem__() but does not raise KeyError for missing values.'
if elem in self:
super().__delitem__(elem)
def __repr__(self):
if not self:
return '%s()' % self.__class__.__name__
try:
items = ', '.join(map('%r: %r'.__mod__, self.most_common()))
return '%s({%s})' % (self.__class__.__name__, items)
except TypeError:
# handle case where values are not orderable
return '{0}({1!r})'.format(self.__class__.__name__, dict(self))
# Multiset-style mathematical operations discussed in:
# Knuth TAOCP Volume II section 4.6.3 exercise 19
# and at http://en.wikipedia.org/wiki/Multiset
#
# Outputs guaranteed to only include positive counts.
#
# To strip negative and zero counts, add-in an empty counter:
# c += Counter()
def __add__(self, other):
'''Add counts from two counters.
>>> Counter('abbb') + Counter('bcc')
Counter({'b': 4, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count + other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __sub__(self, other):
''' Subtract count, but keep only results with positive counts.
>>> Counter('abbbc') - Counter('bccd')
Counter({'b': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
newcount = count - other[elem]
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count < 0:
result[elem] = 0 - count
return result
def __or__(self, other):
'''Union is the maximum of value in either of the input counters.
>>> Counter('abbb') | Counter('bcc')
Counter({'b': 3, 'c': 2, 'a': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = other_count if count < other_count else count
if newcount > 0:
result[elem] = newcount
for elem, count in other.items():
if elem not in self and count > 0:
result[elem] = count
return result
def __and__(self, other):
''' Intersection is the minimum of corresponding counts.
>>> Counter('abbb') & Counter('bcc')
Counter({'b': 1})
'''
if not isinstance(other, Counter):
return NotImplemented
result = Counter()
for elem, count in self.items():
other_count = other[elem]
newcount = count if count < other_count else other_count
if newcount > 0:
result[elem] = newcount
return result
########################################################################
### ChainMap (helper for configparser)
########################################################################
class ChainMap(MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.

    The underlying mappings are stored in a list.  That list is public and can
    accessed or updated using the *maps* attribute.  There is no other state.

    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.

    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.

        '''
        self.maps = list(maps) or [{}]          # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]             # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)            # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))     # reuses stored hash values if possible

    def __iter__(self):
        # NOTE: iterating a set yields keys in arbitrary order, unlike the
        # dict-based ordering of CPython's collections.ChainMap.
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    # BUG FIX: this class previously defined __repr__ twice -- a standard
    # '{ClassName}(map1, map2)' version immediately shadowed by the simple
    # comma-joined version below.  The shadowed (dead) definition has been
    # removed; the effective behavior is unchanged.
    #fixme, brython
    #@_recursive_repr()
    def __repr__(self):
        return ','.join(str(_map) for _map in self.maps)

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self):                        # like Django's Context.push()
        'New ChainMap with a new dict followed by all previous maps.'
        return self.__class__({}, *self.maps)

    @property
    def parents(self):                          # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            #raise KeyError('Key not found in the first mapping: {!r}'.format(key))
            raise KeyError('Key not found in the first mapping: %s' % key)

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
################################################################################
### UserDict
################################################################################
class UserDict(MutableMapping):
    """Dict-like wrapper class designed for easy subclassing.

    The wrapped dictionary is exposed through the ``data`` attribute.
    """

    # --- abstract-method implementations required by MutableMapping ---

    def __init__(self, dict=None, **kwargs):
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Mirror dict: give subclasses with a __missing__ hook a chance.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item):
        self.data[key] = item

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __contains__(self, key):
        # Consult data directly so __missing__ cannot make lookups succeed.
        return key in self.data

    # --- conveniences found on dict but not on MutableMapping ---

    def __repr__(self):
        return repr(self.data)

    def copy(self):
        if self.__class__ is UserDict:
            # Fast path: a plain UserDict only needs its data copied.
            return UserDict(self.data.copy())
        import copy
        saved = self.data
        try:
            # Detach data so copy.copy() does not duplicate it...
            self.data = {}
            duplicate = copy.copy(self)
        finally:
            self.data = saved
        # ...then repopulate the copy through the (possibly overridden) API.
        duplicate.update(self)
        return duplicate

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d
################################################################################
### UserList
################################################################################
################################################################################
### UserString
################################################################################
| gpl-3.0 |
siorai/AutoUploaderGoogleDrive | AutoUploaderGoogleDrive/CommandLine.py | 1 | 15287 | import os
import httplib2
import base64
import logging
import argparse
import re
import time
from sys import argv
from apiclient import discovery
from apiclient.http import MediaFileUpload
from AutoUploaderGoogleDrive.auth import Authorize
from AutoUploaderGoogleDrive.settings import *
from AutoUploaderGoogleDrive.temp import *
from AutoUploaderGoogleDrive.Rules import *
import rarfile
from rarfile import Error, BadRarFile, NeedFirstVolume
from email.mime.text import MIMEText
__author__ = 'siorai@gmail.com (Paul Waldorf)'
class main(object):
    """Transmission post-download hook.

    Extracts any rar archives found in the downloaded torrent, uploads the
    torrent's files to Google Drive (recreating the local directory
    structure), mails an HTML report of the uploads via the Gmail API, and
    finally deletes the extracted files.

    NOTE(review): the class body unpacks ``argv`` at import time, so merely
    importing this module without exactly one command-line argument raises
    ValueError -- confirm this is intended.
    """

    # Executed at class-creation time: capture the target folder from the
    # command line and configure root logging to ``logfile`` (from settings).
    script, localFolder = argv
    logging.basicConfig(filename=logfile,level=logging.DEBUG,format='%(asctime)s %(message)s')

    def __init__(self, localFolder=None):
        """
        Drive the whole pipeline: authorize, collect torrent metadata (from
        Transmission's TR_* environment variables, falling back to the
        *localFolder* argument), extract archives, optionally sort, upload,
        email a report, and clean up.

        Args:
            localFolder: string, optional. path/to/folder to process when the
                Transmission environment variables are not available.
        """
        http = Authorize()
        if localFolder:
            self.localFolder = localFolder
        self.serviceGmail = discovery.build('gmail', 'v1', http=http)
        self.serviceDrive = discovery.build('drive', 'v2', http=http)
        self.JSONResponseList = []
        self.Public = True
        self.extractedFilesList = []
        self.nonDefaultPermissions = nonDefaultPermissions
        try:
            logging.debug('Attempting to pull information from daemon env')
            self.bt_name = os.getenv('TR_TORRENT_NAME')
            logging.debug("Pulled bt_name successfully: %s" % self.bt_name)
            self.bt_time = os.getenv('TR_TIME_LOCALTIME')
            logging.debug("Pulled bt_time successfully: %s" % self.bt_time)
            self.bt_app = os.getenv('TR_APP_VERSION')
            logging.debug("Pulled app_version successfully: %s" % self.bt_app)
            self.bt_dir = os.getenv('TR_TORRENT_DIR')
            logging.debug("Pulled Torrent Dir successfully: %s " % self.bt_dir)
            self.bt_hash = os.getenv('TR_TORRENT_HASH')
            logging.debug("Pulled hash successfully: %s " % self.bt_hash)
            self.bt_id = os.getenv('TR_TORRENT_ID')
            logging.debug("Pulled torrent_id successfully: %s" % self.bt_id)
            # When the TR_* variables are absent these are None and the join
            # below raises AttributeError, which routes us to the fallback
            # branch -- presumably the intended dispatch mechanism.
            self.fullFilePaths = os.path.join(self.bt_dir, self.bt_name)
            logging.debug("Joined bt_dir and bt_name to get %s" % self.fullFilePaths)
            self.autoExtract(self.fullFilePaths)
            if SortTorrents == True:
                updategoogledrivedir = Sort(directory=self.bt_name, fullPath=self.fullFilePaths)
                logging.debug("***STARTSORT*** %s" % updategoogledrivedir)
            else:
                updategoogledrivedir = ["0", googledrivedir]
                logging.debug("***SORTSKIPPED*** %s" % updategoogledrivedir)
            self.destgoogledrivedir = updategoogledrivedir[1]
            self.FilesDict = self.createDirectoryStructure(self.fullFilePaths)
            logging.debug("Creating dictionary of files: %s" % self.FilesDict)
            logging.debug('Information pulled successfully')
        except(AttributeError):
            # Fallback: no daemon environment; use the folder supplied on the
            # command line instead.
            self.fullFilePaths = self.localFolder
            self.folderName = self.fullFilePaths.rsplit(os.sep)
            logging.debug("Using %s" % self.folderName)
            self.bt_name = self.folderName[-2]
            logging.debug("Using %s" % self.bt_name)
            self.autoExtract(self.fullFilePaths)
            if SortTorrents == True:
                updategoogledrivedir = Sort(directory=self.bt_name, fullPath=self.fullFilePaths)
                logging.debug("***STARTSORT*** %s" % updategoogledrivedir)
            else:
                updategoogledrivedir = ["0", googledrivedir]
                logging.debug("***SORTSKIPPED*** %s" % updategoogledrivedir)
            self.destgoogledrivedir = updategoogledrivedir[1]
            self.FilesDict = self.createDirectoryStructure(self.fullFilePaths)
            logging.debug("Using %s as FilesDict" % self.FilesDict)
        # NOTE(review): autoExtract() already ran in both branches above; this
        # third call re-walks and may re-extract the same archives -- confirm
        # whether it is redundant.
        self.autoExtract(self.fullFilePaths)
        self.uploadPreserve(self.FilesDict, Folder_ID=self.destgoogledrivedir)
        tempfilename = '/var/tmp/transmissiontemp/transmission.%s.%s.html' % (self.bt_name, os.getpid())
        setup_temp_file(tempfilename)
        for EachEntry in self.JSONResponseList:
            addentry(tempfilename, EachEntry)
        finish_html(tempfilename)
        email_subject = ("%s has finished downloading.") % self.bt_name
        email_message = self.encodeMessage(email_subject, tempfilename)
        self.sendMessage(email_message)
        logging.debug("Contents of extractFilesList %s" % self.extractedFilesList)
        self.cleanUp()

    def createDirectoryStructure(self, rootdir):
        """
        Creates dictionary using os.walk to be used for keeping track
        of the local torrent's file structure to recreate it on Google Drive

        Any folders it finds, it creates a new subdictionary inside, however
        when it locates files adds a list to each entry the first of which is 'None'
        and the second of which is the full path/to/file to be used by
        self.uploadToGoogleDrive.

        Args:
            rootdir: string. path/to/directory to be recreated.

        Returns:
            dir: dictionary. Dictionary containing directory file structure and
                full paths to file names
        """
        dir = {}
        rootdir = rootdir.rstrip(os.sep)
        start = rootdir.rfind(os.sep) + 1
        for path, dirs, files in os.walk(rootdir):
            try:
                # NOTE(review): ``files`` is a list, so os.path.join() appears
                # to always raise here and the except branch below does the
                # real work -- confirm the try branch is reachable at all.
                filepath = os.path.join(path, files)
                folders = path[start:].split(os.sep)
                subdir = dict.fromkeys(files, ['None', filepath])
                # ``reduce`` as a builtin implies Python 2 here.
                parent = reduce(dict.get, folders[:-1], dir)
                parent[folders[-1]] = subdir
            except:
                filepath = path
                folders = path[start:].split(os.sep)
                subdir = dict.fromkeys(files, ['None', filepath])
                parent = reduce(dict.get, folders[:-1], dir)
                parent[folders[-1]] = subdir
        return dir

    def autoExtract(self, directory):
        """
        Function for searching through the specified directory for rar
        archives by performing a simple check for each file in the dir.
        If one is found, it attempts to extract.

        Files that are extracted get appended to self.extractedFilesList
        as a way to keep track of them.

        Once all files in the directory are either uploaded (or skipped if
        they are archives), the extracted files are deleted by the cleanUP
        function.

        Args:
            directory: string. Directory to check for archives
        """
        for path, dirs, files in os.walk(directory):
            for EachFile in files:
                filepath = os.path.join(path, EachFile)
                if rarfile.is_rarfile(filepath):
                    logging.debug("UNRAR: Archive %s found." % filepath)
                    try:
                        logging.debug("UNRAR: Attemping extraction....")
                        with rarfile.RarFile(filepath) as rf:
                            startExtraction = time.time()
                            rf.extractall(path=path)
                            timeToExtract = time.time() - startExtraction
                            # Record each member so cleanUp() can delete it
                            # after the upload completes.
                            for EachExtractedFile in rf.namelist():
                                self.extractedFilesList.append(
                                    {
                                        'FileList': EachExtractedFile,
                                        'Path': path,
                                        'TimeToUnrar': timeToExtract
                                    }
                                )
                        logging.debug("UNRAR: Extraction for %s took %s." % (filepath, timeToExtract))
                    except:
                        # Bare except: multi-part volumes (NeedFirstVolume etc.)
                        # are silently skipped.
                        logging.debug("UNRAR: Moving onto next file.")

    def cleanUp(self):
        """
        CleanUp script that removes each of the files that was previously extracted
        from archives and deletes from the local hard drive.

        Args:
            None
        """
        logging.info("CLEANUP: Cleanup started. Deleting extracted files.")
        DeleteFiles = self.extractedFilesList
        for EachFile in DeleteFiles:
            FilePath = os.path.join(EachFile['Path'], EachFile['FileList'])
            logging.info("CLEANUP: Deleting %s." % FilePath)
            os.remove(FilePath)
        if deleteTmpHTML is True:
            # NOTE(review): ``tempfilename`` is a local variable of __init__,
            # not defined in this scope -- this branch raises NameError when
            # deleteTmpHTML is True. Consider storing it as self.tempfilename.
            logging.debug("CLEANUP: Deleting HTML File: %s" % tempfilename)
            os.remove(tempfilename)
        logging.info("CLEANUP: Cleanup completed.")

    def fetchTorrentFile(self):
        """
        Fetches the Torrents file name to parse for sorting.

        Args:
            bt_name: string. Name of the torrent

        Returns:
            filepath: /path/to/file to be parsed for trackerinfo
        """
        bt_name = self.bt_name
        # NOTE(review): self.torrentFileDirectory is never assigned in
        # __init__; unless set elsewhere this raises AttributeError.
        torrentFileDirectory = self.torrentFileDirectory
        for path, dirs, files in os.walk(torrentFileDirectory):
            for EachTorrent in files:
                if bt_name in EachTorrent:
                    filepath = os.path.join(path, EachTorrent)
        return filepath

    def getIDs(self):
        """
        Fetches IDs from the Google API to be assigned as needed.

        Args:
            None
        """
        service = self.serviceDrive
        IDs = service.files().generateIds().execute()
        return IDs['ids']

    def createFolder(self, folderName, parents=None):
        """
        Creates folder on Google Drive.

        Args:
            folderName: string. Name of folder to be created
            parents: Unique ID where folder is to be put inside of

        Returns:
            id: unique folder ID
        """
        service = self.serviceDrive
        body = {'title': folderName,
                'mimeType' : 'application/vnd.google-apps.folder'
                }
        if parents:
            body['parents'] = [{'id' : parents}]
        response = service.files().insert(body=body).execute()
        if self.nonDefaultPermissions == True:
            fileID = response['id']
            self.setPermissions(fileID)
        return response['id']

    def encodeMessage(self, subject, tempfilename, message_text=None):
        """
        Basic MIMEText encoding

        Args:
            subject: string. Subject of email
            tempfilename: string. HTML Table create from temp.py
            message_text: string. optional email text in addition to
                supplied HTML table

        Returns:
            A base64url encoded email object.
        """
        # NOTE(review): file handle is never closed, and message_text is
        # accepted but unused.
        readhtml = open(tempfilename, 'r')
        html = readhtml.read()
        message = MIMEText(html, 'html')
        message['to'] = emailTo
        message['from'] = emailSender
        message['subject'] = subject
        return {'raw': base64.urlsafe_b64encode(message.as_string())}

    def sendMessage(self, message):
        """
        Sends message encoded by encodeMessage.

        Args:
            message: base64url encoded email object.

        Returns:
            JSON response from google.
        """
        service = self.serviceGmail
        response = service.users().messages().send(userId='me', body=message).execute()
        return response

    def uploadPreserve(self, FilesDict, Folder_ID=None):
        """
        Uploads files in FilesDict preserving the local file structure
        as shown by FilesDict created from getDirectoryStructure.

        Appends each JSON response from google return as JSON Data into
        self.JSONResponse list.

        Args:
            FilesDict: dict. Dictionary representation of files and structure
                to be created on google drive
            Folder_ID: string. Unique resource ID for folder to be uploaded to.

        Returns:
            Nothing
        """
        for FF in FilesDict:
            i = FilesDict[FF]
            try:
                if i[0]:
                    fullPathToFile = os.path.join(i[1], FF)
                    # Presumably meant to skip rar volume files (.rar/.rXX)
                    # that were already extracted -- TODO confirm the pattern,
                    # it matches any name containing ".r".
                    refilter = re.compile('.*\\.r.*.*\\Z(?ms)')
                    if refilter.match(fullPathToFile):
                        logging.debug("%s skipped." % fullPathToFile)
                    else:
                        response = self.uploadToGoogleDrive(fullPathToFile, FF, Folder_ID=Folder_ID)
                        self.JSONResponseList.append(response)
            except(KeyError):
                # Entry is a sub-directory: create the folder remotely and
                # recurse into it.
                subfolder = FilesDict[FF]
                subfolder_id = self.createFolder(FF, parents=Folder_ID)
                self.uploadPreserve(subfolder, Folder_ID=subfolder_id)

    def uploadToGoogleDrive(self, FilePath, FileTitle, Folder_ID=None):
        """
        Performs upload to Google Drive.

        Args:
            FilePath: string. Path/To/File/
            FileTitle: string. Passed to the body as the name of the file.
            Folder_ID: string. Unique Folder_ID as assigned by Google Drive.

        Returns:
            Response in the form of JSON data from Google's REST.
        """
        service = self.serviceDrive
        body = {
            'title': FileTitle
        }
        if Folder_ID:
            body['parents'] = [{'id' : Folder_ID}]
        media = MediaFileUpload(FilePath, chunksize=chunksize, resumable=True)
        response = service.files().insert(body=body, media_body=media).execute()
        if self.nonDefaultPermissions == True:
            fileID = response['id']
            self.setPermissions(fileID)
        # Attach a shortened link for the email report.
        response['alt_tiny'] = self.shortenUrl(response['alternateLink'])
        return response

    def setPermissions(self, file_id):
        """
        Sets the permissions for the file as long as settings.nonDefaultPermissions
        is set to True. If set to True, the permissions listed there will be applied
        after each file is uploaded to Google Drive.

        Args:
            file_id: string. Unique File ID assigned by google after file is uploaded
        """
        service = self.serviceDrive
        newPermissions = {
            'value': permissionValue,
            'type': permissionType,
            'role': permissionRole,
        }
        return service.permissions().insert(
            fileId=file_id, body=newPermissions).execute()

    def shortenUrl(self, URL):
        """
        URL Shortener function that when combined with the uploading
        script adds a new key:value to the JSON response with a much
        more managable URL.

        Args:
            URL: string. URL parsed from JSON response
        """
        http = Authorize()
        service = discovery.build('urlshortener', 'v1', http=http)
        url = service.url()
        body = {
            'longUrl': URL
        }
        response = url.insert(body=body).execute()
        logging.debug("URLSHRINK: %s" % response)
        short_url = response['id']
        logging.debug("URLSHRINK: %s" % short_url)
        return short_url
if __name__ == '__main__':
    # First argv element is the script path, second is the folder to upload.
    script, localFolder = argv
    # BUG FIX: the entry-point class defined above is named ``main``; the
    # previous code called the non-existent name ``AutoUploaderGoogleDrive``,
    # which raised NameError whenever the script was run directly.
    main(localFolder)
| gpl-3.0 |
marissazhou/django | tests/forms_tests/widget_tests/test_dateinput.py | 247 | 1557 | from datetime import date
from django.forms import DateInput
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class DateInputTest(WidgetTest):
    """Rendering behaviour of the DateInput form widget."""
    widget = DateInput()

    def test_render_none(self):
        """A missing value renders as an empty text input."""
        self.check_html(self.widget, 'date', None, html='<input type="text" name="date" />')

    def test_render_value(self):
        """A date object renders in ISO 8601 format by default."""
        value = date(2007, 9, 17)
        self.assertEqual(str(value), '2007-09-17')
        self.check_html(self.widget, 'date', value, html='<input type="text" name="date" value="2007-09-17" />')
        self.check_html(self.widget, 'date', date(2007, 9, 17), html=(
            '<input type="text" name="date" value="2007-09-17" />'
        ))

    def test_string(self):
        """
        Should be able to initialize from a string value.
        """
        self.check_html(self.widget, 'date', '2007-09-17', html=(
            '<input type="text" name="date" value="2007-09-17" />'
        ))

    def test_format(self):
        """
        Use 'format' to change the way a value is displayed.
        """
        value = date(2007, 9, 17)
        custom_widget = DateInput(format='%d/%m/%Y', attrs={'type': 'date'})
        self.check_html(custom_widget, 'date', value, html='<input type="date" name="date" value="17/09/2007" />')

    @override_settings(USE_L10N=True)
    @translation.override('de-at')
    def test_l10n(self):
        """Localized formats apply when USE_L10N is active."""
        self.check_html(
            self.widget, 'date', date(2007, 9, 17),
            html='<input type="text" name="date" value="17.09.2007" />',
        )
| bsd-3-clause |
VOLTTRON/volttron-applications | kisensum/Simulation/SimulationClockAgent/tests/test_simulation_clock.py | 2 | 8609 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, SLAC National Laboratory / Kisensum Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor SLAC / Kisensum,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# SLAC / Kisensum. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# }}}
from datetime import datetime, timedelta
import gevent
import pytest
import time
from volttron.platform.agent import utils
# Agent configuration passed to install_agent(); the "%c"/"%s"/"%p"
# placeholders in "exec" are filled in by the platform (config, sub, pub).
DEBUGGER_CONFIG = {
    "agent": {
        "exec": "simulationclockagent-1.0-py2.7.egg --config \"%c\" --sub \"%s\" --pub \"%p\""
    },
    "agentid": "simulationclock",
}
@pytest.fixture(scope='module')
def agent(request, volttron_instance):
    # Install and start the SimulationClockAgent on the test platform, then
    # build a helper agent the tests use to issue RPC calls against it.
    master_uuid = volttron_instance.install_agent(agent_dir='applications/kisensum/Simulation/SimulationClockAgent',
                                                  config_file=DEBUGGER_CONFIG,
                                                  start=True)
    gevent.sleep(2)
    clock_agent = volttron_instance.build_agent()
    gevent.sleep(20)            # wait for the agent to start

    def stop():
        # Module teardown: stop the installed agent and the helper agent.
        volttron_instance.stop_agent(master_uuid)
        clock_agent.core.stop()

    request.addfinalizer(stop)
    return clock_agent
@pytest.mark.usefixtures('agent')
class TestSimulationClock:
    """
    Regression tests for SimulationClockAgent.
    """

    def test_start_simulation(self, agent):
        """Test initializing a simulation clock and getting a simulated time from it."""
        response = self.start_simulation(agent, '2017-01-01 08:00', '2017-01-01 10:00', '10.0')
        assert 'started' in response
        parsed_time = self.get_time(agent)
        # get_time() returns a datetime on success, a string on error.
        assert type(parsed_time) != str

    def test_elapsed_time(self, agent):
        """Confirm that correct simulated times are returned."""
        sim_start_time = utils.parse_timestamp_string('2017-01-01 08:00')
        sim_stop_time = utils.parse_timestamp_string('2017-01-01 10:00')
        clock_speed = 10.0
        # NOTE(review): str(10.0) hard-codes the speed again; str(clock_speed)
        # would keep the RPC argument and the local constant in sync.
        response = self.start_simulation(agent, str(sim_start_time), str(sim_stop_time), str(10.0))
        actual_start_time = datetime.now()
        assert 'started' in response
        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        # .seconds truncates to whole seconds, matching the agent's rounding;
        # the simulated clock advances clock_speed times faster than real time.
        elapsed_simulated_seconds = (datetime.now() - actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)
        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        elapsed_simulated_seconds = (datetime.now() - actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)

    def test_stop_simulation(self, agent):
        """Test stopping a simulation; confirm that getting a time from a stopped simulation returns an error."""
        response = self.stop_simulation(agent)
        assert response == 'Simulation stopped'
        response = self.get_time(agent)
        assert response == 'No simulation is in progress'

    def test_invalid_dates(self, agent):
        """Confirm errors returned when trying to initialize a simulation with an invalid start or stop datetime."""
        response = self.start_simulation(agent, '2017-00-01 08:00', '2017-01-01 10:00', '10.0')
        assert response == 'Invalid simulated_start_time'
        response = self.start_simulation(agent, '2017-01-01 08:00', '20175-01-01 10:00', '10.0')
        assert response == 'Invalid simulated_stop_time'
        response = self.start_simulation(agent, '2017-01-01 10:00', '2017-01-01 08:00', '10.0')
        assert response == 'simulated_stop_time is earlier than simulated_start_time'

    def test_invalid_speed(self, agent):
        """Confirm error returned when trying to initialize a simulation with an invalid clock speed."""
        response = self.start_simulation(agent, '2017-01-01 08:00', '2017-01-01 10:00', 'XX')
        assert response == 'Invalid speed'

    def test_forever_simulation(self, agent):
        """Test running a simulation with no defined stop time."""
        response = self.start_forever_simulation(agent, '2017-01-01 08:00', '10.0')
        assert 'started' in response

    def test_one_for_one_simulation(self, agent):
        """Test running a simulation for which the speed of the simulation clock is the speed of the wall clock."""
        response = self.start_one_for_one_simulation(agent, '2017-01-01 08:00', '2017-01-01 10:00')
        assert 'started' in response

    def start_simulation(self, agt, start_time, stop_time, speed):
        """Issue an RPC call to initialize a simulation."""
        return self.issue_rpc_call(agt, 'initialize_clock', start_time, simulated_stop_time=stop_time, speed=speed)

    def start_forever_simulation(self, agt, start_time, speed):
        """Issue an RPC call to initialize a simulation without specifying a stop time."""
        return self.issue_rpc_call(agt, 'initialize_clock', start_time, speed=speed)

    def start_one_for_one_simulation(self, agt, start_time, stop_time):
        """Issue an RPC call to initialize a simulation without specifying a clock speed."""
        return self.issue_rpc_call(agt, 'initialize_clock', start_time, simulated_stop_time=stop_time)

    def get_time(self, agt):
        """Issue an RPC call to get the current simulated clock time."""
        response = self.issue_rpc_call(agt, 'get_time')
        try:
            parsed_response = utils.parse_timestamp_string(response)
        except ValueError:
            # Agent returned an error string rather than a timestamp.
            parsed_response = response
        return parsed_response

    def stop_simulation(self, agt):
        """Issue an RPC call to stop the current simulation."""
        return self.issue_rpc_call(agt, 'stop_simulation')

    @staticmethod
    def issue_rpc_call(agt, rpc_name, *args, **kwargs):
        """Issue an RPC call to the SimulatedClockAgent."""
        return agt.vip.rpc.call('simulationclock', rpc_name, *args, **kwargs).get(timeout=30)
| bsd-3-clause |
pipitone/qbatch | setup.py | 1 | 1418 | #!/usr/bin/env python
from setuptools import setup
from io import open
# read the contents of your README file so PyPI can show it as the long
# description (io.open + explicit utf-8 keeps this Python 2 compatible)
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='qbatch',
    version='2.2.1',
    description='Execute shell command lines in parallel on Slurm, '
                'S(un|on of) Grid Engine (SGE) and PBS/Torque clusters',
    author="Jon Pipitone, Gabriel A. Devenyi",
    author_email="jon@pipitone.ca, gdevenyi@gmail.com",
    license='Unlicense',
    url="https://github.com/pipitone/qbatch",
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Installs a ``qbatch`` console script that dispatches to qbatchParser.
    entry_points={
        "console_scripts": [
            "qbatch=qbatch:qbatchParser",
        ]
    },
    packages=["qbatch"],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: Public Domain',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: System :: Clustering',
        'Topic :: System :: Distributed Computing',
        'Topic :: Utilities',
    ],
    install_requires=[
        "future",
    ],
)
| unlicense |
jorge-marques/shoop | shoop/admin/forms/fields.py | 6 | 1234 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from decimal import Decimal
from numbers import Number
from django.forms import DecimalField
class PercentageField(DecimalField):
    """Decimal form field that stores 0..1 fractions but displays percentages.

    The Python/database value is a fraction in the 0..1 range; the rendered
    widget shows (and accepts) that value multiplied by 100.
    """

    MULTIPLIER = Decimal(100)  # display value = stored value * MULTIPLIER

    def prepare_value(self, value):
        """Scale the stored 0..1 fraction up to a 0..100 display value."""
        if value is not None and isinstance(value, Number):
            value = value * self.MULTIPLIER
        return super(PercentageField, self).prepare_value(value)

    def to_python(self, value):
        """Scale the submitted 0..100 value back down to a 0..1 fraction."""
        value = super(PercentageField, self).to_python(value)
        if value is None:
            return None
        return value / self.MULTIPLIER

    def widget_attrs(self, widget):
        """Scale the HTML min/max attributes to match the displayed range."""
        attrs = super(PercentageField, self).widget_attrs(widget)
        for bound, attr_name in ((self.min_value, 'min'), (self.max_value, 'max')):
            if bound is not None:
                attrs[attr_name] = bound * self.MULTIPLIER
        return attrs
| agpl-3.0 |
Tinkerforge/brickv | src/brickv/bindings/bricklet_led_strip.py | 1 | 18477 | # -*- coding: utf-8 -*-
#############################################################
# This file was automatically generated on 2021-05-11. #
# #
# Python Bindings Version 2.1.29 #
# #
# If you have a bugfix for this file and want to commit it, #
# please fix the bug in the generator. You can find a link #
# to the generators git repository on tinkerforge.com #
#############################################################
from collections import namedtuple
try:
from .ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
except ValueError:
from ip_connection import Device, IPConnection, Error, create_char, create_char_list, create_string, create_chunk_data
# Named-tuple result types returned by get_rgb_values(), get_rgbw_values()
# and get_identity() below.
GetRGBValues = namedtuple('RGBValues', ['r', 'g', 'b'])
GetRGBWValues = namedtuple('RGBWValues', ['r', 'g', 'b', 'w'])
GetIdentity = namedtuple('Identity', ['uid', 'connected_uid', 'position', 'hardware_version', 'firmware_version', 'device_identifier'])
class BrickletLEDStrip(Device):
    """
    Controls up to 320 RGB LEDs
    """
    DEVICE_IDENTIFIER = 231
    DEVICE_DISPLAY_NAME = 'LED Strip Bricklet'
    DEVICE_URL_PART = 'led_strip' # internal

    # Callback ID (see register_callback()).
    CALLBACK_FRAME_RENDERED = 6

    # Protocol function IDs passed to ipcon.send_request().
    FUNCTION_SET_RGB_VALUES = 1
    FUNCTION_GET_RGB_VALUES = 2
    FUNCTION_SET_FRAME_DURATION = 3
    FUNCTION_GET_FRAME_DURATION = 4
    FUNCTION_GET_SUPPLY_VOLTAGE = 5
    FUNCTION_SET_CLOCK_FREQUENCY = 7
    FUNCTION_GET_CLOCK_FREQUENCY = 8
    FUNCTION_SET_CHIP_TYPE = 9
    FUNCTION_GET_CHIP_TYPE = 10
    FUNCTION_SET_RGBW_VALUES = 11
    FUNCTION_GET_RGBW_VALUES = 12
    FUNCTION_SET_CHANNEL_MAPPING = 13
    FUNCTION_GET_CHANNEL_MAPPING = 14
    FUNCTION_ENABLE_FRAME_RENDERED_CALLBACK = 15
    FUNCTION_DISABLE_FRAME_RENDERED_CALLBACK = 16
    FUNCTION_IS_FRAME_RENDERED_CALLBACK_ENABLED = 17
    FUNCTION_GET_IDENTITY = 255

    # Constants for set_chip_type()/get_chip_type().
    CHIP_TYPE_WS2801 = 2801
    CHIP_TYPE_WS2811 = 2811
    CHIP_TYPE_WS2812 = 2812
    CHIP_TYPE_LPD8806 = 8806
    CHIP_TYPE_APA102 = 102

    # Constants for set_channel_mapping()/get_channel_mapping(); the letters
    # give the on-wire order of the R/G/B(/W) channels.
    CHANNEL_MAPPING_RGB = 6
    CHANNEL_MAPPING_RBG = 9
    CHANNEL_MAPPING_BRG = 33
    CHANNEL_MAPPING_BGR = 36
    CHANNEL_MAPPING_GRB = 18
    CHANNEL_MAPPING_GBR = 24
    CHANNEL_MAPPING_RGBW = 27
    CHANNEL_MAPPING_RGWB = 30
    CHANNEL_MAPPING_RBGW = 39
    CHANNEL_MAPPING_RBWG = 45
    CHANNEL_MAPPING_RWGB = 54
    CHANNEL_MAPPING_RWBG = 57
    CHANNEL_MAPPING_GRWB = 78
    CHANNEL_MAPPING_GRBW = 75
    CHANNEL_MAPPING_GBWR = 108
    CHANNEL_MAPPING_GBRW = 99
    CHANNEL_MAPPING_GWBR = 120
    CHANNEL_MAPPING_GWRB = 114
    CHANNEL_MAPPING_BRGW = 135
    CHANNEL_MAPPING_BRWG = 141
    CHANNEL_MAPPING_BGRW = 147
    CHANNEL_MAPPING_BGWR = 156
    CHANNEL_MAPPING_BWRG = 177
    CHANNEL_MAPPING_BWGR = 180
    CHANNEL_MAPPING_WRBG = 201
    CHANNEL_MAPPING_WRGB = 198
    CHANNEL_MAPPING_WGBR = 216
    CHANNEL_MAPPING_WGRB = 210
    CHANNEL_MAPPING_WBGR = 228
    CHANNEL_MAPPING_WBRG = 225

    def __init__(self, uid, ipcon):
        """
        Creates an object with the unique device ID *uid* and adds it to
        the IP Connection *ipcon*.
        """
        Device.__init__(self, uid, ipcon, BrickletLEDStrip.DEVICE_IDENTIFIER, BrickletLEDStrip.DEVICE_DISPLAY_NAME)
        self.api_version = (2, 0, 3)
        # Per function ID: whether the device answers the request
        # (setters are fire-and-forget by default, getters always respond).
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_RGB_VALUES] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_RGB_VALUES] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_FRAME_DURATION] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_FRAME_DURATION] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_SUPPLY_VOLTAGE] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_CLOCK_FREQUENCY] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_CLOCK_FREQUENCY] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_CHIP_TYPE] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_CHIP_TYPE] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_RGBW_VALUES] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_RGBW_VALUES] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_SET_CHANNEL_MAPPING] = BrickletLEDStrip.RESPONSE_EXPECTED_FALSE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_CHANNEL_MAPPING] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_ENABLE_FRAME_RENDERED_CALLBACK] = BrickletLEDStrip.RESPONSE_EXPECTED_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_DISABLE_FRAME_RENDERED_CALLBACK] = BrickletLEDStrip.RESPONSE_EXPECTED_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_IS_FRAME_RENDERED_CALLBACK_ENABLED] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        self.response_expected[BrickletLEDStrip.FUNCTION_GET_IDENTITY] = BrickletLEDStrip.RESPONSE_EXPECTED_ALWAYS_TRUE
        # Payload format of the FRAME_RENDERED callback: 10 bytes total, one uint16.
        self.callback_formats[BrickletLEDStrip.CALLBACK_FRAME_RENDERED] = (10, 'H')
        ipcon.add_device(self)

    def set_rgb_values(self, index, length, r, g, b):
        """
        Sets *length* RGB values for the LEDs starting from *index*.
        To make the colors show correctly you need to configure the chip type
        (:func:`Set Chip Type`) and a 3-channel channel mapping (:func:`Set Channel Mapping`)
        according to the connected LEDs.
        Example: If you set
        * index to 5,
        * length to 3,
        * r to [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        * g to [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] and
        * b to [0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        the LED with index 5 will be red, 6 will be green and 7 will be blue.
        .. note:: Depending on the LED circuitry colors can be permuted.
        The colors will be transfered to actual LEDs when the next
        frame duration ends, see :func:`Set Frame Duration`.
        Generic approach:
        * Set the frame duration to a value that represents
        the number of frames per second you want to achieve.
        * Set all of the LED colors for one frame.
        * Wait for the :cb:`Frame Rendered` callback.
        * Set all of the LED colors for next frame.
        * Wait for the :cb:`Frame Rendered` callback.
        * and so on.
        This approach ensures that you can change the LED colors with
        a fixed frame rate.
        The actual number of controllable LEDs depends on the number of free
        Bricklet ports. See :ref:`here <led_strip_bricklet_ram_constraints>` for more
        information. A call of :func:`Set RGB Values` with index + length above the
        bounds is ignored completely.
        """
        self.check_validity()
        # Coerce arguments to the wire types before packing.
        index = int(index)
        length = int(length)
        r = list(map(int, r))
        g = list(map(int, g))
        b = list(map(int, b))
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_RGB_VALUES, (index, length, r, g, b), 'H B 16B 16B 16B', 0, '')

    def get_rgb_values(self, index, length):
        """
        Returns *length* R, G and B values starting from the
        given LED *index*.
        The values are the last values that were set by :func:`Set RGB Values`.
        """
        self.check_validity()
        index = int(index)
        length = int(length)
        return GetRGBValues(*self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_RGB_VALUES, (index, length), 'H B', 56, '16B 16B 16B'))

    def set_frame_duration(self, duration):
        """
        Sets the frame duration.
        Example: If you want to achieve 20 frames per second, you should
        set the frame duration to 50ms (50ms * 20 = 1 second).
        For an explanation of the general approach see :func:`Set RGB Values`.
        """
        self.check_validity()
        duration = int(duration)
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_FRAME_DURATION, (duration,), 'H', 0, '')

    def get_frame_duration(self):
        """
        Returns the frame duration as set by :func:`Set Frame Duration`.
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_FRAME_DURATION, (), '', 10, 'H')

    def get_supply_voltage(self):
        """
        Returns the current supply voltage of the LEDs.
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_SUPPLY_VOLTAGE, (), '', 10, 'H')

    def set_clock_frequency(self, frequency):
        """
        Sets the frequency of the clock.
        The Bricklet will choose the nearest achievable frequency, which may
        be off by a few Hz. You can get the exact frequency that is used by
        calling :func:`Get Clock Frequency`.
        If you have problems with flickering LEDs, they may be bits flipping. You
        can fix this by either making the connection between the LEDs and the
        Bricklet shorter or by reducing the frequency.
        With a decreasing frequency your maximum frames per second will decrease
        too.
        .. note::
        The frequency in firmware version 2.0.0 is fixed at 2MHz.
        .. versionadded:: 2.0.1$nbsp;(Plugin)
        """
        self.check_validity()
        frequency = int(frequency)
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_CLOCK_FREQUENCY, (frequency,), 'I', 0, '')

    def get_clock_frequency(self):
        """
        Returns the currently used clock frequency as set by :func:`Set Clock Frequency`.
        .. versionadded:: 2.0.1$nbsp;(Plugin)
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_CLOCK_FREQUENCY, (), '', 12, 'I')

    def set_chip_type(self, chip):
        """
        Sets the type of the LED driver chip. We currently support the chips
        * WS2801,
        * WS2811,
        * WS2812 / SK6812 / NeoPixel RGB,
        * SK6812RGBW / NeoPixel RGBW (Chip Type = WS2812),
        * LPD8806 and
        * APA102 / DotStar.
        .. versionadded:: 2.0.2$nbsp;(Plugin)
        """
        self.check_validity()
        chip = int(chip)
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_CHIP_TYPE, (chip,), 'H', 0, '')

    def get_chip_type(self):
        """
        Returns the currently used chip type as set by :func:`Set Chip Type`.
        .. versionadded:: 2.0.2$nbsp;(Plugin)
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_CHIP_TYPE, (), '', 10, 'H')

    def set_rgbw_values(self, index, length, r, g, b, w):
        """
        Sets *length* RGBW values for the LEDs starting from *index*.
        To make the colors show correctly you need to configure the chip type
        (:func:`Set Chip Type`) and a 4-channel channel mapping (:func:`Set Channel Mapping`)
        according to the connected LEDs.
        The maximum length is 12, the index goes from 0 to 239 and the rgbw values
        have 8 bits each.
        Example: If you set
        * index to 5,
        * length to 4,
        * r to [255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        * g to [0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        * b to [0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0] and
        * w to [0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        the LED with index 5 will be red, 6 will be green, 7 will be blue and 8 will be white.
        .. note:: Depending on the LED circuitry colors can be permuted.
        The colors will be transfered to actual LEDs when the next
        frame duration ends, see :func:`Set Frame Duration`.
        Generic approach:
        * Set the frame duration to a value that represents
        the number of frames per second you want to achieve.
        * Set all of the LED colors for one frame.
        * Wait for the :cb:`Frame Rendered` callback.
        * Set all of the LED colors for next frame.
        * Wait for the :cb:`Frame Rendered` callback.
        * and so on.
        This approach ensures that you can change the LED colors with
        a fixed frame rate.
        The actual number of controllable LEDs depends on the number of free
        Bricklet ports. See :ref:`here <led_strip_bricklet_ram_constraints>` for more
        information. A call of :func:`Set RGBW Values` with index + length above the
        bounds is ignored completely.
        The LPD8806 LED driver chips have 7-bit channels for RGB. Internally the LED
        Strip Bricklets divides the 8-bit values set using this function by 2 to make
        them 7-bit. Therefore, you can just use the normal value range (0-255) for
        LPD8806 LEDs.
        The brightness channel of the APA102 LED driver chips has 5-bit. Internally the
        LED Strip Bricklets divides the 8-bit values set using this function by 8 to make
        them 5-bit. Therefore, you can just use the normal value range (0-255) for
        the brightness channel of APA102 LEDs.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        index = int(index)
        length = int(length)
        r = list(map(int, r))
        g = list(map(int, g))
        b = list(map(int, b))
        w = list(map(int, w))
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_RGBW_VALUES, (index, length, r, g, b, w), 'H B 12B 12B 12B 12B', 0, '')

    def get_rgbw_values(self, index, length):
        """
        Returns *length* RGBW values starting from the given *index*.
        The values are the last values that were set by :func:`Set RGBW Values`.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        index = int(index)
        length = int(length)
        return GetRGBWValues(*self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_RGBW_VALUES, (index, length), 'H B', 56, '12B 12B 12B 12B'))

    def set_channel_mapping(self, mapping):
        """
        Sets the channel mapping for the connected LEDs.
        :func:`Set RGB Values` and :func:`Set RGBW Values` take the data in RGB(W) order.
        But the connected LED driver chips might have their 3 or 4 channels in a
        different order. For example, the WS2801 chips typically use BGR order, the
        WS2812 chips typically use GRB order and the APA102 chips typically use WBGR
        order.
        The APA102 chips are special. They have three 8-bit channels for RGB
        and an additional 5-bit channel for the overall brightness of the RGB LED
        making them 4-channel chips. Internally the brightness channel is the first
        channel, therefore one of the Wxyz channel mappings should be used. Then
        the W channel controls the brightness.
        If a 3-channel mapping is selected then :func:`Set RGB Values` has to be used.
        Calling :func:`Set RGBW Values` with a 3-channel mapping will produce incorrect
        results. Vice-versa if a 4-channel mapping is selected then
        :func:`Set RGBW Values` has to be used. Calling :func:`Set RGB Values` with a
        4-channel mapping will produce incorrect results.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        mapping = int(mapping)
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_SET_CHANNEL_MAPPING, (mapping,), 'B', 0, '')

    def get_channel_mapping(self):
        """
        Returns the currently used channel mapping as set by :func:`Set Channel Mapping`.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_CHANNEL_MAPPING, (), '', 9, 'B')

    def enable_frame_rendered_callback(self):
        """
        Enables the :cb:`Frame Rendered` callback.
        By default the callback is enabled.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_ENABLE_FRAME_RENDERED_CALLBACK, (), '', 0, '')

    def disable_frame_rendered_callback(self):
        """
        Disables the :cb:`Frame Rendered` callback.
        By default the callback is enabled.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_DISABLE_FRAME_RENDERED_CALLBACK, (), '', 0, '')

    def is_frame_rendered_callback_enabled(self):
        """
        Returns *true* if the :cb:`Frame Rendered` callback is enabled, *false* otherwise.
        .. versionadded:: 2.0.6$nbsp;(Plugin)
        """
        self.check_validity()
        return self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_IS_FRAME_RENDERED_CALLBACK_ENABLED, (), '', 9, '!')

    def get_identity(self):
        """
        Returns the UID, the UID where the Bricklet is connected to,
        the position, the hardware and firmware version as well as the
        device identifier.
        The position can be 'a', 'b', 'c', 'd', 'e', 'f', 'g' or 'h' (Bricklet Port).
        A Bricklet connected to an :ref:`Isolator Bricklet <isolator_bricklet>` is always at
        position 'z'.
        The device identifier numbers can be found :ref:`here <device_identifier>`.
        |device_identifier_constant|
        """
        # Note: no check_validity() here -- identity can be queried even for
        # devices whose position has not been validated yet.
        return GetIdentity(*self.ipcon.send_request(self, BrickletLEDStrip.FUNCTION_GET_IDENTITY, (), '', 33, '8s 8s c 3B 3B H'))

    def register_callback(self, callback_id, function):
        """
        Registers the given *function* with the given *callback_id*.
        """
        # Passing function=None unregisters a previously registered callback.
        if function is None:
            self.registered_callbacks.pop(callback_id, None)
        else:
            self.registered_callbacks[callback_id] = function
LEDStrip = BrickletLEDStrip # for backward compatibility
| gpl-2.0 |
CloudBotIRC/CloudBot | plugins/admin_bot.py | 24 | 11016 | import asyncio
import re
from cloudbot import hook
@asyncio.coroutine
@hook.command("groups", "listgroups", "permgroups", permissions=["permissions_users"], autohelp=False)
def get_permission_groups(conn):
    """- lists all valid groups
    :type conn: cloudbot.client.Client
    """
    # Ask the connection's permission manager for every configured group.
    groups = conn.permissions.get_groups()
    return "Valid groups: {}".format(groups)
@asyncio.coroutine
@hook.command("gperms", permissions=["permissions_users"])
def get_group_permissions(text, conn, notice):
    """<group> - lists permissions given to <group>
    :type text: str
    :type conn: cloudbot.client.Client
    """
    group = text.strip().lower()
    manager = conn.permissions
    members = manager.get_group_users(group)
    perms = manager.get_group_permissions(group)
    # A group "exists" if it has either permissions or members.
    if perms:
        return "Group {} has permissions {}".format(group, perms)
    if members:
        return "Group {} exists, but has no permissions".format(group)
    notice("Unknown group '{}'".format(group))
@asyncio.coroutine
@hook.command("gusers", permissions=["permissions_users"])
def get_group_users(text, conn, notice):
    """<group> - lists users in <group>
    :type text: str
    :type conn: cloudbot.client.Client
    """
    group = text.strip().lower()
    permission_manager = conn.permissions
    group_users = permission_manager.get_group_users(group)
    group_permissions = permission_manager.get_group_permissions(group)
    if group_users:
        return "Group {} has members: {}".format(group, group_users)
    elif group_permissions:
        # Fixed: a stray second argument (group_permissions) was passed to
        # format() even though the template has only one placeholder.
        return "Group {} exists, but has no members".format(group)
    else:
        notice("Unknown group '{}'".format(group))
@asyncio.coroutine
@hook.command("uperms", autohelp=False)
def get_user_permissions(text, conn, mask, has_permission, notice):
    """[user] - lists all permissions given to [user], or the caller if no user is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type mask: str
    """
    # Querying another user's permissions requires elevated rights.
    if text and not has_permission("permissions_users"):
        notice("Sorry, you are not allowed to use this command on another user")
        return
    user = text.strip().lower() if text else mask.lower()
    perms = conn.permissions.get_user_permissions(user)
    if perms:
        return "User {} has permissions: {}".format(user, perms)
    return "User {} has no elevated permissions".format(user)
@asyncio.coroutine
@hook.command("ugroups", autohelp=False)
def get_user_groups(text, conn, mask, has_permission, notice):
    """[user] - lists all groups [user] is in, or the caller's groups if no user is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type mask: str
    """
    # Fixed: the help text (first docstring line, shown to users) previously
    # said "lists all permissions" -- a copy-paste from the 'uperms' command.
    if text:
        if not has_permission("permissions_users"):
            notice("Sorry, you are not allowed to use this command on another user")
            return
        user = text.strip().lower()
    else:
        user = mask.lower()
    permission_manager = conn.permissions
    user_groups = permission_manager.get_user_groups(user)
    if user_groups:
        return "User {} is in groups: {}".format(user, user_groups)
    else:
        return "User {} is in no permission groups".format(user)
@asyncio.coroutine
@hook.command("deluser", permissions=["permissions_users"])
def remove_permission_user(text, bot, conn, notice, reply):
    """<user> [group] - removes <user> from [group], or from all groups if no group is specified
    :type text: str
    :type bot: cloudbot.bot.CloudBot
    :type conn: cloudbot.client.Client
    """
    split = text.split()
    if len(split) > 2:
        notice("Too many arguments")
        return
    elif len(split) < 1:
        notice("Not enough arguments")
        return
    user = split[0].lower()
    group = split[1].lower() if len(split) > 1 else None
    permission_manager = conn.permissions
    changed = False
    if group is not None:
        # Remove the user from the one named group.
        if not permission_manager.group_exists(group):
            notice("Unknown group '{}'".format(group))
            return
        changed_masks = permission_manager.remove_group_user(group, user)
        if changed_masks:
            changed = True
            reply(_removal_summary(changed_masks, group))
        else:
            reply("No masks in {} matched {}".format(group, user))
    else:
        # No group given: remove the user from every group they are in.
        for group in permission_manager.get_user_groups(user):
            changed_masks = permission_manager.remove_group_user(group, user)
            if changed_masks:
                changed = True
                reply(_removal_summary(changed_masks, group))
        if not changed:
            # Fixed: this message previously formatted ``group`` (None, or the
            # last group iterated) instead of the user that was requested.
            reply("No masks with elevated permissions matched {}".format(user))
    if changed:
        # Persist the modified permission config and rebuild the caches.
        bot.config.save_config()
        permission_manager.reload()


def _removal_summary(changed_masks, group):
    """Build the 'Removed X from Y' reply for a non-empty list of removed masks."""
    if len(changed_masks) > 1:
        return "Removed {} and {} from {}".format(
            ", ".join(changed_masks[:-1]), changed_masks[-1], group)
    return "Removed {} from {}".format(changed_masks[0], group)
@asyncio.coroutine
@hook.command("adduser", permissions=["permissions_users"])
def add_permissions_user(text, conn, bot, notice, reply):
    """<user> <group> - adds <user> to <group>
    :type text: str
    :type conn: cloudbot.client.Client
    :type bot: cloudbot.bot.CloudBot
    """
    args = text.split()
    if len(args) != 2:
        notice("Too many arguments" if len(args) > 2 else "Not enough arguments")
        return
    user = args[0].lower()
    group = args[1].lower()
    if not re.search('.+!.+@.+', user):
        # TODO: When we have presence tracking, check if there are any users in the channel with the nick given
        notice("The user must be in the format 'nick!user@host'")
        return
    manager = conn.permissions
    existed_before = manager.group_exists(group)
    changed = manager.add_user_to_group(user, group)
    if not changed:
        reply("User {} is already matched in group {}".format(user, group))
    elif existed_before:
        reply("User {} added to group {}".format(user, group))
    else:
        reply("Group {} created with user {}".format(group, user))
    if changed:
        # Persist the change and rebuild the permission caches.
        bot.config.save_config()
        manager.reload()
@asyncio.coroutine
@hook.command("stop", "quit", permissions=["botcontrol"], autohelp=False)
def stop(text, bot):
    """[reason] - stops me with [reason] as its quit message.
    :type text: str
    :type bot: cloudbot.bot.CloudBot
    """
    # Only pass reason= when one was supplied, so bot.stop() can fall back to
    # its own default quit message.
    if text:
        yield from bot.stop(reason=text)
    else:
        yield from bot.stop()
@asyncio.coroutine
@hook.command(permissions=["botcontrol"], autohelp=False)
def restart(text, bot):
    """[reason] - restarts me with [reason] as its quit message.
    :type text: str
    :type bot: cloudbot.bot.CloudBot
    """
    # Only pass reason= when one was supplied, so bot.restart() can fall back
    # to its own default quit message.
    if text:
        yield from bot.restart(reason=text)
    else:
        yield from bot.restart()
@asyncio.coroutine
@hook.command(permissions=["botcontrol"])
def join(text, conn, notice):
    """<channel> - joins <channel>
    :type text: str
    :type conn: cloudbot.client.Client
    """
    for name in text.split():
        # Allow channels to be given without the leading '#'.
        channel = name if name.startswith("#") else "#{}".format(name)
        notice("Attempting to join {}...".format(channel))
        conn.join(channel)
@asyncio.coroutine
@hook.command(permissions=["botcontrol"], autohelp=False)
def part(text, conn, chan, notice):
    """[#channel] - parts [#channel], or the caller's channel if no channel is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type chan: str
    """
    # Default to the channel the command was issued in.
    targets = text if text else chan
    for name in targets.split():
        # Allow channels to be given without the leading '#'.
        channel = name if name.startswith("#") else "#{}".format(name)
        notice("Attempting to leave {}...".format(channel))
        conn.part(channel)
@asyncio.coroutine
@hook.command(autohelp=False, permissions=["botcontrol"])
def cycle(text, conn, chan, notice):
    """[#channel] - cycles [#channel], or the caller's channel if no channel is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type chan: str
    """
    # Default to the channel the command was issued in.
    targets = text if text else chan
    for name in targets.split():
        channel = name if name.startswith("#") else "#{}".format(name)
        notice("Attempting to cycle {}...".format(channel))
        # Leaving and immediately rejoining performs the cycle.
        conn.part(channel)
        conn.join(channel)
@asyncio.coroutine
@hook.command(permissions=["botcontrol"])
def nick(text, conn, notice):
    """<nick> - changes my nickname to <nick>
    :type text: str
    :type conn: cloudbot.client.Client
    """
    # Fixed: the previous pattern "^[a-z0-9_|.-\]\[]*$" had an unescaped '-'
    # between '.' and '\]', which silently became the character range .-]
    # (accepting '@', ':', ';', '<' and more), and the '*' quantifier let an
    # empty nick through.  Use a raw string, escape the hyphen, require at
    # least one character.
    if not re.match(r"^[a-z0-9_|.\-\]\[]+$", text.lower()):
        notice("Invalid username '{}'".format(text))
        return
    notice("Attempting to change nick to '{}'...".format(text))
    conn.set_nick(text)
@asyncio.coroutine
@hook.command(permissions=["botcontrol"])
def raw(text, conn, notice):
    """<command> - sends <command> as a raw IRC command
    :type text: str
    :type conn: cloudbot.client.Client
    """
    # The line is passed through to the IRC connection completely unmodified.
    notice("Raw command sent.")
    conn.send(text)
@asyncio.coroutine
@hook.command(permissions=["botcontrol"])
def say(text, conn, chan):
    """[#channel] <message> - says <message> to [#channel], or to the caller's channel if no channel is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type chan: str
    """
    text = text.strip()
    split = text.split(None, 1)
    if text.startswith("#") and len(split) > 1:
        # Explicit target channel followed by the message.
        channel, text = split
    else:
        # No target given; send everything to the originating channel.
        # Fixed: the old code raised IndexError on a bare "#channel" with no
        # message text; also removed a redundant ``text = text`` assignment.
        channel = chan
    conn.message(channel, text)
@asyncio.coroutine
@hook.command("message", "sayto", permissions=["botcontrol"])
def message(text, conn):
    """<name> <message> - says <message> to <name>
    :type text: str
    :type conn: cloudbot.client.Client
    """
    split = text.split(None, 1)
    if len(split) < 2:
        # Fixed: the old code raised an unhandled IndexError when no message
        # followed the target name; reply with usage instead.
        return "Usage: message <name> <message>"
    channel, text = split
    conn.message(channel, text)
@asyncio.coroutine
@hook.command("me", "act", permissions=["botcontrol"])
def me(text, conn, chan):
    """[#channel] <action> - acts out <action> in a [#channel], or in the current channel of none is specified
    :type text: str
    :type conn: cloudbot.client.Client
    :type chan: str
    """
    text = text.strip()
    split = text.split(None, 1)
    if text.startswith("#") and len(split) > 1:
        # Explicit target channel followed by the action text.
        channel, text = split
    else:
        # No target given; act in the originating channel.
        # Fixed: the old code raised IndexError on a bare "#channel" with no
        # action text; also removed a redundant ``text = text`` assignment.
        channel = chan
    conn.ctcp(channel, "ACTION", text)
| gpl-3.0 |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/web/util.py | 19 | 9865 | # -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from cStringIO import StringIO
from twisted.python import failure
import html
import resource
import linecache
import string, re
import types
def redirectTo(URL, request):
    """Set redirect headers on *request* and return a fallback HTML body.
    The body is only seen by clients that ignore the Location header; it
    meta-refreshes and links to the same URL.
    NOTE(review): *URL* is interpolated into the markup without any quoting
    or escaping -- callers must not pass untrusted input here.
    """
    request.redirect(URL)
    return """
<html>
<head>
<meta http-equiv=\"refresh\" content=\"0;URL=%(url)s\">
</head>
<body bgcolor=\"#FFFFFF\" text=\"#000000\">
<a href=\"%(url)s\">click here</a>
</body>
</html>
""" % {'url': URL}
class Redirect(resource.Resource):
    """Leaf resource that redirects every request to a fixed URL."""
    isLeaf = 1

    def __init__(self, url):
        resource.Resource.__init__(self)
        # Target of the redirect, used verbatim by render().
        self.url = url

    def render(self, request):
        return redirectTo(self.url, request)

    def getChild(self, name, request):
        # Any child path redirects to the same place.
        return self
class ChildRedirector(Redirect):
    """A Redirect whose children redirect to the matching child of the target.

    The target must not itself resolve to a child of this resource, or every
    lookup would redirect back into itself forever.
    """
    isLeaf = 0

    def __init__(self, url):
        # XXX is this enough?
        looks_relative = ('://' not in url
                          and not url.startswith('..')
                          and not url.startswith('/'))
        if looks_relative:
            raise ValueError("It seems you've given me a redirect (%s) that is a child of myself! That's not good, it'll cause an infinite redirect." % url)
        Redirect.__init__(self, url)

    def getChild(self, name, request):
        # Map child lookups onto the corresponding child of the target URL.
        base = self.url
        if not base.endswith('/'):
            base += '/'
        return ChildRedirector(base + name)
from twisted.python import urlpath
class ParentRedirect(resource.Resource):
    """
    I redirect to URLPath.here().
    """
    isLeaf = 1

    def render(self, request):
        # Redirect to the parent path of the requested URL.
        return redirectTo(urlpath.URLPath.fromRequest(request).here(), request)

    def getChild(self, request):
        # NOTE(review): this signature is missing the ``name`` argument that
        # the Resource.getChild contract defines; harmless while isLeaf = 1
        # means it is never called, but worth confirming/fixing.
        return self
class DeferredResource(resource.Resource):
    """
    I wrap up a Deferred that will eventually result in a Resource
    object.
    """
    isLeaf = 1

    def __init__(self, d):
        resource.Resource.__init__(self)
        # The Deferred whose eventual result is the real Resource.
        self.d = d

    def getChild(self, name, request):
        return self

    def render(self, request):
        # Render whatever resource the Deferred eventually produces, and tell
        # twisted.web that the response will be written asynchronously.
        self.d.addCallback(self._cbChild, request).addErrback(
            self._ebChild, request)
        # Local import -- presumably to avoid a circular import at module
        # load time; verify before hoisting.
        from twisted.web.server import NOT_DONE_YET
        return NOT_DONE_YET

    def _cbChild(self, child, request):
        # Resolve the remaining path against the real resource and render it.
        result = resource.getChildForRequest(child, request).render(request)
        from twisted.web.server import NOT_DONE_YET
        if result == NOT_DONE_YET:
            return
        else:
            request.write(result)
            request.finish()

    def _ebChild(self, reason, request):
        # Report the failure via the normal error-page machinery and keep
        # propagating it down the errback chain.
        request.processingFailed(reason)
        return reason
# Inline CSS written at the top of every formatFailure() traceback page.
stylesheet = """
<style type="text/css">
p.error {
color: red;
font-family: Verdana, Arial, helvetica, sans-serif;
font-weight: bold;
}
div {
font-family: Verdana, Arial, helvetica, sans-serif;
}
div.stackTrace {
}
div.frame {
padding: 1em;
background: white;
border-bottom: thin black dashed;
}
div.firstFrame {
padding: 1em;
background: white;
border-top: thin black dashed;
border-bottom: thin black dashed;
}
div.location {
}
div.snippet {
margin-bottom: 0.5em;
margin-left: 1em;
background: #FFFFDD;
}
div.snippetHighlightLine {
color: red;
}
span.code {
font-family: "Courier New", courier, monotype;
}
span.function {
font-weight: bold;
font-family: "Courier New", courier, monotype;
}
table.variables {
border-collapse: collapse;
margin-left: 1em;
}
td.varName {
vertical-align: top;
font-weight: bold;
padding-left: 0.5em;
padding-right: 0.5em;
}
td.varValue {
padding-left: 0.5em;
padding-right: 0.5em;
}
div.variables {
margin-bottom: 0.5em;
}
span.heading {
font-weight: bold;
}
div.dict {
background: #cccc99;
padding: 2px;
float: left;
}
td.dictKey {
background: #ffff99;
font-weight: bold;
}
td.dictValue {
background: #ffff99;
}
div.list {
background: #7777cc;
padding: 2px;
float: left;
}
div.listItem {
background: #9999ff;
}
div.instance {
background: #cc7777;
padding: 2px;
float: left;
}
span.instanceName {
font-weight: bold;
display: block;
}
span.instanceRepr {
background: #ff9999;
font-family: "Courier New", courier, monotype;
}
div.function {
background: orange;
font-weight: bold;
float: left;
}
</style>
"""
def htmlrepr(x):
    """Return an HTML rendering of *x*, dispatching on its exact type."""
    renderer = htmlReprTypes.get(type(x), htmlUnknown)
    return renderer(x)
def saferepr(x):
    """repr(x), falling back to a placeholder when repr() itself raises.

    Objects with buggy __repr__ implementations must not be able to abort
    the rendering of an error page.
    """
    try:
        return repr(x)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while rendering.
        return "<repr failed! %s instance at %s>" % (x.__class__, id(x))
def htmlUnknown(x):
    """Fallback renderer: the escaped repr of *x* inside <code> tags."""
    return '<code>'+html.escape(saferepr(x))+'</code>'
def htmlDict(d):
    """Render a dictionary as an HTML table inside a "dict" div."""
    parts = ['<div class="dict"><span class="heading">Dictionary instance @ %s</span>' % hex(id(d))]
    parts.append('<table class="dict">')
    for k, v in d.items():
        # Don't dump the entire builtin namespace -- it is huge and uninteresting.
        if k == '__builtins__':
            v = 'builtin dictionary'
        parts.append('<tr><td class="dictKey">%s</td><td class="dictValue">%s</td></tr>' % (htmlrepr(k), htmlrepr(v)))
    parts.append('</table></div>')
    return ''.join(parts)
def htmlList(l):
    """Render a list as a stack of "listItem" divs inside a "list" div."""
    parts = ['<div class="list"><span class="heading">List instance @ %s</span>' % hex(id(l))]
    for element in l:
        parts.append('<div class="listItem">%s</div>' % htmlrepr(element))
    parts.append('</div>')
    return ''.join(parts)
def htmlInst(i):
    """Render an object instance; honours a custom __html__() hook.
    NOTE(review): the result of __html__() is inserted without escaping --
    the hook is trusted to return safe markup.
    """
    if hasattr(i, "__html__"):
        s = i.__html__()
    else:
        s = html.escape(saferepr(i))
    return '''<div class="instance"><span class="instanceName">%s instance @ %s</span>
<span class="instanceRepr">%s</span></div>
''' % (i.__class__, hex(id(i)), s)
def htmlString(s):
    """Render a string as its escaped repr."""
    return html.escape(saferepr(s))
def htmlFunc(f):
    """Render a function as its name plus the file/line it was defined at."""
    return ('<div class="function">' +
            html.escape("function %s in file %s at line %s" %
                        (f.__name__, f.func_code.co_filename,
                         f.func_code.co_firstlineno)) +
            '</div>')
# Dispatch table used by htmlrepr(): exact type -> renderer.  Any type not
# listed here falls back to htmlUnknown.
htmlReprTypes = {types.DictType: htmlDict,
                 types.ListType: htmlList,
                 types.InstanceType: htmlInst,
                 types.StringType: htmlString,
                 types.FunctionType: htmlFunc}
def htmlIndent(snippetLine):
    """Escape a source line for HTML, preserving its leading whitespace.
    NOTE(review): the replacement targets below appear as plain spaces in
    this copy of the file; upstream Twisted replaces ' ' with '&nbsp;' and
    '\\t' with a run of '&nbsp;' entities -- the entities look lost in
    transit, confirm against the original source before relying on this.
    """
    ret = string.replace(string.replace(html.escape(string.rstrip(snippetLine)),
                                        ' ', ' '),
                         '\t', ' ')
    return ret
def formatFailure(myFailure):
    """Render a twisted.python.failure.Failure as a self-contained HTML
    stack trace: exception header, one div per frame with a source snippet
    and the local/global/instance variables that the snippet references,
    then the exception again at the bottom (anchor #tbend).
    """
    # HTML fragments filled in with %-formatting below.
    exceptionHTML = """
<p class="error">%s: %s</p>
"""
    frameHTML = """
<div class="location">%s, line %s in <span class="function">%s</span></div>
"""
    snippetLineHTML = """
<div class="snippetLine"><span class="lineno">%s</span><span class="code">%s</span></div>
"""
    snippetHighlightLineHTML = """
<div class="snippetHighlightLine"><span class="lineno">%s</span><span class="code">%s</span></div>
"""
    variableHTML = """
<tr class="varRow"><td class="varName">%s</td><td class="varValue">%s</td></tr>
"""
    # Anything that isn't a Failure is just dumped preformatted.
    if not isinstance(myFailure, failure.Failure):
        return html.PRE(str(myFailure))
    io = StringIO()
    w = io.write
    w(stylesheet)
    w('<a href="#tbend">')
    w(exceptionHTML % (html.escape(str(myFailure.type)),
                       html.escape(str(myFailure.value))))
    w('</a>')
    w('<div class="stackTrace">')
    first = 1
    for method, filename, lineno, localVars, globalVars in myFailure.frames:
        # Frames compiled from strings have no source file to show.
        if filename == '<string>':
            continue
        if first:
            w('<div class="firstFrame">')
            first = 0
        else:
            w('<div class="frame">')
        w(frameHTML % (filename, lineno, method))
        w('<div class="snippet">')
        # Collect the raw snippet text so variable usage can be grepped below.
        textSnippet = ''
        for snipLineNo in range(lineno-2, lineno+2):
            snipLine = linecache.getline(filename, snipLineNo)
            textSnippet += snipLine
            snipLine = htmlIndent(snipLine)
            if snipLineNo == lineno:
                w(snippetHighlightLineHTML % (snipLineNo, snipLine))
            else:
                w(snippetLineHTML % (snipLineNo, snipLine))
        w('</div>')
        # Instance variables
        for name, var in localVars:
            if name == 'self' and hasattr(var, '__dict__'):
                # Only show attributes actually referenced as self.<attr>
                # in the snippet.
                usedVars = [(key, value) for (key, value) in var.__dict__.items()
                            if re.search(r'\W'+'self.'+key+r'\W', textSnippet)]
                if usedVars:
                    w('<div class="variables"><b>Self</b>')
                    w('<table class="variables">')
                    for key, value in usedVars:
                        w(variableHTML % (key, htmlrepr(value)))
                    w('</table></div>')
                break
        # Local and global vars
        for nm, varList in ('Locals', localVars), ('Globals', globalVars):
            usedVars = [(name, var) for (name, var) in varList
                        if re.search(r'\W'+name+r'\W', textSnippet)]
            if usedVars:
                w('<div class="variables"><b>%s</b><table class="variables">' % nm)
                for name, var in usedVars:
                    w(variableHTML % (name, htmlrepr(var)))
                w('</table></div>')
        w('</div>') # frame
    w('</div>') # stacktrace
    w('<a name="tbend"> </a>')
    w(exceptionHTML % (html.escape(str(myFailure.type)),
                       html.escape(str(myFailure.value))))
    return io.getvalue()
| bsd-3-clause |
pyparallel/numpy | numpy/lib/user_array.py | 111 | 7764 | """
Standard container-class for easy multiple-inheritance.
Try to inherit from the ndarray instead of using this class as this is not
complete.
"""
from __future__ import division, absolute_import, print_function
from numpy.core import (
array, asarray, absolute, add, subtract, multiply, divide,
remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
greater_equal, shape, reshape, arange, sin, sqrt, transpose
)
from numpy.compat import long
class container(object):
    """Wrapper class holding an ndarray in ``self.array``.

    Intended as a base class for multiple inheritance.  Every arithmetic,
    bitwise, comparison and conversion dunder delegates to the wrapped
    array; results are re-wrapped in the subclass type via ``_rc`` (rank-0
    results are returned as bare scalars).
    """
    def __init__(self, data, dtype=None, copy=True):
        # All state lives in this single ndarray; __setattr__ below
        # special-cases the name 'array'.
        self.array = array(data, dtype, copy=copy)
    def __repr__(self):
        if len(self.shape) > 0:
            # Reuse the ndarray repr but swap in the subclass name.
            return self.__class__.__name__ + repr(self.array)[len("array"):]
        else:
            return self.__class__.__name__ + "(" + repr(self.array) + ")"
    def __array__(self, t=None):
        # numpy protocol hook; optionally casts to dtype t.
        if t:
            return self.array.astype(t)
        return self.array
    # Array as sequence
    def __len__(self):
        return len(self.array)
    def __getitem__(self, index):
        return self._rc(self.array[index])
    def __getslice__(self, i, j):
        # Python 2 slice protocol; unused on Python 3.
        return self._rc(self.array[i:j])
    def __setitem__(self, index, value):
        # Values are coerced to the wrapped array's dtype on assignment.
        self.array[index] = asarray(value, self.dtype)
    def __setslice__(self, i, j, value):
        # Python 2 slice protocol; unused on Python 3.
        self.array[i:j] = asarray(value, self.dtype)
    def __abs__(self):
        return self._rc(absolute(self.array))
    def __neg__(self):
        return self._rc(-self.array)
    # Binary arithmetic: plain forms return a new wrapped result, the
    # in-place (i*) forms write into self.array via the ufunc out argument.
    def __add__(self, other):
        return self._rc(self.array + asarray(other))
    __radd__ = __add__
    def __iadd__(self, other):
        add(self.array, other, self.array)
        return self
    def __sub__(self, other):
        return self._rc(self.array - asarray(other))
    def __rsub__(self, other):
        return self._rc(asarray(other) - self.array)
    def __isub__(self, other):
        subtract(self.array, other, self.array)
        return self
    def __mul__(self, other):
        return self._rc(multiply(self.array, asarray(other)))
    __rmul__ = __mul__
    def __imul__(self, other):
        multiply(self.array, other, self.array)
        return self
    def __div__(self, other):
        # Python 2 division protocol.
        return self._rc(divide(self.array, asarray(other)))
    def __rdiv__(self, other):
        return self._rc(divide(asarray(other), self.array))
    def __idiv__(self, other):
        divide(self.array, other, self.array)
        return self
    def __mod__(self, other):
        return self._rc(remainder(self.array, other))
    def __rmod__(self, other):
        return self._rc(remainder(other, self.array))
    def __imod__(self, other):
        remainder(self.array, other, self.array)
        return self
    def __divmod__(self, other):
        return (self._rc(divide(self.array, other)),
                self._rc(remainder(self.array, other)))
    def __rdivmod__(self, other):
        return (self._rc(divide(other, self.array)),
                self._rc(remainder(other, self.array)))
    def __pow__(self, other):
        return self._rc(power(self.array, asarray(other)))
    def __rpow__(self, other):
        return self._rc(power(asarray(other), self.array))
    def __ipow__(self, other):
        power(self.array, other, self.array)
        return self
    # Bit-shift and bitwise operators, same forward/reflected/in-place
    # pattern as the arithmetic operators above.
    def __lshift__(self, other):
        return self._rc(left_shift(self.array, other))
    def __rshift__(self, other):
        return self._rc(right_shift(self.array, other))
    def __rlshift__(self, other):
        return self._rc(left_shift(other, self.array))
    def __rrshift__(self, other):
        return self._rc(right_shift(other, self.array))
    def __ilshift__(self, other):
        left_shift(self.array, other, self.array)
        return self
    def __irshift__(self, other):
        right_shift(self.array, other, self.array)
        return self
    def __and__(self, other):
        return self._rc(bitwise_and(self.array, other))
    def __rand__(self, other):
        return self._rc(bitwise_and(other, self.array))
    def __iand__(self, other):
        bitwise_and(self.array, other, self.array)
        return self
    def __xor__(self, other):
        return self._rc(bitwise_xor(self.array, other))
    def __rxor__(self, other):
        return self._rc(bitwise_xor(other, self.array))
    def __ixor__(self, other):
        bitwise_xor(self.array, other, self.array)
        return self
    def __or__(self, other):
        return self._rc(bitwise_or(self.array, other))
    def __ror__(self, other):
        return self._rc(bitwise_or(other, self.array))
    def __ior__(self, other):
        bitwise_or(self.array, other, self.array)
        return self
    def __pos__(self):
        return self._rc(self.array)
    def __invert__(self):
        return self._rc(invert(self.array))
    def _scalarfunc(self, func):
        # Helper for the scalar-conversion dunders below; only rank-0
        # wrapped arrays may convert to a Python scalar.
        if len(self.shape) == 0:
            return func(self[0])
        else:
            raise TypeError(
                "only rank-0 arrays can be converted to Python scalars.")
    def __complex__(self):
        return self._scalarfunc(complex)
    def __float__(self):
        return self._scalarfunc(float)
    def __int__(self):
        return self._scalarfunc(int)
    def __long__(self):
        return self._scalarfunc(long)
    def __hex__(self):
        return self._scalarfunc(hex)
    def __oct__(self):
        return self._scalarfunc(oct)
    # Rich comparisons return wrapped boolean arrays, not single bools,
    # matching ndarray elementwise-comparison semantics.
    def __lt__(self, other):
        return self._rc(less(self.array, other))
    def __le__(self, other):
        return self._rc(less_equal(self.array, other))
    def __eq__(self, other):
        return self._rc(equal(self.array, other))
    def __ne__(self, other):
        return self._rc(not_equal(self.array, other))
    def __gt__(self, other):
        return self._rc(greater(self.array, other))
    def __ge__(self, other):
        return self._rc(greater_equal(self.array, other))
    def copy(self):
        return self._rc(self.array.copy())
    def tostring(self):
        return self.array.tostring()
    def byteswap(self):
        return self._rc(self.array.byteswap())
    def astype(self, typecode):
        return self._rc(self.array.astype(typecode))
    def _rc(self, a):
        # Re-wrap a result in the (sub)class type; rank-0 results are
        # returned as-is so scalars stay scalars.
        if len(shape(a)) == 0:
            return a
        else:
            return self.__class__(a)
    def __array_wrap__(self, *args):
        # Called by numpy ufuncs so outputs come back as this class.
        return self.__class__(args[0])
    def __setattr__(self, attr, value):
        # 'array' is stored on the instance itself; every other attribute
        # is first tried on the wrapped ndarray, falling back to the
        # instance when the ndarray rejects it.
        if attr == 'array':
            object.__setattr__(self, attr, value)
            return
        try:
            self.array.__setattr__(attr, value)
        except AttributeError:
            object.__setattr__(self, attr, value)
    # Only called after other approaches fail.
    def __getattr__(self, attr):
        # Delegate unknown attribute reads (shape, dtype, ...) to the
        # wrapped ndarray.
        if (attr == 'array'):
            return object.__getattribute__(self, attr)
        return self.array.__getattribute__(attr)
#############################################################
# Test of class container
#############################################################
# Manual smoke test: exercise the container wrapper against basic numpy
# operations (indexing, ufuncs, comparisons, reshaping).
if __name__ == '__main__':
    temp = reshape(arange(10000), (100, 100))
    ua = container(temp)
    # new object created begin test
    print(dir(ua))
    print(shape(ua), ua.shape) # I have changed Numeric.py
    ua_small = ua[:3, :5]
    print(ua_small)
    # this did not change ua[0,0], which is not normal behavior
    ua_small[0, 0] = 10
    print(ua_small[0, 0], ua[0, 0])
    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
    print(less(ua_small, 103), type(less(ua_small, 103)))
    print(type(ua_small * reshape(arange(15), shape(ua_small))))
    print(reshape(ua_small, (5, 3)))
    print(transpose(ua_small))
| bsd-3-clause |
codepope/transporter | vendor/gopkg.in/gorethink/gorethink.v3/internal/gen_tests/process_polyglot.py | 8 | 15087 | '''Finds and reads polyglot yaml tests (preferring the python tests),
normalizing their quirks into something that can be translated in a
sane way.
The idea is that this file contains nothing Go specific, so could
potentially be used to convert the tests for use with other drivers.
'''
import os
import sys
import os.path
import ast
import copy
import logging
from collections import namedtuple
try:
basestring
except NameError:
basestring = ("".__class__,)
logger = logging.getLogger("process_polyglot")
class EmptyTemplate(Exception):
    '''Signals from inside a template that rendering should be abandoned
    because the collection it iterates over contains nothing'''
    pass
class Unhandled(Exception):
    '''Marks a corner case that was deliberately left unimplemented;
    should get proper handling if a real test ever reaches it'''
    pass
class Skip(Exception):
    '''Raised to skip a single test, for whatever reason'''
    pass
class FatalSkip(EmptyTemplate):
    '''Used when a skipped test should prevent the entire test file
    from rendering'''
    def __init__(self, msg):
        # Log why the whole file is being dropped, then propagate the
        # reason upward as an EmptyTemplate so rendering is aborted.
        logger.info("Skipping rendering because %s", msg)
        super(FatalSkip, self).__init__(msg)
# Term: a parsed snippet -- its source line, evaluated type and AST node.
Term = namedtuple("Term", 'line type ast')
# CustomTerm: a language-specific snippet carried verbatim (never evaluated).
CustomTerm = namedtuple('CustomTerm', 'line')
# Query: one runnable test case, its expected result and where it came from.
Query = namedtuple(
    'Query',
    ('query',
     'expected',
     'testfile',
     'line_num',
     'runopts')
)
# Def: a variable definition that must be evaluated before later tests.
Def = namedtuple('Def', 'varname term run_if_query testfile line_num runopts')
# CustomDef: a language-specific definition emitted verbatim.
CustomDef = namedtuple('CustomDef', 'line testfile line_num')
# Expect: pairs a builtin-check name with a term (not used in this chunk).
Expect = namedtuple('Expect', 'bif term')
class AnythingIsFine(object):
    """Sentinel expectation: any result whatsoever satisfies the test.

    Mimics the Term interface (type / ast / line attributes) so callers
    can treat it interchangeably with a real parsed expectation.
    """
    def __init__(self):
        self.line = "AnythingIsFine"
        self.type = str
        self.ast = ast.Name("AnythingIsFine", None)
class SkippedTest(object):
    # Lightweight record of a test that will not be rendered; __slots__
    # keeps the many instances cheap.
    __slots__ = ('line', 'reason')
    def __init__(self, line, reason):
        # The "no test for this language" case is routine, so it is only
        # logged at debug level; every other reason is logged at info.
        if reason == "No go, python or generic test":
            logger.debug("Skipped test because %s", reason)
        else:
            logger.info("Skipped test because %s", reason)
            logger.info(" - Skipped test was: %s", line)
        self.line = line
        self.reason = reason
def flexiget(obj, keys, default):
    '''Look up the first of *keys* present in *obj* and return its value.

    Behaves like dict.get but accepts a list of candidate keys.  Falls
    back to *default* when no key matches, or when *obj* is not a dict
    at all.'''
    if isinstance(obj, dict):
        for candidate in keys:
            if candidate in obj:
                return obj[candidate]
    return default
def py_str(py):
    '''Render a python value as a string of python source code
    representing that value.'''
    def maybe_str(s):
        # Strings that look like expressions (they contain a call) are
        # kept verbatim; everything else is repr'd.
        return s if isinstance(s, str) and '(' in s else repr(s)
    if type(py) is dict:
        rendered = [repr(key) + ': ' + maybe_str(py[key]) for key in py]
        return '{' + ', '.join(rendered) + '}'
    if isinstance(py, basestring):
        return py
    return repr(py)
def _try_eval(node, context):
    '''For evaluating expressions given a context'''
    # Deep-copy so the caller's AST is never mutated by the rewriting
    # below.
    node_4_eval = copy.deepcopy(node)
    if type(node_4_eval) == ast.Expr:
        # Unwrap a statement-expression so it can be compiled in 'eval'
        # mode.
        node_4_eval = node_4_eval.value
    node_4_eval = ast.Expression(node_4_eval)
    ast.fix_missing_locations(node_4_eval)
    compiled_value = compile(node_4_eval, '<str>', mode='eval')
    r = context['r']
    try:
        value = eval(compiled_value, context)
    except r.ReqlError:
        # Driver-level Reql errors cannot occur in the statically typed
        # target language, so skip rather than translate the test.
        raise Skip("Java type system prevents static Reql errors")
    except AttributeError:
        raise Skip("Java type system prevents attribute errors")
    except Exception as err:
        # Any other exception is itself a legitimate result: return its
        # type/value pair just like a successful evaluation.
        return type(err), err
    else:
        return type(value), value
def try_eval(node, context):
    '''Evaluate *node* in *context*, returning only the resulting type.'''
    result_type, _value = _try_eval(node, context)
    return result_type
def try_eval_def(parsed_define, context):
    '''For evaluating python definitions like x = foo'''
    # Only single-target assignments are expected here.
    varname = parsed_define.targets[0].id
    type_, value = _try_eval(parsed_define.value, context)
    # Store the value so later definitions/tests can refer to it.
    context[varname] = value
    return varname, type_
def all_yaml_tests(test_dir, exclusions):
    '''Yield the relative path of every non-excluded yaml test found
    anywhere under *test_dir*.'''
    for dirpath, _subdirs, filenames in os.walk(test_dir):
        for filename in filenames:
            relpath = os.path.relpath(
                os.path.join(dirpath, filename), test_dir)
            if valid_filename(exclusions, relpath):
                yield relpath
def valid_filename(exclusions, filepath):
    '''Return True when *filepath* names a yaml test file that does not
    match any exclusion substring.'''
    if filepath.split('.')[-1] != 'yaml':
        return False
    for excluded in exclusions:
        if excluded in filepath:
            logger.info("Skipped %s due to exclusion %r",
                        filepath, excluded)
            return False
    return True
def fake_type(name):
    '''Build a placeholder class called *name* whose constructor accepts
    and ignores arbitrary arguments.  Stands in for test-harness helper
    functions during evaluation.'''
    def __init__(self, *args, **kwargs):
        pass
    placeholder = type(name, (object,), {'__init__': __init__})
    placeholder.__module__ = '?test?'
    return placeholder
def create_context(r, table_var_names):
    '''Creates a context for evaluation of test definitions. Needs the
    rethinkdb driver module to use, and the variable names of
    predefined tables'''
    from datetime import datetime, tzinfo, timedelta
    # Both these tzinfo classes were nabbed from
    # test/rql_test/driver/driver.py to aid in evaluation
    class UTCTimeZone(tzinfo):
        '''UTC'''
        def utcoffset(self, dt):
            return timedelta(0)
        def tzname(self, dt):
            return "UTC"
        def dst(self, dt):
            return timedelta(0)
    class PacificTimeZone(tzinfo):
        '''Pacific timezone emulator for timestamp: 1375147296.68'''
        def utcoffset(self, dt):
            return timedelta(-1, 61200)
        def tzname(self, dt):
            return 'PDT'
        def dst(self, dt):
            return timedelta(0, 3600)
    # We need to keep track of the values of definitions because each
    # subsequent definition can depend on previous ones.
    context = {
        # the driver module itself, so snippets may call r.foo(...)
        'r': r,
        'null': None,
        'nil': None,
        'sys': sys,
        'false': False,
        'true': True,
        'datetime': datetime,
        'PacificTimeZone': PacificTimeZone,
        'UTCTimeZone': UTCTimeZone,
        # mock test helper functions
        'len': lambda x: 1,
        'arrlen': fake_type("arr_len"),
        'uuid': fake_type("uuid"),
        'fetch': lambda c, limit=None: [],
        'int_cmp': fake_type("int_cmp"),
        'partial': fake_type("partial"),
        'float_cmp': fake_type("float_cmp"),
        'wait': lambda time: None,
        'err': fake_type('err'),
        'err_regex': fake_type('err_regex'),
        'regex': fake_type('regex'),
        'bag': fake_type('bag'),
        # py3 compatibility
        'xrange': range,
    }
    # Definitions can refer to these predefined table variables. Since
    # we're only evaluating definitions here to determine what the
    # type of the term will be, it doesn't need to include the db or
    # anything, it just needs to be a Table ast object.
    context.update({tbl: r.table(tbl) for tbl in table_var_names})
    return context
class TestContext(object):
    '''Holds file, context and test number info before "expected" data
    is obtained'''
    def __init__(self, context, testfile, runopts):
        self.context = context
        self.testfile = testfile
        self.runopts = runopts
    @staticmethod
    def find_python_expected(test):
        '''Extract the expected result of the test. We want the python
        specific version if it's available, so we have to poke around
        a bit'''
        if 'ot' in test:
            ret = flexiget(test['ot'], ['py', 'cd'], test['ot'])
        elif isinstance(test.get('py'), dict) and 'ot' in test['py']:
            ret = test['py']['ot']
        else:
            # This is distinct from the 'ot' field having the
            # value None in it!
            return AnythingIsFine()
        return ret
    @staticmethod
    def find_custom_expected(test, field):
        '''Gets the ot field for the language if it exists. If not it returns
        None.'''
        if 'ot' in test:
            ret = flexiget(test['ot'], [field], None)
        elif field in test:
            ret = flexiget(test[field], ['ot'], None)
        else:
            ret = None
        return ret
    def expected_context(self, test, custom_field):
        # Build an ExpectedContext for this test, preferring a
        # language-specific expectation over the evaluated python one.
        custom_expected = self.find_custom_expected(test, custom_field)
        if custom_expected is not None:
            # custom version doesn't need to be evaluated, it's in the
            # right language already
            term = CustomTerm(custom_expected)
        else:
            exp = self.find_python_expected(test)
            if type(exp) == AnythingIsFine:
                return ExpectedContext(self, AnythingIsFine())
            expected = py_str(exp)
            expected_ast = ast.parse(expected, mode="eval").body
            logger.debug("Evaluating: %s", expected)
            expected_type = try_eval(expected_ast, self.context)
            term = Term(
                ast=expected_ast,
                line=expected,
                type=expected_type,
            )
        return ExpectedContext(self, term)
    def def_from_parsed(self, define_line, parsed_define, run_if_query):
        # Evaluate an already-parsed assignment and wrap it in a Def
        # record carrying its origin information.
        logger.debug("Evaluating: %s", define_line)
        varname, result_type = try_eval_def(parsed_define, self.context)
        return Def(
            varname=varname,
            term=Term(
                line=define_line,
                type=result_type,
                ast=parsed_define),
            run_if_query=run_if_query,
            testfile=self.testfile,
            line_num=define_line.linenumber,
            runopts=self.runopts,
        )
    def def_from_define(self, define, run_if_query):
        # Parse a raw yaml 'def' value, then delegate to def_from_parsed.
        define_line = py_str(define)
        parsed_define = ast.parse(define_line, mode='single').body[0]
        return self.def_from_parsed(define_line, parsed_define, run_if_query)
    def custom_def(self, line):
        # A language-specific definition is passed through verbatim.
        return CustomDef(
            line=line, testfile=self.testfile, line_num=line.linenumber)
class ExpectedContext(object):
    '''Holds some contextual information needed to yield queries. Used by
    the tests_and_defs generator'''
    def __init__(self, test_context, expected_term):
        self.testfile = test_context.testfile
        self.context = test_context.context
        self.runopts = test_context.runopts
        self.expected_term = expected_term
    def query_from_term(self, query_term, line_num=None):
        # Skipped tests are passed through untouched; anything else is
        # packaged as a Query against self.expected_term.
        if type(query_term) == SkippedTest:
            return query_term
        else:
            return Query(
                query=query_term,
                expected=self.expected_term,
                testfile=self.testfile,
                line_num=query_term.line.linenumber,
                runopts=self.runopts,
            )
    def query_from_test(self, test):
        # Build a Query straight from a raw yaml test value.
        return self.query_from_term(
            self.term_from_test(test), test.linenumber)
    def query_from_subtest(self, test, subline_num):
        # Same as query_from_test, but tags the query with a
        # (line, sub-index) pair for unrolled subtests.
        return self.query_from_term(
            self.term_from_test(test),
            (test.linenumber, subline_num))
    def query_from_parsed(self, testline, parsed):
        return self.query_from_term(
            self.term_from_parsed(testline, parsed))
    def term_from_test(self, test):
        testline = py_str(test)
        return self.term_from_testline(testline)
    def term_from_testline(self, testline):
        parsed = ast.parse(testline, mode='eval').body
        return self.term_from_parsed(testline, parsed)
    def term_from_parsed(self, testline, parsed):
        # Evaluate the parsed expression to learn its type; a Skip
        # raised during evaluation turns the whole test into a
        # SkippedTest record instead of aborting.
        try:
            logger.debug("Evaluating: %s", testline)
            result_type = try_eval(parsed, self.context)
        except Skip as s:
            return SkippedTest(line=testline, reason=str(s))
        else:
            return Term(ast=parsed, line=testline, type=result_type)
def tests_and_defs(testfile, raw_test_data, context, custom_field=None):
    '''Generator of parsed python tests and definitions.
    `testfile` is the name of the file being converted
    `raw_test_data` is the yaml data as python data structures
    `context` is the evaluation context for the values. Will be modified
    `custom` is the specific type of test to look for.
    (falls back to 'py', then 'cd')
    '''
    for test in raw_test_data:
        # Per-test run options are parsed to ASTs so the emitter can
        # translate them alongside the query.
        runopts = test.get('runopts')
        if runopts is not None:
            runopts = {key: ast.parse(py_str(val), mode="eval").body
                       for key, val in runopts.items()}
        test_context = TestContext(context, testfile, runopts=runopts)
        if 'def' in test and flexiget(test['def'], [custom_field], False):
            yield test_context.custom_def(test['def'][custom_field])
        elif 'def' in test:
            # We want to yield the definition before the test itself
            define = flexiget(test['def'], [custom_field], None)
            if define is not None:
                yield test_context.custom_def(define)
            else:
                define = flexiget(test['def'], ['py', 'cd'], test['def'])
                # for some reason, sometimes def is just None
                if define and type(define) is not dict:
                    # if define is a dict, it doesn't have anything
                    # relevant since we already checked. if this
                    # happens to be a query fragment, the test
                    # framework should not run it, just store the
                    # fragment in the variable.
                    yield test_context.def_from_define(
                        define, run_if_query=False)
        customtest = test.get(custom_field, None)
        # as a backup try getting a python or generic test
        pytest = flexiget(test, ['py', 'cd'], None)
        if customtest is None and pytest is None:
            line = flexiget(test, ['rb', 'js'], u'¯\_(ツ)_/¯')
            yield SkippedTest(
                line=line,
                reason='No {}, python or generic test'.format(custom_field))
            continue
        expected_context = test_context.expected_context(test, custom_field)
        if customtest is not None:
            yield expected_context.query_from_term(customtest)
        elif isinstance(pytest, basestring):
            parsed = ast.parse(pytest, mode="single").body[0]
            if type(parsed) == ast.Expr:
                yield expected_context.query_from_parsed(pytest, parsed.value)
            elif type(parsed) == ast.Assign:
                # Second syntax for defines. Surprise, it wasn't a
                # test at all, because it has an equals sign in it.
                # if this happens to be a query, it will be run.
                yield test_context.def_from_parsed(
                    pytest, parsed, run_if_query=True)
        elif type(pytest) is dict and 'cd' in pytest:
            yield expected_context.query_from_test(pytest['cd'])
        else:
            # pytest is a list of sub-tests sharing one expectation.
            for i, subtest in enumerate(pytest, start=1):
                # unroll subtests
                yield expected_context.query_from_subtest(subtest, i)
| bsd-3-clause |
achoy/cwapi | backend/py-server/flask/lib/python3.6/site-packages/flask/testing.py | 121 | 5630 | # -*- coding: utf-8 -*-
"""
flask.testing
~~~~~~~~~~~~~
Implements test support helpers. This module is lazily imported
and usually not used in production environments.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import werkzeug
from contextlib import contextmanager
from werkzeug.test import Client, EnvironBuilder
from flask import _request_ctx_stack
try:
from werkzeug.urls import url_parse
except ImportError:
from urlparse import urlsplit as url_parse
def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):
    """Creates a new test builder with some application defaults thrown in."""
    server_name = app.config.get('SERVER_NAME')
    app_root = app.config.get('APPLICATION_ROOT')
    if base_url is None:
        # Derive the base URL from the request path (if absolute), the
        # configured server name, or fall back to localhost.
        parsed = url_parse(path)
        base_url = 'http://%s/' % (parsed.netloc or server_name or 'localhost')
        if app_root:
            base_url += app_root.lstrip('/')
        if parsed.netloc:
            # Absolute URL given: strip it down to path (+ query) only.
            path = parsed.path
            if parsed.query:
                path += '?' + parsed.query
    return EnvironBuilder(path, base_url, *args, **kwargs)
class FlaskClient(Client):
    """Works like a regular Werkzeug test client but has some knowledge about
    how Flask works to defer the cleanup of the request context stack to the
    end of a ``with`` body when used in a ``with`` statement. For general
    information about how to use this class refer to
    :class:`werkzeug.test.Client`.
    .. versionchanged:: 0.12
       `app.test_client()` includes preset default environment, which can be
       set after instantiation of the `app.test_client()` object in
       `client.environ_base`.
    Basic usage is outlined in the :ref:`testing` chapter.
    """
    # Set while inside a ``with client:`` block; makes each request keep
    # its context around after the response (see open/__exit__).
    preserve_context = False
    def __init__(self, *args, **kwargs):
        super(FlaskClient, self).__init__(*args, **kwargs)
        # Default WSGI environ values merged into every request; callers
        # may mutate this dict after construction.
        self.environ_base = {
            "REMOTE_ADDR": "127.0.0.1",
            "HTTP_USER_AGENT": "werkzeug/" + werkzeug.__version__
        }
    @contextmanager
    def session_transaction(self, *args, **kwargs):
        """When used in combination with a ``with`` statement this opens a
        session transaction. This can be used to modify the session that
        the test client uses. Once the ``with`` block is left the session is
        stored back.
        ::
            with client.session_transaction() as session:
                session['value'] = 42
        Internally this is implemented by going through a temporary test
        request context and since session handling could depend on
        request variables this function accepts the same arguments as
        :meth:`~flask.Flask.test_request_context` which are directly
        passed through.
        """
        if self.cookie_jar is None:
            raise RuntimeError('Session transactions only make sense '
                               'with cookies enabled.')
        app = self.application
        environ_overrides = kwargs.setdefault('environ_overrides', {})
        # Feed the client's stored cookies into the temporary request so
        # the session backend sees the current session cookie.
        self.cookie_jar.inject_wsgi(environ_overrides)
        outer_reqctx = _request_ctx_stack.top
        with app.test_request_context(*args, **kwargs) as c:
            sess = app.open_session(c.request)
            if sess is None:
                raise RuntimeError('Session backend did not open a session. '
                                   'Check the configuration')
            # Since we have to open a new request context for the session
            # handling we want to make sure that we hide out own context
            # from the caller.  By pushing the original request context
            # (or None) on top of this and popping it we get exactly that
            # behavior.  It's important to not use the push and pop
            # methods of the actual request context object since that would
            # mean that cleanup handlers are called
            _request_ctx_stack.push(outer_reqctx)
            try:
                yield sess
            finally:
                _request_ctx_stack.pop()
            # Persist whatever the caller changed back into the cookie jar
            # via a throwaway response object.
            resp = app.response_class()
            if not app.session_interface.is_null_session(sess):
                app.save_session(sess, resp)
            headers = resp.get_wsgi_headers(c.request.environ)
            self.cookie_jar.extract_wsgi(c.request.environ, headers)
    def open(self, *args, **kwargs):
        # Thread the preserve-context flag and default environ through to
        # the environ builder, then delegate to the werkzeug client.
        kwargs.setdefault('environ_overrides', {}) \
            ['flask._preserve_context'] = self.preserve_context
        kwargs.setdefault('environ_base', self.environ_base)
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        builder = make_test_environ_builder(self.application, *args, **kwargs)
        return Client.open(self, builder,
                           as_tuple=as_tuple,
                           buffered=buffered,
                           follow_redirects=follow_redirects)
    def __enter__(self):
        # Nested ``with client:`` blocks are not supported.
        if self.preserve_context:
            raise RuntimeError('Cannot nest client invocations')
        self.preserve_context = True
        return self
    def __exit__(self, exc_type, exc_value, tb):
        self.preserve_context = False
        # on exit we want to clean up earlier.  Normally the request context
        # stays preserved until the next request in the same thread comes
        # in.  See RequestGlobals.push() for the general behavior.
        top = _request_ctx_stack.top
        if top is not None and top.preserved:
            top.pop()
| bsd-3-clause |
yeming233/horizon | openstack_dashboard/dashboards/admin/flavors/views.py | 3 | 3633 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.flavors \
import tables as project_tables
from openstack_dashboard.dashboards.admin.flavors \
import workflows as flavor_workflows
INDEX_URL = "horizon:admin:flavors:index"
class IndexView(tables.DataTableView):
    """Paginated admin table of all nova flavors."""
    table_class = project_tables.FlavorsTable
    page_title = _("Flavors")
    def has_prev_data(self, table):
        return self._prev
    def has_more_data(self, table):
        return self._more
    def get_data(self):
        """Fetch one page of flavors, honoring forward/backward paging
        markers from the query string."""
        request = self.request
        prev_marker = request.GET.get(
            project_tables.FlavorsTable._meta.prev_pagination_param, None)
        if prev_marker is not None:
            marker = prev_marker
        else:
            marker = request.GET.get(
                project_tables.FlavorsTable._meta.pagination_param, None)
        # A "previous page" marker means we page in reverse order.
        reversed_order = prev_marker is not None
        flavors = []
        try:
            # Removing the pagination params and adding "is_public=None"
            # will return all flavors.
            flavors, self._more, self._prev = api.nova.flavor_list_paged(
                request, None,
                marker=marker,
                paginate=True,
                sort_dir='asc',
                sort_key='name',
                reversed_order=reversed_order)
        except Exception:
            # On API failure show an empty table with an error message
            # rather than a broken page.
            self._prev = self._more = False
            exceptions.handle(request,
                              _('Unable to retrieve flavor list.'))
        return flavors
class CreateView(workflows.WorkflowView):
    """Workflow-backed form for defining a new flavor."""
    workflow_class = flavor_workflows.CreateFlavor
    template_name = 'admin/flavors/create.html'
    page_title = _("Create Flavor")
class UpdateView(workflows.WorkflowView):
    """Workflow-backed form for editing an existing flavor."""
    workflow_class = flavor_workflows.UpdateFlavor
    template_name = 'admin/flavors/update.html'
    page_title = _("Edit Flavor")
    def get_initial(self):
        """Seed the workflow with the current values of the flavor
        identified by the 'id' URL kwarg."""
        flavor_id = self.kwargs['id']
        try:
            # Get initial flavor information
            flavor = api.nova.flavor_get(self.request, flavor_id)
        except Exception:
            # exceptions.handle with a redirect aborts this view, so
            # `flavor` is only used when the lookup succeeded.
            exceptions.handle(self.request,
                              _('Unable to retrieve flavor details.'),
                              redirect=reverse_lazy(INDEX_URL))
        return {'flavor_id': flavor.id,
                'name': flavor.name,
                'vcpus': flavor.vcpus,
                'memory_mb': flavor.ram,
                'disk_gb': flavor.disk,
                'swap_mb': flavor.swap or 0,
                'rxtx_factor': flavor.rxtx_factor or 1,
                'eph_gb': getattr(flavor, 'OS-FLV-EXT-DATA:ephemeral', None)}
| apache-2.0 |
isyippee/nova | nova/api/openstack/compute/schemas/floating_ip_dns.py | 84 | 1806 | # Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON schema validating the request body for updating a floating-IP DNS
# domain entry: requires 'scope' (public/private); 'project' and
# 'availability_zone' are optional, and no extra keys are allowed.
domain_entry_update = {
    'type': 'object',
    'properties': {
        'domain_entry': {
            'type': 'object',
            'properties': {
                'scope': {
                    'type': 'string',
                    'enum': ['public', 'private'],
                },
                'project': parameter_types.project_id,
                'availability_zone': parameter_types.name,
            },
            'required': ['scope'],
            # 'scope' plus at most one of 'project'/'availability_zone'.
            'maxProperties': 2,
            'additionalProperties': False,
        },
    },
    'required': ['domain_entry'],
    'additionalProperties': False,
}
# JSON schema validating the request body for updating a DNS entry:
# both 'ip' and 'dns_type' are mandatory, and the only accepted record
# type is 'a'/'A'.
dns_entry_update = {
    'type': 'object',
    'properties': {
        'dns_entry': {
            'type': 'object',
            'properties': {
                'ip': parameter_types.ip_address,
                'dns_type': {
                    'type': 'string',
                    'enum': ['a', 'A'],
                },
            },
            'required': ['ip', 'dns_type'],
            'additionalProperties': False,
        },
    },
    'required': ['dns_entry'],
    'additionalProperties': False,
}
| apache-2.0 |
thaumos/ansible | test/units/modules/storage/netapp/test_na_ontap_lun_map.py | 38 | 7000 | ''' unit tests ONTAP Ansible module: na_ontap_lun_map '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_lun_map \
import NetAppOntapLUNMap as my_module
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Serialize *args* into the global Ansible argument buffer so they
    are picked up when the module under test is instantiated."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised by the patched module.exit_json; carries the module's
    would-be return payload in its arguments for the test to inspect."""
    pass
class AnsibleFailJson(Exception):
    """Raised by the patched module.fail_json; carries the failure
    payload in its arguments for the test to inspect."""
    pass
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for AnsibleModule.exit_json: packages the would-be
    return data into an AnsibleExitJson exception for tests to catch."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for AnsibleModule.fail_json: packages the failure data
    into an AnsibleFailJson exception for tests to catch."""
    kwargs.update(failed=True)
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''
    def __init__(self, kind=None):
        ''' save arguments '''
        # kind selects the canned behavior: 'lun_map' returns fake lun
        # data, 'lun_map_fail' raises, anything else echoes the request.
        self.type = kind
        self.xml_in = None
        self.xml_out = None
    def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        # Record the request for later inspection by tests.
        self.xml_in = xml
        if self.type == 'lun_map':
            xml = self.build_lun_info()
        elif self.type == 'lun_map_fail':
            raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
        self.xml_out = xml
        return xml
    @staticmethod
    def build_lun_info():
        ''' build xml data for lun-map-entry '''
        xml = netapp_utils.zapi.NaElement('xml')
        # Matches the fixture values used by set_default_args in the
        # test class below (igroup 'ansible', lun id 2).
        data = {'initiator-groups': [{'initiator-group-info': {'initiator-group-name': 'ansible', 'lun-id': 2}}]}
        xml.translate_struct(data)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''
    def setUp(self):
        # Patch exit_json/fail_json on AnsibleModule so module exits are
        # surfaced as catchable exceptions instead of terminating pytest.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        self.server = MockONTAPConnection()
        # onbox=True would run against a real ONTAP box; the branches in
        # set_default_args currently carry identical placeholder values.
        self.onbox = False
    def set_default_args(self):
        # Return the minimal argument set na_ontap_lun_map requires.
        if self.onbox:
            hostname = '10.10.10.10'
            username = 'admin'
            password = 'password'
            initiator_group_name = 'ansible'
            vserver = 'ansible'
            path = '/vol/ansible/test'
            lun_id = 2
        else:
            hostname = 'hostname'
            username = 'username'
            password = 'password'
            initiator_group_name = 'ansible'
            vserver = 'ansible'
            path = '/vol/ansible/test'
            lun_id = 2
        return dict({
            'hostname': hostname,
            'username': username,
            'password': password,
            'initiator_group_name': initiator_group_name,
            'vserver': vserver,
            'path': path,
            'lun_id': lun_id
        })
    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            my_module()
        print('Info: %s' % exc.value.args[0]['msg'])
    def test_ensure_get_called(self):
        ''' test get_lun_map for non-existent lun'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = self.server
        assert my_obj.get_lun_map is not None
    def test_ensure_get_called_existing(self):
        ''' test get_lun_map for existing lun'''
        set_module_args(self.set_default_args())
        my_obj = my_module()
        my_obj.server = MockONTAPConnection(kind='lun_map')
        assert my_obj.get_lun_map()
    @patch('ansible.modules.storage.netapp.na_ontap_lun_map.NetAppOntapLUNMap.create_lun_map')
    def test_successful_create(self, create_lun_map):
        ''' mapping lun and testing idempotency '''
        data = self.set_default_args()
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        create_lun_map.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        set_module_args(self.set_default_args())
        my_obj = my_module()
        if not self.onbox:
            # 'lun_map' connection reports the mapping as existing, so the
            # second apply() must be a no-op (idempotency).
            my_obj.server = MockONTAPConnection('lun_map')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    @patch('ansible.modules.storage.netapp.na_ontap_lun_map.NetAppOntapLUNMap.delete_lun_map')
    def test_successful_delete(self, delete_lun_map):
        ''' unmapping lun and testing idempotency '''
        data = self.set_default_args()
        data['state'] = 'absent'
        set_module_args(data)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('lun_map')
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert exc.value.args[0]['changed']
        delete_lun_map.assert_called_with()
        # to reset na_helper from remembering the previous 'changed' value
        my_obj = my_module()
        if not self.onbox:
            # Default mock reports no existing mapping: deleting again
            # must not report a change (idempotency).
            my_obj.server = self.server
        with pytest.raises(AnsibleExitJson) as exc:
            my_obj.apply()
        assert not exc.value.args[0]['changed']
    def test_if_all_methods_catch_exception(self):
        # The 'lun_map_fail' mock raises NaApiError; both create and
        # delete paths must convert that into a fail_json message.
        module_args = {}
        module_args.update(self.set_default_args())
        set_module_args(module_args)
        my_obj = my_module()
        if not self.onbox:
            my_obj.server = MockONTAPConnection('lun_map_fail')
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.create_lun_map()
        assert 'Error mapping lun' in exc.value.args[0]['msg']
        with pytest.raises(AnsibleFailJson) as exc:
            my_obj.delete_lun_map()
        assert 'Error unmapping lun' in exc.value.args[0]['msg']
| gpl-3.0 |
Zord13appdesa/python-for-android | python3-alpha/python3-src/Tools/scripts/h2py.py | 46 | 5419 | #! /usr/bin/env python3
# Read #define's and translate to Python code.
# Handle #include statements.
# Handle #define macros with one argument.
# Anything that isn't recognized or doesn't translate into valid
# Python is ignored.
# Without filename arguments, acts as a filter.
# If one or more filenames are given, output is written to corresponding
# filenames in the local directory, translated to all uppercase, with
# the extension replaced by ".py".
# By passing one or more options of the form "-i regular_expression"
# you can specify additional strings to be ignored. This is useful
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
# XXX To do:
# - turn trailing C comments into Python comments
# - turn C Boolean operators "&& || !" into Python "and or not"
# - what to do about #if(def)?
# - what to do about macros with multiple parameters?
import sys, re, getopt, os

# Regular expressions for the C preprocessor constructs we translate.
# Raw strings are used throughout: the original non-raw patterns relied on
# invalid escape sequences such as '\(' which raise SyntaxWarning (and will
# eventually be errors) on modern Python.  The compiled patterns are
# byte-for-byte identical.
p_define = re.compile(r'^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
p_macro = re.compile(
    r'^[\t ]*#[\t ]*define[\t ]+'
    r'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
p_include = re.compile(r'^[\t ]*#[\t ]*include[\t ]+<([a-zA-Z0-9_/\.]+)')
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile(r'//.*')
# Patterns stripped from #define bodies; '-i' options append to this list.
ignores = [p_comment, p_cpp_comment]
p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
# filedict: headers already processed for the current output file.
# importable: header path -> generated module name, for 'from X import *'.
filedict = {}
importable = {}
# Windows uses %include%/%INCLUDE%; fall back to the Unix default.
try:
    searchdirs = os.environ['include'].split(';')
except KeyError:
    try:
        searchdirs = os.environ['INCLUDE'].split(';')
    except KeyError:
        searchdirs = ['/usr/include']
def main():
    """Translate each header named on the command line (or stdin) to Python.

    For 'FILE.h' the output goes to 'FILE.py' (uppercased) in the current
    directory; '-' reads stdin and writes stdout.  '-i REGEX' adds an extra
    pattern to strip from #define bodies.
    """
    global filedict
    opts, args = getopt.getopt(sys.argv[1:], 'i:')
    for o, a in opts:
        if o == '-i':
            ignores.append(re.compile(a))
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            sys.stdout.write('# Generated by h2py from stdin\n')
            process(sys.stdin, sys.stdout)
        else:
            outfile = os.path.basename(filename)
            i = outfile.rfind('.')
            if i > 0:
                outfile = outfile[:i]
            modname = outfile.upper()
            outfile = modname + '.py'
            # 'with' guarantees both files are closed even if process()
            # raises (the original leaked handles on error).
            with open(filename, 'r') as fp, open(outfile, 'w') as outfp:
                outfp.write('# Generated by h2py from %s\n' % filename)
                filedict = {}
                for dir in searchdirs:
                    if filename.startswith(dir):
                        filedict[filename[len(dir)+1:]] = None  # no '/' trailing
                        importable[filename[len(dir)+1:]] = modname
                        break
                process(fp, outfp)
def pytify(body):
    """Turn the right-hand side of a C #define into a Python expression."""
    # Blank out comments and any user-supplied ignore patterns.
    for pattern in ignores:
        body = pattern.sub(' ', body)
    # Character literals become ord(...) calls.
    body = p_char.sub("ord('\\1')", body)
    # Hex constants above sys.maxsize are unsigned in C; fold them back
    # into the signed range so the Python value matches the C one.
    wrap = 2 * (sys.maxsize + 1)
    pos = 0
    while True:
        hit = p_hex.search(body, pos)
        if hit is None:
            break
        lo, hi = hit.span()
        value = int(hit.group(1), 16)
        if value > sys.maxsize:
            value -= wrap
        body = body[:lo] + "(" + str(value) + ")" + body[hi:]
        pos = lo + 1
    return body
def process(fp, outfp, env=None):
    """Read a C header from fp and write translated Python to outfp.

    env is the namespace in which candidate statements are exec'd to check
    that they are valid Python; statements that fail are skipped with a
    note on stderr.  Nested #include files are processed recursively with
    the same env.
    """
    # A fresh namespace per top-level call: the original used a mutable
    # default argument ('env = {}'), so #define results from one file
    # leaked into the validation environment of every later file.
    if env is None:
        env = {}
    lineno = 0
    while 1:
        line = fp.readline()
        if not line:
            break
        lineno = lineno + 1
        match = p_define.match(line)
        if match:
            # gobble up continuation lines
            while line[-2:] == '\\\n':
                nextline = fp.readline()
                if not nextline:
                    break
                lineno = lineno + 1
                line = line + nextline
            name = match.group(1)
            body = line[match.end():]
            body = pytify(body)
            stmt = '%s = %s\n' % (name, body.strip())
            try:
                exec(stmt, env)
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_macro.match(line)
        if match:
            macro, arg = match.group(1, 2)
            body = line[match.end():]
            body = pytify(body)
            stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
            try:
                exec(stmt, env)
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_include.match(line)
        if match:
            regs = match.regs
            a, b = regs[1]
            filename = line[a:b]
            if filename in importable:
                # Header lives under a search dir we already translated:
                # reference its module instead of inlining it again.
                outfp.write('from %s import *\n' % importable[filename])
            elif filename not in filedict:
                filedict[filename] = None
                inclfp = None
                for dir in searchdirs:
                    try:
                        inclfp = open(dir + '/' + filename)
                        break
                    except IOError:
                        pass
                if inclfp:
                    outfp.write(
                        '\n# Included from %s\n' % filename)
                    process(inclfp, outfp, env)
                    # close the included header (the original leaked it)
                    inclfp.close()
                else:
                    sys.stderr.write('Warning - could not find file %s\n' %
                                     filename)

if __name__ == '__main__':
    main()
| apache-2.0 |
maloL/nao-fsm | camstream.py | 1 | 6230 | # camera streaming algorithm showing state of FSM
import time
import cv2 as opencv
import numpy as np
from naoqi import ALProxy, ALBroker, ALModule
from vision_definitions import kVGA, kBGRColorSpace
import ConfigParser, argparse
# function for getting video stream from nao camera
def nao_image_getter(alvideoproxy, video):
    """Fetch one frame from the NAO camera subscription as a numpy array."""
    raw = alvideoproxy.getImageRemote(video)
    # raw[0]/raw[1] presumably carry width/height and raw[6] the pixel
    # buffer, per the ALVideoDevice remote-image layout -- TODO confirm.
    header = opencv.cv.CreateImageHeader((raw[0], raw[1]), opencv.cv.IPL_DEPTH_8U, 3)
    opencv.cv.SetData(header, raw[6])
    return np.asarray(header[:, :])
if __name__ == '__main__':
    # Connection parameters for the NAO robot.
    IP = "192.168.1.105"
    PORT = 9559
    myBroker = ALBroker("myBroker", "0.0.0.0", 0, IP, PORT)

    # Subscribe to the robot camera (camera 1, VGA, BGR colour space, 30 fps).
    alvideoproxy = ALProxy("ALVideoDevice", IP, PORT)
    video = alvideoproxy.subscribeCamera("video", 1, kVGA, kBGRColorSpace, 30)

    # The FSM state is shared through an ini file re-read on every frame.
    cfile = "fsm_state.ini"
    config = ConfigParser.ConfigParser()

    tts = ALProxy('ALTextToSpeech', myBroker)
    motionproxy = ALProxy('ALMotion', myBroker)
    motionproxy.killAll()
    behaveproxy = ALProxy('ALBehaviorManager', myBroker)
    postureproxy = ALProxy('ALRobotPosture', myBroker)
    navigationProxy = ALProxy('ALNavigation', myBroker)
    sound = ALProxy('ALAudioDevice', myBroker)
    memory = ALProxy('ALMemory', myBroker)
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    tts = ALProxy('ALTextToSpeech', myBroker)

    # Level the robot's head before streaming.
    motionproxy.setAngles('HeadPitch', 0, 0.4)
    time.sleep(0.5)
    motionproxy.setAngles('HeadYaw', 0, 0.2)
    time.sleep(0.5)

    try:
        image_position = np.zeros(shape=2)
        pos_vec = np.zeros(shape=2)
        i = 0
        estimation = np.zeros(shape=(1, 2))
        i = 0
        reset = False
        # Main loop: read FSM state, annotate the camera frame accordingly.
        while True:
            config.read(cfile)
            image = nao_image_getter(alvideoproxy, video)
            if config.has_section("State info"):
                state = config.get("State info", "state")
                end = config.get("State info", "end")
                start = config.get("State info", "start_tracking")
                pointx = config.get("State info", "pix_x")
                pointy = config.get("State info", "pix_y")
                plotx = int(round(float(pointx)))
                ploty = int(round(float(pointy)))
                if state == "Initial":
                    opencv.putText(image, "Initial", (10, 70), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.imwrite("Slike/init.png", image)
                elif state == "Searching":
                    opencv.putText(image, "Searching", (10, 70), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.imwrite("Slike/search.png", image)
                elif state == "Image processing":
                    opencv.putText(image, "Image", (10, 70), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.putText(image, "processing", (10, 140), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.imwrite("Slike/process.png", image)
                    if plotx != 0 or ploty != 0:
                        # Mark the detected grab point on the frame.
                        opencv.circle(image, (ploty, plotx + 50), 7, (0, 255, 0), -1)
                        time.sleep(0.5)
                        opencv.putText(image, "Grab point", (ploty - 70, plotx + 110), opencv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                        opencv.ellipse(image, (ploty, plotx + 50), (30, 30), -180, 0, 360, (0, 0, 255), 2)
                        time.sleep(2)
                        opencv.imwrite("Slike/processres.png", image)
                elif state == "Object manipulation":
                    opencv.putText(image, "Object", (10, 70), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.putText(image, "manipulation", (10, 140), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.imwrite("Slike/manip.png", image)
                elif state == "Object tracking":
                    opencv.putText(image, "Object", (10, 70), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.putText(image, "tracking", (10, 140), opencv.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 5)
                    opencv.imwrite("Slike/track.png", image)
                    # NOTE(review): broker is torn down and re-created here,
                    # presumably to free it while tracking runs -- confirm.
                    myBroker.shutdown()
                    time.sleep(20)
                    myBroker = ALBroker("myBroker", "0.0.0.0", 0, IP, PORT)
            opencv.imshow("Robot camera feed", image)
            if opencv.waitKey(10) == 27:
                break
    finally:
        # Always release the camera subscription and the broker.
        time.sleep(1)
        alvideoproxy.unsubscribe(video)
        opencv.destroyAllWindows()
        time.sleep(1.0)
        myBroker.shutdown()
| lgpl-3.0 |
ejucovy/reportlab | docs/userguide/ch5_paragraphs.py | 5 | 17668 | #Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/docs/userguide/ch5_paragraphs.py
from tools.docco.rl_doc_utils import *
#begin chapter oon paragraphs
heading1("Paragraphs")
disc("""
The $reportlab.platypus.Paragraph$ class is one of the most useful of the Platypus $Flowables$;
it can format fairly arbitrary text and provides for inline font style and colour changes using
an XML style markup. The overall shape of the formatted text can be justified, right or left ragged
or centered. The XML markup can even be used to insert greek characters or to do subscripts.
""")
disc("""The following text creates an instance of the $Paragraph$ class:""")
eg("""Paragraph(text, style, bulletText=None)""")
disc("""The $text$ argument contains the text of the
paragraph; excess white space is removed from the text at the ends and internally after
linefeeds. This allows easy use of indented triple quoted text in <b>Python</b> scripts.
The $bulletText$ argument provides the text of a default bullet for the paragraph.
The font and other properties for the paragraph text and bullet are set using the style argument.
""")
disc("""
The $style$ argument should be an instance of class $ParagraphStyle$ obtained typically
using""")
eg("""
from reportlab.lib.styles import ParagraphStyle
""")
disc("""
this container class provides for the setting of multiple default paragraph attributes
in a structured way. The styles are arranged in a dictionary style object called a $stylesheet$
which allows for the styles to be accessed as $stylesheet['BodyText']$. A sample style
sheet is provided.
""")
eg("""
from reportlab.lib.styles import getSampleStyleSheet
stylesheet=getSampleStyleSheet()
normalStyle = stylesheet['Normal']
""")
disc("""
The options which can be set for a $Paragraph$ can be seen from the $ParagraphStyle$ defaults.
""")
heading4("$class ParagraphStyle$")
eg("""
class ParagraphStyle(PropertySet):
defaults = {
'fontName':'Times-Roman',
'fontSize':10,
'leading':12,
'leftIndent':0,
'rightIndent':0,
'firstLineIndent':0,
'alignment':TA_LEFT,
'spaceBefore':0,
'spaceAfter':0,
'bulletFontName':'Times-Roman',
'bulletFontSize':10,
'bulletIndent':0,
'textColor': black,
'backColor':None,
'wordWrap':None,
'borderWidth': 0,
'borderPadding': 0,
'borderColor': None,
'borderRadius': None,
'allowWidows': 1,
'allowOrphans': 0,
}
""")
heading2("Using Paragraph Styles")
#this will be used in the ParaBox demos.
sample = """You are hereby charged that on the 28th day of May, 1970, you did
willfully, unlawfully, and with malice of forethought, publish an
alleged English-Hungarian phrase book with intent to cause a breach
of the peace. How do you plead?"""
disc("""The $Paragraph$ and $ParagraphStyle$ classes together
handle most common formatting needs. The following examples
draw paragraphs in various styles, and add a bounding box
so that you can see exactly what space is taken up.""")
s1 = ParagraphStyle('Normal')
parabox(sample, s1, 'The default $ParagraphStyle$')
disc("""The two attributes $spaceBefore$ and $spaceAfter$ do what they
say, except at the top or bottom of a frame. At the top of a frame,
$spaceBefore$ is ignored, and at the bottom, $spaceAfter$ is ignored.
This means that you could specify that a 'Heading2' style had two
inches of space before when it occurs in mid-page, but will not
get acres of whitespace at the top of a page. These two attributes
should be thought of as 'requests' to the Frame and are not part
of the space occupied by the Paragraph itself.""")
disc("""The $fontSize$ and $fontName$ tags are obvious, but it is
important to set the $leading$. This is the spacing between
adjacent lines of text; a good rule of thumb is to make this
20% larger than the point size. To get double-spaced text,
use a high $leading$. If you set $autoLeading$(default $"off"$) to $"min"$(use observed leading even if smaller than specified) or $"max"$(use the larger of observed and specified) then an attempt is made to determine the leading
on a line by line basis. This may be useful if the lines contain different font sizes etc.""")
disc("""The figure below shows space before and after and an
increased leading:""")
parabox(sample,
ParagraphStyle('Spaced',
spaceBefore=6,
spaceAfter=6,
leading=16),
'Space before and after and increased leading'
)
disc("""The attribute $borderPadding$ adjusts the padding between the paragraph and the border of its background.
This can either be a single value or a tuple containing 2 to 4 values.
These values are applied the same way as in Cascading Style Sheets (CSS).
If a single value is given, that value is applied to all four sides.
If more than one value is given, they are applied in clockwise order to the sides starting at the top.
If two or three values are given, the missing values are taken from the opposite side(s).
Note that in the following example the yellow box is drawn by the paragraph itself.""")
parabox(sample,
ParagraphStyle('padded',
borderPadding=(7, 2, 20),
borderColor='#000000',
borderWidth=1,
backColor='#FFFF00'),
'Variable padding'
)
disc("""The $leftIndent$ and $rightIndent$ attributes do exactly
what you would expect; $firstLineIndent$ is added to the $leftIndent$ of the
first line. If you want a straight left edge, remember
to set $firstLineIndent$ equal to 0.""")
parabox(sample,
ParagraphStyle('indented',
firstLineIndent=+24,
leftIndent=24,
rightIndent=24),
'one third inch indents at left and right, two thirds on first line'
)
disc("""Setting $firstLineIndent$ equal to a negative number, $leftIndent$
much higher, and using a
different font (we'll show you how later!) can give you a
definition list:.""")
parabox('<b><i>Judge Pickles: </i></b>' + sample,
ParagraphStyle('dl',
leftIndent=36),
'Definition Lists'
)
disc("""There are four possible values of $alignment$, defined as
constants in the module <i>reportlab.lib.enums</i>. These are
TA_LEFT, TA_CENTER or TA_CENTRE, TA_RIGHT and
TA_JUSTIFY, with values of 0, 1, 2 and 4 respectively. These
do exactly what you would expect.""")
disc("""Set $wordWrap$ to $'CJK'$ to get Asian language linewrapping. For normal western text you can change the way
the line breaking algorithm handles <i>widows</i> and <i>orphans</i> with the $allowWidows$ and $allowOrphans$ values.
Both should normally be set to $0$, but for historical reasons we have allowed <i>widows</i>.
The default color of the text can be set with $textColor$ and the paragraph background
colour can be set with $backColor$. The paragraph's border properties may be changed using
$borderWidth$, $borderPadding$, $borderColor$ and $borderRadius$.""")
heading2("Paragraph XML Markup Tags")
disc("""XML markup can be used to modify or specify the
overall paragraph style, and also to specify intra-
paragraph markup.""")
heading3("The outermost < para > tag")
disc("""
The paragraph text may optionally be surrounded by
<para attributes....>
</para>
tags. The attributes if any of the opening <para> tag affect the style that is used
with the $Paragraph$ $text$ and/or $bulletText$.
""")
disc(" ")
from reportlab.platypus.paraparser import _addAttributeNames, _paraAttrMap, _bulletAttrMap
def getAttrs(A):
    """Build (data, colWidths, rowHeights) for a table mapping canonical
    attribute names to their comma-separated synonyms.

    A is a paraparser attribute map; _addAttributeNames() extends it in
    place with the canonical names before we invert it.
    """
    _addAttributeNames(A)
    S = {}
    for k, v in A.items():
        a = v[0]
        if a not in S:
            S[a] = k
        else:
            S[a] = "%s, %s" % (S[a], k)
    D = [('Attribute', 'Synonyms')]
    # sorted() instead of the old 'K = S.keys(); K.sort()' idiom:
    # dict.keys() returns a view without .sort() on Python 3.
    for a in sorted(S):
        D.append((a, S[a]))
    cols = 2 * [None]
    rows = len(D) * [None]
    return D, cols, rows
t=Table(*getAttrs(_paraAttrMap))
t.setStyle(TableStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('FONT',(0,1),(-1,-1),'Courier',8,8),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
]))
getStory().append(t)
caption("""Table <seq template="%(Chapter)s-%(Table+)s"/> - Synonyms for style attributes""")
disc("""Some useful synonyms have been provided for our Python attribute
names, including lowercase versions, and the equivalent properties
from the HTML standard where they exist. These additions make
it much easier to build XML-printing applications, since
much intra-paragraph markup may not need translating. The
table below shows the allowed attributes and synonyms in the
outermost paragraph tag.""")
heading2("Intra-paragraph markup")
disc("""'<![CDATA[Within each paragraph, we use a basic set of XML tags
to provide markup. The most basic of these are bold (<b>...</b>),
italic (<i>...</i>) and underline (<u>...</u>).
Other tags which are allowed are strong (<strong>...</strong>), and strike through (<strike>...</strike>). The <link> and <a> tags
may be used to refer to URIs, documents or bookmarks in the current document. The a variant of the <a> tag can be used to
mark a position in a document.
A break (<br/>) tag is also allowed.]]>
""")
parabox2("""<b>You are hereby charged</b> that on the 28th day of May, 1970, you did
willfully, unlawfully, and <i>with malice of forethought</i>, publish an
alleged English-Hungarian phrase book with intent to cause a breach
of the peace. <u>How do you plead</u>?""", "Simple bold and italic tags")
parabox2("""This <a href="#MYANCHOR" color="blue">is a link to</a> an anchor tag ie <a name="MYANCHOR"/><font color="green">here</font>.
This <link href="#MYANCHOR" color="blue" fontName="Helvetica">is another link to</link> the same anchor tag.""",
"anchors and links")
disc("""The <b>link</b> tag can be used as a reference, but
not as an anchor. The a and link hyperlink tags have additional attributes <i>fontName</i>,
<i>fontSize</i>, <i>color</i> & <i>backColor</i> attributes.
The hyperlink reference can have a scheme of <b>http:</b><i>(external webpage)</i>, <b>pdf:</b><i>(different pdf document)</i> or
<b>document:</b><i>(same pdf document)</i>; a missing scheme is treated as <b>document</b> as is the case when the reference starts with # (in which case the anchor should omit it). Any other scheme is treated as some kind of URI.
""")
parabox2("""<strong>You are hereby charged</strong> that on the 28th day of May, 1970, you did
willfully, unlawfully, <strike>and with malice of forethought</strike>, <br/>publish an
alleged English-Hungarian phrase book with intent to cause a breach
of the peace. How do you plead?""", "Strong, strike, and break tags")
heading3("The $<font>$ tag")
disc("""The $<font>$ tag can be used to change the font name,
size and text color for any substring within the paragraph.
Legal attributes are $size$, $face$, $name$ (which is the same as $face$),
$color$, and $fg$ (which is the same as $color$). The $name$ is
the font family name, without any 'bold' or 'italic' suffixes.
Colors may be
HTML color names or a hex string encoded in a variety of ways;
see ^reportlab.lib.colors^ for the formats allowed.""")
parabox2("""<font face="times" color="red">You are hereby charged</font> that on the 28th day of May, 1970, you did
willfully, unlawfully, and <font size=14>with malice of forethought</font>,
publish an
alleged English-Hungarian phrase book with intent to cause a breach
of the peace. How do you plead?""", "The $font$ tag")
heading3("Superscripts and Subscripts")
disc("""Superscripts and subscripts are supported with the
<![CDATA[<super> and <sub> tags, which work exactly
as you might expect. In addition, most greek letters
can be accessed by using the <greek></greek>
tag, or with mathML entity names.]]>""")
##parabox2("""<greek>epsilon</greek><super><greek>iota</greek>
##<greek>pi</greek></super> = -1""", "Greek letters and subscripts")
parabox2("""Equation (α): <greek>e</greek> <super><greek>ip</greek></super> = -1""",
"Greek letters and superscripts")
heading3("Inline Images")
disc("""We can embed images in a paragraph with the
<img/> tag which has attributes $src$, $width$, $height$ whose meanings are obvious. The $valign$ attribute may be set to a css like value from
"baseline", "sub", "super", "top", "text-top", "middle", "bottom", "text-bottom"; the value may also be a numeric percentage or an absolute value.
""")
parabox2("""<para autoLeading="off" fontSize=12>This <img/> <img src="../images/testimg.gif" valign="top"/> is aligned <b>top</b>.<br/><br/>
This <img/> <img src="../images/testimg.gif" valign="bottom"/> is aligned <b>bottom</b>.<br/><br/>
This <img/> <img src="../images/testimg.gif" valign="middle"/> is aligned <b>middle</b>.<br/><br/>
This <img/> <img src="../images/testimg.gif" valign="-4"/> is aligned <b>-4</b>.<br/><br/>
This <img/> <img src="../images/testimg.gif" valign="+4"/> is aligned <b>+4</b>.<br/><br/>
This <img/> <img src="../images/testimg.gif" width="10"/> has width <b>10</b>.<br/><br/>
</para>""","Inline images")
heading3("Numbering Paragraphs and Lists")
disc("""The $<seq>$ tag provides comprehensive support
for numbering lists, chapter headings and so on. It acts as
an interface to the $Sequencer$ class in ^reportlab.lib.sequencer^.
These are used to number headings and figures throughout this
document.
You may create as many separate 'counters' as you wish, accessed
with the $id$ attribute; these will be incremented by one each
time they are accessed. The $seqreset$ tag resets a counter.
If you want it to resume from a number other than 1, use
the syntax <seqreset id="mycounter" base="42">.
Let's have a go:""")
parabox2("""<seq id="spam"/>, <seq id="spam"/>, <seq id="spam"/>.
Reset<seqreset id="spam"/>. <seq id="spam"/>, <seq id="spam"/>,
<seq id="spam"/>.""", "Basic sequences")
disc("""You can save specifying an ID by designating a counter ID
as the <i>default</i> using the <seqdefault id="Counter">
tag; it will then be used whenever a counter ID
is not specified. This saves some typing, especially when
doing multi-level lists; you just change counter ID when
stepping in or out a level.""")
parabox2("""<seqdefault id="spam"/>Continued... <seq/>,
<seq/>, <seq/>, <seq/>, <seq/>, <seq/>, <seq/>.""",
"The default sequence")
disc("""Finally, one can access multi-level sequences using
a variation of Python string formatting and the $template$
attribute in a <seq> tags. This is used to do the
captions in all of the figures, as well as the level two
headings. The substring $%(counter)s$ extracts the current
value of a counter without incrementing it; appending a
plus sign as in $%(counter)s$ increments the counter.
The figure captions use a pattern like the one below:""")
parabox2("""Figure <seq template="%(Chapter)s-%(FigureNo+)s"/> - Multi-level templates""",
"Multi-level templates")
disc("""We cheated a little - the real document used 'Figure',
but the text above uses 'FigureNo' - otherwise we would have
messed up our numbering!""")
heading2("Bullets and Paragraph Numbering")
disc("""In addition to the three indent properties, some other
parameters are needed to correctly handle bulleted and numbered
lists. We discuss this here because you have now seen how
to handle numbering. A paragraph may have an optional
^bulletText^ argument passed to its constructor; alternatively,
bullet text may be placed in a $<![CDATA[<bullet>..</bullet>]]>$
tag at its head. This text will be drawn on the first line of
the paragraph, with its x origin determined by the $bulletIndent$
attribute of the style, and in the font given in the
$bulletFontName$ attribute. The "bullet" may be a single character
such as (doh!) a bullet, or a fragment of text such as a number in
some numbering sequence, or even a short title as used in a definition
list. Fonts may offer various bullet
characters but we suggest first trying the Unicode bullet ($•$), which may
be written as $&bull;$, $&#x2022;$ or (in utf8) $\\xe2\\x80\\xa2$):""")
t=Table(*getAttrs(_bulletAttrMap))
t.setStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('FONT',(0,1),(-1,-1),'Courier',8,8),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
])
getStory().append(t)
caption("""Table <seq template="%(Chapter)s-%(Table+)s"/> - <bullet> attributes & synonyms""")
disc("""The <bullet> tag is only allowed once in a given paragraph and its use
overrides the implied bullet style and ^bulletText^ specified in the ^Paragraph^
creation.
""")
parabox("""<bullet>\xe2\x80\xa2</bullet>this is a bullet point. Spam
spam spam spam spam spam spam spam spam spam spam spam
spam spam spam spam spam spam spam spam spam spam """,
styleSheet['Bullet'],
'Basic use of bullet points')
disc("""Exactly the same technique is used for numbers,
except that a sequence tag is used. It is also possible
to put a multi-character string in the bullet; with a deep
indent and bold bullet font, you can make a compact
definition list.""")
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.