repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
xindus40223115/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/sre_constants.py | 692 | 7172 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
# Version stamp for the generated tables; written into sre_constants.h as
# SRE_MAGIC by the __main__ block below so _sre can detect a mismatch.
MAGIC = 20031017
#MAXREPEAT = 2147483648
#from _sre import MAXREPEAT
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """SRE standard exception (accessed by users as sre.error)."""
# operators
# Symbolic names for engine opcodes; the OPCODES list below fixes their
# integer values via makedict().
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
# Operands for the AT opcode (anchors such as ^, $, \b); values come from
# the ATCODES list below.
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
# Operands for the CATEGORY opcode (character classes such as \d, \s, \w);
# values come from the CHCODES list below.
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
# Master ordering tables: an item's position in each list is its integer
# code (assigned by the makedict() calls further below).
OPCODES = [
    # failure=0 success=1 (just because it looks better that way :-)
    FAILURE, SUCCESS,

    ANY, ANY_ALL,
    ASSERT, ASSERT_NOT,
    AT,
    BRANCH,
    CALL,
    CATEGORY,
    CHARSET, BIGCHARSET,
    GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
    IN, IN_IGNORE,
    INFO,
    JUMP,
    LITERAL, LITERAL_IGNORE,
    MARK,
    MAX_UNTIL,
    MIN_UNTIL,
    NOT_LITERAL, NOT_LITERAL_IGNORE,
    NEGATE,
    RANGE,
    REPEAT,
    REPEAT_ONE,
    SUBPATTERN,
    MIN_REPEAT_ONE
]

# Position (anchor) codes, operands of the AT opcode.
ATCODES = [
    AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
    AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
    AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
    AT_UNI_NON_BOUNDARY
]

# Character-category codes, operands of the CATEGORY opcode.
CHCODES = [
    CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
    CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
    CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
    CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
    CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
    CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(items):
    """Return a dict mapping each element of *items* to its position index.

    Used below to turn the OPCODES/ATCODES/CHCODES name lists into
    name -> integer-code tables.  (The original parameter was named
    ``list``, shadowing the builtin; all call sites are positional, so
    renaming it is safe.)
    """
    return {item: index for index, item in enumerate(items)}
# Rebind the name lists as name -> integer-code dicts; from here on
# OPCODES/ATCODES/CHCODES are mappings, not lists.
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)

# replacement operations for "ignore case" mode
OP_IGNORE = {
    GROUPREF: GROUPREF_IGNORE,
    IN: IN_IGNORE,
    LITERAL: LITERAL_IGNORE,
    NOT_LITERAL: NOT_LITERAL_IGNORE
}

# Anchor replacements applied in multiline mode (^ / $ match at line
# boundaries instead of string boundaries).
AT_MULTILINE = {
    AT_BEGINNING: AT_BEGINNING_LINE,
    AT_END: AT_END_LINE
}

# Word-boundary anchor replacements for locale mode.
AT_LOCALE = {
    AT_BOUNDARY: AT_LOC_BOUNDARY,
    AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}

# Word-boundary anchor replacements for unicode mode.
AT_UNICODE = {
    AT_BOUNDARY: AT_UNI_BOUNDARY,
    AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}

# Category replacements for locale mode; entries mapping to themselves
# have no locale-specific variant.
CH_LOCALE = {
    CATEGORY_DIGIT: CATEGORY_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_LOC_WORD,
    CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}

# Category replacements for unicode mode; every category has a unicode
# variant.
CH_UNICODE = {
    CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
    CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_SPACE: CATEGORY_UNI_SPACE,
    CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
    CATEGORY_WORD: CATEGORY_UNI_WORD,
    CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
    CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
    CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
# Bit-flag values; mirrored into sre_constants.h by the __main__ dump
# below, so they must stay in sync with the C engine.
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate sre_constants.h, the C include file consumed by _sre.

    def dump(f, d, prefix):
        """Write one '#define <prefix>_<NAME> <value>' line per table
        entry, ordered by integer code value."""
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))

    # 'with' guarantees the header is closed even if a write fails
    # (the original left the file open on the error path).
    with open("sre_constants.h", "w") as f:
        f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py.  If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB.  All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */

""")
        f.write("#define SRE_MAGIC %d\n" % MAGIC)
        dump(f, OPCODES, "SRE_OP")
        dump(f, ATCODES, "SRE")
        dump(f, CHCODES, "SRE")
        f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
        f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
        f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
        f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
        f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
        f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
        f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
        f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
        f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
        f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    print("done")
| gpl-3.0 |
epssy/hue | apps/oozie/src/oozie/migrations/0022_auto__chg_field_mapreduce_node_ptr__chg_field_start_node_ptr.py | 37 | 25559 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply: rebuild the node_ptr one-to-one columns as primary keys."""
    # Changing field 'Mapreduce.node_ptr'
    db.alter_column('oozie_mapreduce', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True))

    # Changing field 'Start.node_ptr'
    db.alter_column('oozie_start', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True))
def backwards(self, orm):
    """Revert: recreate the node_ptr columns without primary_key."""
    # Changing field 'Mapreduce.node_ptr'
    db.alter_column('oozie_mapreduce', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True))

    # Changing field 'Start.node_ptr'
    db.alter_column('oozie_start', 'node_ptr_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 534880)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 534819)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.bundle': {
'Meta': {'object_name': 'Bundle', '_ormbases': ['oozie.Job']},
'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['oozie.Coordinator']", 'through': "orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 429841)'})
},
'oozie.bundledcoordinator': {
'Meta': {'object_name': 'BundledCoordinator'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Bundle']"}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 31, 16, 10, 12, 427644)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 427612)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 28, 16, 10, 12, 428249)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
| apache-2.0 |
daxgirl/2Stroke-kernel-n910f | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# wxPython is a hard requirement for this GUI; fail early with a clear
# message when it is missing.
try:
    import wx
except ImportError:
    # The original used the Python-2-only "raise Exc, msg" form, which is
    # a syntax error on Python 3; the call form works on both.
    raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
    """Main window drawing the scheduler trace as rows of rectangles
    inside a scrollable, zoomable canvas."""
    # Layout constants, in pixels.
    Y_OFFSET = 100            # vertical offset of the first rectangle row
    RECT_HEIGHT = 100         # height of each rectangle row
    RECT_SPACE = 50           # vertical gap between rows
    EVENT_MARKING_WIDTH = 5   # height of the event-marker strip drawn on top of a rectangle
def __init__(self, sched_tracer, title, parent = None, id = -1):
    """Build the main trace window: size the virtual canvas from the
    traced time interval reported by *sched_tracer*, then wire up the
    scrolled drawing area and its paint/keyboard/mouse handlers."""
    wx.Frame.__init__(self, parent, id, title)

    # Use (almost) the whole display, keeping a small margin.
    (self.screen_width, self.screen_height) = wx.GetDisplaySize()
    self.screen_width -= 10
    self.screen_height -= 10
    self.zoom = 0.5            # horizontal pixels-per-time scale factor
    self.scroll_scale = 20     # scrollbar unit, in pixels
    self.sched_tracer = sched_tracer
    self.sched_tracer.set_root_win(self)
    (self.ts_start, self.ts_end) = sched_tracer.interval()
    self.update_width_virtual()
    self.nr_rects = sched_tracer.nr_rectangles() + 1
    self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

    # whole window panel
    self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

    # scrollable container
    self.scroll = wx.ScrolledWindow(self.panel)
    self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
    self.scroll.EnableScrolling(True, True)
    self.scroll.SetFocus()

    # scrollable drawing area
    self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
    # Bind both the inner panel and its container so events are handled
    # wherever the pointer/focus happens to be.
    self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
    self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
    self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
    self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
    self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
    self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

    self.scroll.Fit()
    self.Fit()

    self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

    self.txt = None   # summary StaticText widget, created lazily in update_summary()

    self.Show(True)
def us_to_px(self, val):
    """Convert a duration in microseconds to pixels at the current zoom.

    NOTE(review): this file is Python 2, so "val / (10 ** 3)" is integer
    floor division when val is an int — do not reorder the operations.
    """
    return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
    """Convert a pixel distance back to microseconds (inverse of
    us_to_px, up to integer-division rounding)."""
    return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
    """Return the current scroll origin as an (x, y) pair of pixels."""
    col, row = self.scroll.GetViewStart()
    return (col * self.scroll_scale, row * self.scroll_scale)
def scroll_start_us(self):
    """Return the horizontal scroll origin converted to microseconds."""
    horizontal_px = self.scroll_start()[0]
    return self.px_to_us(horizontal_px)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
nip3o/open-lighting | python/examples/ola_recv_dmx.py | 3 | 1735 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# ola_recv_dmx.py
# Copyright (C) 2005-2009 Simon Newton
"""Receive DMX data."""
__author__ = 'nomis52@gmail.com (Simon Newton)'
import getopt
import textwrap
import sys
from ola.ClientWrapper import ClientWrapper
def NewData(data):
  """Callback for RegisterUniverse: print each received DMX data frame.

  Python 2 print statement; `data` is whatever the ola client passes
  (presumably a sequence of channel values -- confirm against
  ola.ClientWrapper).
  """
  print data
def Usage():
  """Print command-line usage/help text for this script to stdout."""
  # Fixed typo in the help text: "DXM512" -> "DMX512".
  # print(...) with a single parenthesized argument behaves identically
  # under Python 2 and Python 3.
  print(textwrap.dedent("""
    Usage: ola_recv_dmx.py --universe <universe>

    Display the DMX512 data for the universe.

    -h, --help                Display this help message and exit.
    -u, --universe <universe> Universe number."""))
def main():
  """Parse command-line options and stream DMX frames for one universe.

  Defaults to universe 1; exits with status 2 on bad options.
  """
  try:
    opts, args = getopt.getopt(sys.argv[1:], "hu:", ["help", "universe="])
  except getopt.GetoptError as err:
    # Unknown flag or missing argument: report it, show usage, bail out.
    print(str(err))
    Usage()
    sys.exit(2)

  universe = 1
  for option, value in opts:
    if option in ("-h", "--help"):
      Usage()
      sys.exit()
    elif option in ("-u", "--universe"):
      universe = int(value)

  # Register for updates on the chosen universe and run the client's
  # event loop forever; NewData is invoked per received frame.
  wrapper = ClientWrapper()
  client = wrapper.Client()
  client.RegisterUniverse(universe, client.REGISTER, NewData)
  wrapper.Run()
# Run the receiver only when executed as a script (not on import).
if __name__ == "__main__":
  main()
| lgpl-2.1 |
valdecar/Murka | pyttsx/engine.py | 1 | 7206 | '''
Speech engine front-end.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from . import driver
import traceback
import weakref
class Engine(object):
    '''
    Text-to-speech engine front-end.

    Keeps a registry of event callbacks keyed by topic name and forwards
    all speech commands to a platform-specific L{DriverProxy}.

    @ivar proxy: Proxy to a driver implementation
    @type proxy: L{DriverProxy}
    @ivar _connects: Topic name -> list of subscribed callbacks
    @type _connects: dict
    @ivar _inLoop: Whether an event loop is currently running
    @type _inLoop: bool
    @ivar _driverLoop: Whether the driver's own run loop is used
    @type _driverLoop: bool
    @ivar _debug: Whether callback exceptions are printed
    @type _debug: bool
    '''

    def __init__(self, driverName=None, debug=False):
        '''
        Builds a TTS engine bound to a platform driver.

        @param driverName: Platform driver to load; None picks the OS default
        @type driverName: str
        @param debug: Enable traceback output for callback errors
        @type debug: bool
        '''
        # The proxy gets a weak reference so it never keeps the engine alive.
        self.proxy = driver.DriverProxy(weakref.proxy(self), driverName, debug)
        self._debug = debug
        self._inLoop = False
        self._driverLoop = True
        self._connects = {}

    def _notify(self, topic, **kwargs):
        '''
        Dispatches an event to every callback subscribed to its topic.

        @param topic: String event name
        @type topic: str
        @param kwargs: Values associated with the event
        @type kwargs: dict
        '''
        for listener in self._connects.get(topic, []):
            try:
                listener(**kwargs)
            except Exception:
                # Callbacks must never break dispatch; optionally report.
                if self._debug:
                    traceback.print_exc()

    def connect(self, topic, cb):
        '''
        Subscribes a callback to an event topic. Valid topics and their
        associated values:

            started-utterance: name=<str>
            started-word: name=<str>, location=<int>, length=<int>
            finished-utterance: name=<str>, completed=<bool>
            error: name=<str>, exception=<exception>

        @param topic: Event topic name
        @type topic: str
        @param cb: Callback function
        @type cb: callable
        @return: Token usable with L{disconnect}
        @rtype: dict
        '''
        self._connects.setdefault(topic, []).append(cb)
        return {'topic': topic, 'cb': cb}

    def disconnect(self, token):
        '''
        Removes a previously registered callback; unknown topics are ignored.

        @param token: Token returned by L{connect}
        @type token: dict
        '''
        topic = token['topic']
        listeners = self._connects.get(topic)
        if listeners is None:
            return
        listeners.remove(token['cb'])
        if not listeners:
            # Drop empty topic buckets so the registry stays tidy.
            del self._connects[topic]

    def say(self, text, name=None):
        '''
        Queues an utterance for speaking.

        @param text: Text to sepak
        @type text: unicode
        @param name: Label included in event notifications for this utterance
        @type name: str
        '''
        self.proxy.say(text, name)

    def stop(self):
        '''
        Halts the current utterance and empties the command queue.
        '''
        self.proxy.stop()

    def isBusy(self):
        '''
        @return: True if an utterance is currently being spoken, false if not
        @rtype: bool
        '''
        return self.proxy.isBusy()

    def getProperty(self, name):
        '''
        Fetches the current value of an engine property. Valid names:

            voices: List of L{voice.Voice} objects supported by the driver
            voice: String ID of the current voice
            rate: Integer speech rate in words per minute
            volume: Floating point volume of speech in the range [0.0, 1.0]

        @param name: Name of the property to fetch
        @type name: str
        @return: Value associated with the property
        @rtype: object
        @raise KeyError: When the property name is unknown
        '''
        return self.proxy.getProperty(name)

    def setProperty(self, name, value):
        '''
        Queues a property assignment. Valid names: voice, rate, volume
        (see L{getProperty}). Out-of-range numeric values are clipped by
        the driver.

        @param name: Name of the property to set
        @type name: str
        @param: Value to set for the property
        @rtype: object
        @raise KeyError: When the property name is unknown
        '''
        self.proxy.setProperty(name, value)

    def runAndWait(self):
        '''
        Blocks inside a driver event loop until everything queued before
        this call has been processed.

        @raise RuntimeError: When the loop is already running
        '''
        if self._inLoop:
            raise RuntimeError('run loop already started')
        self._inLoop = True
        self._driverLoop = True
        self.proxy.runAndWait()

    def startLoop(self, useDriverLoop=True):
        '''
        Begins processing queued commands and callbacks in an event loop.

        @param useDriverLoop: True to let the driver run its own loop;
            False when the caller pumps events itself via L{iterate}
        @type useDriverLoop: bool
        @raise RuntimeError: When the loop is already running
        '''
        if self._inLoop:
            raise RuntimeError('run loop already started')
        self._inLoop = True
        self._driverLoop = useDriverLoop
        self.proxy.startLoop(self._driverLoop)

    def endLoop(self):
        '''
        Terminates a running event loop.

        @raise RuntimeError: When the loop is not running
        '''
        if not self._inLoop:
            raise RuntimeError('run loop not started')
        self.proxy.endLoop(self._driverLoop)
        self._inLoop = False

    def iterate(self):
        '''
        Pumps pending engine events; call regularly when driving an
        external event loop (startLoop(useDriverLoop=False)).

        @raise RuntimeError: When no loop is running, or when the driver
            owns the loop
        '''
        if not self._inLoop:
            raise RuntimeError('run loop not started')
        if self._driverLoop:
            raise RuntimeError('iterate not valid in driver run loop')
        self.proxy.iterate()
MRigal/mongoengine | tests/document/instance.py | 3 | 99399 | # -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import bson
import os
import pickle
import unittest
import uuid
import weakref
from datetime import datetime
from bson import DBRef, ObjectId
from tests import fixtures
from tests.fixtures import (PickleEmbedded, PickleTest, PickleSignalsTest,
PickleDyanmicEmbedded, PickleDynamicTest)
from mongoengine import *
from mongoengine.errors import (NotRegistered, InvalidDocumentError,
InvalidQueryError, NotUniqueError,
FieldDoesNotExist, SaveConditionError)
from mongoengine.queryset import NULLIFY, Q
from mongoengine.connection import get_db
from mongoengine.base import get_document
from mongoengine.context_managers import switch_db, query_counter
from mongoengine import signals
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__),
'../fields/mongoengine.png')
__all__ = ("InstanceTest",)
class InstanceTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
self.db = get_db()
class Job(EmbeddedDocument):
name = StringField()
years = IntField()
class Person(Document):
name = StringField()
age = IntField()
job = EmbeddedDocumentField(Job)
non_field = True
meta = {"allow_inheritance": True}
self.Person = Person
self.Job = Job
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
def assertDbEqual(self, docs):
self.assertEqual(
list(self.Person._get_collection().find().sort("id")),
sorted(docs, key=lambda doc: doc["_id"]))
def assertHasInstance(self, field, instance):
self.assertTrue(hasattr(field, "_instance"))
self.assertTrue(field._instance is not None)
if isinstance(field._instance, weakref.ProxyType):
self.assertTrue(field._instance.__eq__(instance))
else:
self.assertEqual(field._instance, instance)
def test_capped_collection(self):
"""Ensure that capped collections work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
'max_size': 4096,
}
Log.drop_collection()
# Ensure that the collection handles up to its maximum
for _ in range(10):
Log().save()
self.assertEqual(Log.objects.count(), 10)
# Check that extra documents don't increase the size
Log().save()
self.assertEqual(Log.objects.count(), 10)
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 4096)
# Check that the document cannot be redefined with different options
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 11,
}
# Create the collection by accessing Document.objects
Log.objects
self.assertRaises(InvalidCollectionError, recreate_log_document)
Log.drop_collection()
def test_capped_collection_default(self):
"""Ensure that capped collections defaults work properly.
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertEqual(options['max'], 10)
self.assertEqual(options['size'], 10 * 2**20)
# Check that the document with default value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_documents': 10,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
def test_capped_collection_no_max_size_problems(self):
"""Ensure that capped collections with odd max_size work properly.
MongoDB rounds up max_size to next multiple of 256, recreating a doc
with the same spec failed in mongoengine <0.10
"""
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
Log.drop_collection()
# Create a doc to create the collection
Log().save()
options = Log.objects._collection.options()
self.assertEqual(options['capped'], True)
self.assertTrue(options['size'] >= 10000)
# Check that the document with odd max_size value can be recreated
def recreate_log_document():
class Log(Document):
date = DateTimeField(default=datetime.now)
meta = {
'max_size': 10000,
}
# Create the collection by accessing Document.objects
Log.objects
recreate_log_document()
Log.drop_collection()
def test_repr(self):
"""Ensure that unicode representation works
"""
class Article(Document):
title = StringField()
def __unicode__(self):
return self.title
doc = Article(title=u'привет мир')
self.assertEqual('<Article: привет мир>', repr(doc))
def test_repr_none(self):
"""Ensure None values handled correctly
"""
class Article(Document):
title = StringField()
def __str__(self):
return None
doc = Article(title=u'привет мир')
self.assertEqual('<Article: None>', repr(doc))
def test_queryset_resurrects_dropped_collection(self):
self.Person.drop_collection()
self.assertEqual([], list(self.Person.objects()))
class Actor(self.Person):
pass
# Ensure works correctly with inhertited classes
Actor.objects()
self.Person.drop_collection()
self.assertEqual([], list(Actor.objects()))
def test_polymorphic_references(self):
"""Ensure that the correct subclasses are returned from a query when
using references / generic references
"""
class Animal(Document):
meta = {'allow_inheritance': True}
class Fish(Animal):
pass
class Mammal(Animal):
pass
class Dog(Mammal):
pass
class Human(Mammal):
pass
class Zoo(Document):
animals = ListField(ReferenceField(Animal))
Zoo.drop_collection()
Animal.drop_collection()
Animal().save()
Fish().save()
Mammal().save()
Dog().save()
Human().save()
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
class Zoo(Document):
animals = ListField(GenericReferenceField(Animal))
# Save a reference to each animal
zoo = Zoo(animals=Animal.objects)
zoo.save()
zoo.reload()
classes = [a.__class__ for a in Zoo.objects.first().animals]
self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
Zoo.drop_collection()
Animal.drop_collection()
def test_reference_inheritance(self):
class Stats(Document):
created = DateTimeField(default=datetime.now)
meta = {'allow_inheritance': False}
class CompareStats(Document):
generated = DateTimeField(default=datetime.now)
stats = ListField(ReferenceField(Stats))
Stats.drop_collection()
CompareStats.drop_collection()
list_stats = []
for i in xrange(10):
s = Stats()
s.save()
list_stats.append(s)
cmp_stats = CompareStats(stats=list_stats)
cmp_stats.save()
self.assertEqual(list_stats, CompareStats.objects.first().stats)
def test_db_field_load(self):
"""Ensure we load data correctly
"""
class Person(Document):
name = StringField(required=True)
_rank = StringField(required=False, db_field="rank")
@property
def rank(self):
return self._rank or "Private"
Person.drop_collection()
Person(name="Jack", _rank="Corporal").save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_db_embedded_doc_field_load(self):
"""Ensure we load embedded document data correctly
"""
class Rank(EmbeddedDocument):
title = StringField(required=True)
class Person(Document):
name = StringField(required=True)
rank_ = EmbeddedDocumentField(Rank,
required=False,
db_field='rank')
@property
def rank(self):
if self.rank_ is None:
return "Private"
return self.rank_.title
Person.drop_collection()
Person(name="Jack", rank_=Rank(title="Corporal")).save()
Person(name="Fred").save()
self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
def test_document_not_registered(self):
class Place(Document):
name = StringField()
meta = {'allow_inheritance': True}
class NicePlace(Place):
pass
Place.drop_collection()
Place(name="London").save()
NicePlace(name="Buckingham Palace").save()
# Mimic Place and NicePlace definitions being in a different file
# and the NicePlace model not being imported in at query time.
from mongoengine.base import _document_registry
del(_document_registry['Place.NicePlace'])
def query_without_importing_nice_place():
print Place.objects.all()
self.assertRaises(NotRegistered, query_without_importing_nice_place)
def test_document_registry_regressions(self):
class Location(Document):
name = StringField()
meta = {'allow_inheritance': True}
class Area(Location):
location = ReferenceField('Location', dbref=True)
Location.drop_collection()
self.assertEqual(Area, get_document("Area"))
self.assertEqual(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
def test_reload(self):
"""Ensure that attributes may be reloaded.
"""
person = self.Person(name="Test User", age=20)
person.save()
person_obj = self.Person.objects.first()
person_obj.name = "Mr Test User"
person_obj.age = 21
person_obj.save()
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 20)
person.reload('age')
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
person.reload()
self.assertEqual(person.name, "Mr Test User")
self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum='Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_sharded_nested(self):
class SuperPhylum(EmbeddedDocument):
name = StringField()
class Animal(Document):
superphylum = EmbeddedDocumentField(SuperPhylum)
meta = {'shard_key': ('superphylum.name',)}
Animal.drop_collection()
doc = Animal(superphylum=SuperPhylum(name='Deuterostomia'))
doc.save()
doc.reload()
Animal.drop_collection()
def test_reload_referencing(self):
"""Ensures reloading updates weakrefs correctly
"""
class Embedded(EmbeddedDocument):
dict_field = DictField()
list_field = ListField()
class Doc(Document):
dict_field = DictField()
list_field = ListField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc()
doc.dict_field = {'hello': 'world'}
doc.list_field = ['1', 2, {'hello': 'world'}]
embedded_1 = Embedded()
embedded_1.dict_field = {'hello': 'world'}
embedded_1.list_field = ['1', 2, {'hello': 'world'}]
doc.embedded_field = embedded_1
doc.save()
doc = doc.reload(10)
doc.list_field.append(1)
doc.dict_field['woot'] = "woot"
doc.embedded_field.list_field.append(1)
doc.embedded_field.dict_field['woot'] = "woot"
self.assertEqual(doc._get_changed_fields(), [
'list_field', 'dict_field.woot', 'embedded_field.list_field',
'embedded_field.dict_field.woot'])
doc.save()
self.assertEqual(len(doc.list_field), 4)
doc = doc.reload(10)
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 4)
self.assertEqual(len(doc.dict_field), 2)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
doc.list_field.append(1)
doc.save()
doc.dict_field['extra'] = 1
doc = doc.reload(10, 'list_field')
self.assertEqual(doc._get_changed_fields(), [])
self.assertEqual(len(doc.list_field), 5)
self.assertEqual(len(doc.dict_field), 3)
self.assertEqual(len(doc.embedded_field.list_field), 4)
self.assertEqual(len(doc.embedded_field.dict_field), 2)
def test_reload_doesnt_exist(self):
class Foo(Document):
pass
f = Foo()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
f.save()
f.delete()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
def test_reload_of_non_strict_with_special_field_name(self):
"""Ensures reloading works for documents with meta strict == False
"""
class Post(Document):
meta = {
'strict': False
}
title = StringField()
items = ListField()
Post.drop_collection()
Post._get_collection().insert_one({
"title": "Items eclipse",
"items": ["more lorem", "even more ipsum"]
})
post = Post.objects.first()
post.reload()
self.assertEqual(post.title, "Items eclipse")
self.assertEqual(post.items, ["more lorem", "even more ipsum"])
def test_dictionary_access(self):
"""Ensure that dictionary-style field access works properly.
"""
person = self.Person(name='Test User', age=30, job=self.Job())
self.assertEqual(person['name'], 'Test User')
self.assertRaises(KeyError, person.__getitem__, 'salary')
self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
person['name'] = 'Another User'
self.assertEqual(person['name'], 'Another User')
# Length = length(assigned fields + id)
self.assertEqual(len(person), 5)
self.assertTrue('age' in person)
person.age = None
self.assertFalse('age' in person)
self.assertFalse('nationality' in person)
def test_embedded_document_to_mongo(self):
class Person(EmbeddedDocument):
name = StringField()
age = IntField()
meta = {"allow_inheritance": True}
class Employee(Person):
salary = IntField()
self.assertEqual(Person(name="Bob", age=35).to_mongo().keys(),
['_cls', 'name', 'age'])
self.assertEqual(
Employee(name="Bob", age=35, salary=0).to_mongo().keys(),
['_cls', 'name', 'age', 'salary'])
def test_embedded_document_to_mongo_id(self):
class SubDoc(EmbeddedDocument):
id = StringField(required=True)
sub_doc = SubDoc(id="abc")
self.assertEqual(sub_doc.to_mongo().keys(), ['id'])
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
def test_embedded_document_instance(self):
"""Ensure that embedded documents can reference parent instance
"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
doc = Doc(embedded_field=Embedded(string="Hi"))
self.assertHasInstance(doc.embedded_field, doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field, doc)
def test_embedded_document_complex_instance(self):
"""Ensure that embedded documents in complex fields can reference
parent instance"""
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
embedded_field = ListField(EmbeddedDocumentField(Embedded))
Doc.drop_collection()
doc = Doc(embedded_field=[Embedded(string="Hi")])
self.assertHasInstance(doc.embedded_field[0], doc)
doc.save()
doc = Doc.objects.get()
self.assertHasInstance(doc.embedded_field[0], doc)
def test_instance_is_set_on_setattr(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
email = EmbeddedDocumentField(Email)
Account.drop_collection()
acc = Account()
acc.email = Email(email='test@example.com')
self.assertHasInstance(acc._data["email"], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["email"], acc1)
def test_instance_is_set_on_setattr_on_embedded_document_list(self):
class Email(EmbeddedDocument):
email = EmailField()
class Account(Document):
emails = EmbeddedDocumentListField(Email)
Account.drop_collection()
acc = Account()
acc.emails = [Email(email='test@example.com')]
self.assertHasInstance(acc._data["emails"][0], acc)
acc.save()
acc1 = Account.objects.first()
self.assertHasInstance(acc1._data["emails"][0], acc1)
def test_document_clean(self):
class TestDocument(Document):
status = StringField()
pub_date = DateTimeField()
def clean(self):
if self.status == 'draft' and self.pub_date is not None:
msg = 'Draft entries may not have a publication date.'
raise ValidationError(msg)
# Set the pub_date for published items if not set.
if self.status == 'published' and self.pub_date is None:
self.pub_date = datetime.now()
TestDocument.drop_collection()
t = TestDocument(status="draft", pub_date=datetime.now())
try:
t.save()
except ValidationError, e:
expect_msg = "Draft entries may not have a publication date."
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'__all__': expect_msg})
t = TestDocument(status="published")
t.save(clean=False)
self.assertEqual(t.pub_date, None)
t = TestDocument(status="published")
t.save(clean=True)
self.assertEqual(type(t.pub_date), datetime)
def test_document_embedded_clean(self):
class TestEmbeddedDocument(EmbeddedDocument):
x = IntField(required=True)
y = IntField(required=True)
z = IntField(required=True)
meta = {'allow_inheritance': False}
def clean(self):
if self.z:
if self.z != self.x + self.y:
raise ValidationError('Value of z != x + y')
else:
self.z = self.x + self.y
class TestDocument(Document):
doc = EmbeddedDocumentField(TestEmbeddedDocument)
status = StringField()
TestDocument.drop_collection()
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25, z=15))
try:
t.save()
except ValidationError, e:
expect_msg = "Value of z != x + y"
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'doc': {'__all__': expect_msg}})
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25)).save()
self.assertEqual(t.doc.z, 35)
# Asserts not raises
t = TestDocument(doc=TestEmbeddedDocument(x=15, y=35, z=5))
t.save(clean=False)
def test_modify_empty(self):
doc = self.Person(name="bob", age=10).save()
self.assertRaises(
InvalidDocumentError, lambda: self.Person().modify(set__age=10))
self.assertDbEqual([dict(doc.to_mongo())])
def test_modify_invalid_query(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
self.assertRaises(
InvalidQueryError,
lambda: doc1.modify(dict(id=doc2.id), set__value=20))
self.assertDbEqual(docs)
def test_modify_match_another_document(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(name="jim", age=20).save()
docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
assert not doc1.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_not_exists(self):
doc1 = self.Person(name="bob", age=10).save()
doc2 = self.Person(id=ObjectId(), name="jim", age=20)
docs = [dict(doc1.to_mongo())]
assert not doc2.modify(dict(name=doc2.name), set__age=100)
self.assertDbEqual(docs)
def test_modify_update(self):
other_doc = self.Person(name="bob", age=10).save()
doc = self.Person(
name="jim", age=20, job=self.Job(name="10gen", years=3)).save()
doc_copy = doc._from_son(doc.to_mongo())
# these changes must go away
doc.name = "liza"
doc.job.name = "Google"
doc.job.years = 3
assert doc.modify(
set__age=21, set__job__name="MongoDB", unset__job__years=True)
doc_copy.age = 21
doc_copy.job.name = "MongoDB"
del doc_copy.job.years
assert doc.to_json() == doc_copy.to_json()
assert doc._get_changed_fields() == []
self.assertDbEqual([dict(other_doc.to_mongo()), dict(doc.to_mongo())])
def test_save(self):
"""Ensure that a document may be saved in the database.
"""
# Create person object and save it to the database
person = self.Person(name='Test User', age=30)
person.save()
# Ensure that the object is in the database
collection = self.db[self.Person._get_collection_name()]
person_obj = collection.find_one({'name': 'Test User'})
self.assertEqual(person_obj['name'], 'Test User')
self.assertEqual(person_obj['age'], 30)
self.assertEqual(person_obj['_id'], person.id)
# Test skipping validation on save
class Recipient(Document):
email = EmailField(required=True)
recipient = Recipient(email='root@localhost')
self.assertRaises(ValidationError, recipient.save)
try:
recipient.save(validate=False)
except ValidationError:
self.fail()
def test_save_to_a_value_that_equates_to_false(self):
class Thing(EmbeddedDocument):
count = IntField()
class User(Document):
thing = EmbeddedDocumentField(Thing)
User.drop_collection()
user = User(thing=Thing(count=1))
user.save()
user.reload()
user.thing.count = 0
user.save()
user.reload()
self.assertEqual(user.thing.count, 0)
def test_save_max_recursion_not_hit(self):
    """Saving documents that form a reference cycle must not recurse
    infinitely while resetting changed fields."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self')
        friend = ReferenceField('self')

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.parent = None
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p2.save()

    # Close the cycle: p1 -> p2 -> p1.
    p1.friend = p2
    p1.save()

    # Confirm can save and it resets the changed fields without hitting
    # max recursion error
    p0 = Person.objects.first()
    p0.name = 'wpjunior'
    p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
    """A self-referencing document with a FileField must save without
    hitting the max recursion limit, and the picture must be visible
    through every level of the reference cycle."""
    class Foo(Document):
        name = StringField()
        picture = FileField()
        bar = ReferenceField('self')

    Foo.drop_collection()

    a = Foo(name='hello').save()

    # Create the cycle a -> a and attach binary data.
    a.bar = a
    with open(TEST_IMAGE_PATH, 'rb') as test_image:
        a.picture = test_image
        a.save()

        # Confirm can save and it resets the changed fields without hitting
        # max recursion error
        b = Foo.objects.with_id(a.id)
        b.name = 'world'
        b.save()

        # BUG FIX: the original called assertEqual(x, y, z), which treats
        # the third picture as the assertion *message* and never compares
        # it.  Compare each adjacent pair explicitly instead.
        self.assertEqual(b.picture, b.bar.picture)
        self.assertEqual(b.bar.picture, b.bar.bar.picture)
def test_save_cascades(self):
    """save(cascade=True) also persists changes made on referenced
    documents."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self')

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.parent = None
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p2.save()

    p = Person.objects(name="Wilson Jr").get()
    p.parent.name = "Daddy Wilson"
    p.save(cascade=True)

    # The dereferenced parent must have been saved too.
    p1.reload()
    self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_kwargs(self):
    """cascade_kwargs are forwarded to the cascaded saves of referenced
    documents (here overriding force_insert for the parent)."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self')

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.parent = None
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p1.name = "Daddy Wilson"
    # force_insert would fail for the already-saved parent; the
    # cascade_kwargs override lets the cascaded save update it instead.
    p2.save(force_insert=True, cascade_kwargs={"force_insert": False})

    p1.reload()
    p2.reload()
    self.assertEqual(p1.name, p2.parent.name)
def test_save_cascade_meta_false(self):
    """meta {'cascade': False} disables cascading on a plain save(), but
    an explicit save(cascade=True) still overrides it."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self')

        meta = {'cascade': False}

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.parent = None
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p2.save()

    p = Person.objects(name="Wilson Jr").get()
    p.parent.name = "Daddy Wilson"
    p.save()

    # Plain save must NOT have touched the referenced parent.
    p1.reload()
    self.assertNotEqual(p1.name, p.parent.name)

    # Explicit cascade wins over the meta default.
    p.save(cascade=True)
    p1.reload()
    self.assertEqual(p1.name, p.parent.name)
def test_save_cascade_meta_true(self):
    """A one-off save(cascade=True) does not make later plain saves
    cascade when meta disables cascading."""
    class Person(Document):
        name = StringField()
        parent = ReferenceField('self')

        meta = {'cascade': False}

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.parent = None
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p2.save(cascade=True)

    p = Person.objects(name="Wilson Jr").get()
    p.parent.name = "Daddy Wilson"
    p.save()

    # The later plain save still obeys meta['cascade'] = False.
    p1.reload()
    self.assertNotEqual(p1.name, p.parent.name)
def test_save_cascades_generically(self):
    """Cascading saves also follow GenericReferenceField references."""
    class Person(Document):
        name = StringField()
        parent = GenericReferenceField()

    Person.drop_collection()

    p1 = Person(name="Wilson Snr")
    p1.save()

    p2 = Person(name="Wilson Jr")
    p2.parent = p1
    p2.save()

    p = Person.objects(name="Wilson Jr").get()
    p.parent.name = "Daddy Wilson"
    p.save()

    # Default save does not cascade through the generic reference...
    p1.reload()
    self.assertNotEqual(p1.name, p.parent.name)

    # ...but cascade=True persists it.
    p.save(cascade=True)
    p1.reload()
    self.assertEqual(p1.name, p.parent.name)
def test_save_atomicity_condition(self):
    """save(save_condition=...) only writes when the stored document still
    matches the condition, raising SaveConditionError on a mismatch; this
    enables optimistic-concurrency style atomic read-modify-write."""
    class Widget(Document):
        toggle = BooleanField(default=False)
        count = IntField(default=0)
        save_id = UUIDField()

    def flip(widget):
        # In-memory mutation only; nothing is persisted here.
        widget.toggle = not widget.toggle
        widget.count += 1

    def UUID(i):
        # Deterministic UUID built from a small integer.
        return uuid.UUID(int=i)

    Widget.drop_collection()

    w1 = Widget(toggle=False, save_id=UUID(1))
    # ignore save_condition on new record creation
    w1.save(save_condition={'save_id': UUID(42)})
    w1.reload()
    self.assertFalse(w1.toggle)
    self.assertEqual(w1.save_id, UUID(1))
    self.assertEqual(w1.count, 0)

    # mismatch in save_condition prevents save and raise exception
    flip(w1)
    self.assertTrue(w1.toggle)
    self.assertEqual(w1.count, 1)
    self.assertRaises(SaveConditionError,
                      w1.save, save_condition={'save_id': UUID(42)})
    w1.reload()
    self.assertFalse(w1.toggle)
    self.assertEqual(w1.count, 0)

    # matched save_condition allows save
    flip(w1)
    self.assertTrue(w1.toggle)
    self.assertEqual(w1.count, 1)
    w1.save(save_condition={'save_id': UUID(1)})
    w1.reload()
    self.assertTrue(w1.toggle)
    self.assertEqual(w1.count, 1)

    # save_condition can be used to ensure atomic read & updates
    # i.e., prevent interleaved reads and writes from separate contexts
    w2 = Widget.objects.get()
    self.assertEqual(w1, w2)
    old_id = w1.save_id

    flip(w1)
    w1.save_id = UUID(2)
    w1.save(save_condition={'save_id': old_id})
    w1.reload()
    self.assertFalse(w1.toggle)
    self.assertEqual(w1.count, 2)
    # w2 is now stale: its save_id no longer matches the DB.
    flip(w2)
    flip(w2)
    self.assertRaises(SaveConditionError,
                      w2.save, save_condition={'save_id': old_id})
    w2.reload()
    self.assertFalse(w2.toggle)
    self.assertEqual(w2.count, 2)

    # save_condition uses mongoengine-style operator syntax
    flip(w1)
    w1.save(save_condition={'count__lt': w1.count})
    w1.reload()
    self.assertTrue(w1.toggle)
    self.assertEqual(w1.count, 3)
    flip(w1)
    self.assertRaises(SaveConditionError,
                      w1.save, save_condition={'count__gte': w1.count})
    w1.reload()
    self.assertTrue(w1.toggle)
    self.assertEqual(w1.count, 3)
def test_update(self):
    """Ensure that an existing document is updated instead of being
    overwritten, including under only()/exclude() projections."""
    # Create person object and save it to the database
    person = self.Person(name='Test User', age=30)
    person.save()

    # Create same person object, with same id, without age
    same_person = self.Person(name='Test')
    same_person.id = person.id
    same_person.save()

    # Confirm only one object
    self.assertEqual(self.Person.objects.count(), 1)

    # reload
    person.reload()
    same_person.reload()

    # Confirm the same
    self.assertEqual(person, same_person)
    self.assertEqual(person.name, same_person.name)
    self.assertEqual(person.age, same_person.age)

    # Confirm the saved values: name updated, age (unset locally) kept
    self.assertEqual(person.name, 'Test')
    self.assertEqual(person.age, 30)

    # Test only / exclude only updates included fields
    person = self.Person.objects.only('name').get()
    person.name = 'User'
    person.save()

    person.reload()
    self.assertEqual(person.name, 'User')
    self.assertEqual(person.age, 30)

    # test exclude only updates set fields
    person = self.Person.objects.exclude('name').get()
    person.age = 21
    person.save()

    person.reload()
    self.assertEqual(person.name, 'User')
    self.assertEqual(person.age, 21)

    # Test only / exclude can set non excluded / included fields
    person = self.Person.objects.only('name').get()
    person.name = 'Test'
    person.age = 30
    person.save()

    person.reload()
    self.assertEqual(person.name, 'Test')
    self.assertEqual(person.age, 30)

    # test exclude only updates set fields
    person = self.Person.objects.exclude('name').get()
    person.name = 'User'
    person.age = 21
    person.save()

    person.reload()
    self.assertEqual(person.name, 'User')
    self.assertEqual(person.age, 21)

    # Confirm does remove unrequired fields
    person = self.Person.objects.exclude('name').get()
    person.age = None
    person.save()

    person.reload()
    self.assertEqual(person.name, 'User')
    self.assertEqual(person.age, None)

    person = self.Person.objects.get()
    person.name = None
    person.age = None
    person.save()

    person.reload()
    self.assertEqual(person.name, None)
    self.assertEqual(person.age, None)
def test_inserts_if_you_set_the_pk(self):
    """Documents given explicit, distinct pks are inserted separately
    rather than upserted over one another."""
    first = self.Person(name='p1', id=bson.ObjectId()).save()

    second = self.Person(name='p2')
    second.id = bson.ObjectId()
    second.save()

    self.assertEqual(self.Person.objects.count(), 2)
def test_can_save_if_not_included(self):
    """A document loaded through only() can be saved without clobbering
    the excluded fields' (default) values, across every field type."""
    class EmbeddedDoc(EmbeddedDocument):
        pass

    class Simple(Document):
        pass

    # One field of each supported type, all with defaults, so saving a
    # projected document exercises the full field matrix.
    class Doc(Document):
        string_field = StringField(default='1')
        int_field = IntField(default=1)
        float_field = FloatField(default=1.1)
        boolean_field = BooleanField(default=True)
        datetime_field = DateTimeField(default=datetime.now)
        embedded_document_field = EmbeddedDocumentField(
            EmbeddedDoc, default=lambda: EmbeddedDoc())
        list_field = ListField(default=lambda: [1, 2, 3])
        dict_field = DictField(default=lambda: {"hello": "world"})
        objectid_field = ObjectIdField(default=bson.ObjectId)
        reference_field = ReferenceField(Simple, default=lambda:
                                         Simple().save())
        map_field = MapField(IntField(), default=lambda: {"simple": 1})
        decimal_field = DecimalField(default=1.0)
        complex_datetime_field = ComplexDateTimeField(default=datetime.now)
        url_field = URLField(default="http://mongoengine.org")
        dynamic_field = DynamicField(default=1)
        generic_reference_field = GenericReferenceField(
            default=lambda: Simple().save())
        sorted_list_field = SortedListField(IntField(),
                                            default=lambda: [1, 2, 3])
        email_field = EmailField(default="ross@example.com")
        geo_point_field = GeoPointField(default=lambda: [1, 2])
        sequence_field = SequenceField()
        uuid_field = UUIDField(default=uuid.uuid4)
        generic_embedded_document_field = GenericEmbeddedDocumentField(
            default=lambda: EmbeddedDoc())

    Simple.drop_collection()
    Doc.drop_collection()

    Doc().save()
    # Load only string_field, mutate it, and save the projected document.
    my_doc = Doc.objects.only("string_field").first()
    my_doc.string_field = "string"
    my_doc.save()

    my_doc = Doc.objects.get(string_field="string")
    self.assertEqual(my_doc.string_field, "string")
    # The excluded int_field must still hold its default.
    self.assertEqual(my_doc.int_field, 1)
def test_document_update(self):
    """update(): raises on an unsaved document, raises with no operators,
    and a bare keyword argument defaults to $set."""
    def update_not_saved_raises():
        person = self.Person(name='dcrosta')
        person.update(set__name='Dan Crosta')

    self.assertRaises(OperationError, update_not_saved_raises)

    author = self.Person(name='dcrosta')
    author.save()

    author.update(set__name='Dan Crosta')
    author.reload()

    p1 = self.Person.objects.first()
    self.assertEqual(p1.name, author.name)

    def update_no_value_raises():
        person = self.Person.objects.first()
        person.update()

    self.assertRaises(OperationError, update_no_value_raises)

    def update_no_op_should_default_to_set():
        # 'name=' without an operator prefix should behave as set__name.
        person = self.Person.objects.first()
        person.update(name="Dan")
        person.reload()
        return person.name

    self.assertEqual("Dan", update_no_op_should_default_to_set())
def test_update_unique_field(self):
    """Updating a unique field to an already-used value must raise
    NotUniqueError."""
    class Doc(Document):
        name = StringField(unique=True)

    original = Doc(name="first").save()
    duplicate = Doc(name="second").save()

    # Reusing original's name violates the unique index.
    def set_duplicate_name():
        duplicate.update(set__name=original.name)

    self.assertRaises(NotUniqueError, set_duplicate_name)
def test_embedded_update(self):
    """
    Test update on `EmbeddedDocumentField` fields
    """
    class Page(EmbeddedDocument):
        log_message = StringField(verbose_name="Log message",
                                  required=True)

    class Site(Document):
        page = EmbeddedDocumentField(Page)

    Site.drop_collection()

    site = Site(page=Page(log_message="Warning: Dummy message"))
    site.save()

    # Update
    site = Site.objects.first()
    site.page.log_message = "Error: Dummy message"
    site.save()

    # A fresh fetch must see the embedded change.
    site = Site.objects.first()
    self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_embedded_update_db_field(self):
    """
    Test update on `EmbeddedDocumentField` fields when db_field is other
    than default.
    """
    class Page(EmbeddedDocument):
        # Stored as 'page_log_message' in MongoDB, not the attribute name.
        log_message = StringField(verbose_name="Log message",
                                  db_field="page_log_message",
                                  required=True)

    class Site(Document):
        page = EmbeddedDocumentField(Page)

    Site.drop_collection()

    site = Site(page=Page(log_message="Warning: Dummy message"))
    site.save()

    # Update
    site = Site.objects.first()
    site.page.log_message = "Error: Dummy message"
    site.save()

    site = Site.objects.first()
    self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_save_only_changed_fields(self):
    """Ensure save only sets / unsets changed fields
    """
    class User(self.Person):
        active = BooleanField(default=True)

    User.drop_collection()

    # Create person object and save it to the database
    user = User(name='Test User', age=30, active=True)
    user.save()
    user.reload()

    # Simulated Race condition: two in-memory copies each change a
    # different field; both changes must survive since only deltas
    # are written.
    same_person = self.Person.objects.get()
    same_person.active = False

    user.age = 21
    user.save()

    same_person.name = 'User'
    same_person.save()

    person = self.Person.objects.get()
    self.assertEqual(person.name, 'User')
    self.assertEqual(person.age, 21)
    self.assertEqual(person.active, False)
def test_query_count_when_saving(self):
    """Ensure references don't cause extra fetches when saving"""
    class Organization(Document):
        name = StringField()

    class User(Document):
        name = StringField()
        orgs = ListField(ReferenceField('Organization'))

    class Feed(Document):
        name = StringField()

    class UserSubscription(Document):
        name = StringField()
        user = ReferenceField(User)
        feed = ReferenceField(Feed)

    Organization.drop_collection()
    User.drop_collection()
    Feed.drop_collection()
    UserSubscription.drop_collection()

    o1 = Organization(name="o1").save()
    o2 = Organization(name="o2").save()

    u1 = User(name="Ross", orgs=[o1, o2]).save()
    f1 = Feed(name="MongoEngine").save()

    sub = UserSubscription(user=u1, feed=f1).save()

    user = User.objects.first()
    # Even if stored as ObjectId's internally mongoengine uses DBRefs
    # As ObjectId's aren't automatically derefenced
    self.assertTrue(isinstance(user._data['orgs'][0], DBRef))
    self.assertTrue(isinstance(user.orgs[0], Organization))
    # Attribute access dereferences and caches the real document.
    self.assertTrue(isinstance(user._data['orgs'][0], Organization))

    # Changing a value: fetch (1) + save (1), no extra dereferences.
    with query_counter() as q:
        self.assertEqual(q, 0)
        sub = UserSubscription.objects.first()
        self.assertEqual(q, 1)
        sub.name = "Test Sub"
        sub.save()
        self.assertEqual(q, 2)

    # Changing a value that will cascade
    with query_counter() as q:
        self.assertEqual(q, 0)
        sub = UserSubscription.objects.first()
        self.assertEqual(q, 1)
        sub.user.name = "Test"
        self.assertEqual(q, 2)
        sub.save(cascade=True)
        self.assertEqual(q, 3)

    # Changing a value and one that will cascade
    with query_counter() as q:
        self.assertEqual(q, 0)
        sub = UserSubscription.objects.first()
        sub.name = "Test Sub 2"
        self.assertEqual(q, 1)
        sub.user.name = "Test 2"
        self.assertEqual(q, 2)
        sub.save(cascade=True)
        self.assertEqual(q, 4)  # One for the UserSub and one for the User

    # Saving with just the refs: no fetches should be needed at all.
    with query_counter() as q:
        self.assertEqual(q, 0)
        sub = UserSubscription(user=u1.pk, feed=f1.pk)
        self.assertEqual(q, 0)
        sub.save()
        self.assertEqual(q, 1)

    # Saving with just the refs on a ListField
    with query_counter() as q:
        self.assertEqual(q, 0)
        User(name="Bob", orgs=[o1.pk, o2.pk]).save()
        self.assertEqual(q, 1)

    # Saving new objects
    with query_counter() as q:
        self.assertEqual(q, 0)
        user = User.objects.first()
        self.assertEqual(q, 1)
        feed = Feed.objects.first()
        self.assertEqual(q, 2)
        sub = UserSubscription(user=user, feed=feed)
        self.assertEqual(q, 2)  # Check no change
        sub.save()
        self.assertEqual(q, 3)
def test_set_unset_one_operation(self):
    """Ensure that $set and $unset actions are performed in the same
    operation.
    """
    class FooBar(Document):
        foo = StringField(default=None)
        bar = StringField(default=None)

    FooBar.drop_collection()

    # write an entity with a single prop
    foo = FooBar(foo='foo').save()

    self.assertEqual(foo.foo, 'foo')
    # Unset one field and set the other; both deltas are now pending.
    del foo.foo
    foo.bar = 'bar'

    # Exactly one query means $set and $unset were combined.
    with query_counter() as q:
        self.assertEqual(0, q)
        foo.save()
        self.assertEqual(1, q)
def test_save_only_changed_fields_recursive(self):
    """Ensure save only sets / unsets changed fields
    """
    class Comment(EmbeddedDocument):
        published = BooleanField(default=True)

    class User(self.Person):
        comments_dict = DictField()
        comments = ListField(EmbeddedDocumentField(Comment))
        active = BooleanField(default=True)

    User.drop_collection()

    # Create person object and save it to the database
    person = User(name='Test User', age=30, active=True)
    person.comments.append(Comment())
    person.save()
    person.reload()

    # Change tracking must reach into a ListField of embedded docs.
    person = self.Person.objects.get()
    self.assertTrue(person.comments[0].published)

    person.comments[0].published = False
    person.save()

    person = self.Person.objects.get()
    self.assertFalse(person.comments[0].published)

    # Simple dict w: embedded docs nested inside a DictField.
    person.comments_dict['first_post'] = Comment()
    person.save()

    person = self.Person.objects.get()
    self.assertTrue(person.comments_dict['first_post'].published)

    person.comments_dict['first_post'].published = False
    person.save()

    person = self.Person.objects.get()
    self.assertFalse(person.comments_dict['first_post'].published)
def test_delete(self):
    """A saved document can be removed with the delete method."""
    subject = self.Person(name="Test User", age=30).save()
    self.assertEqual(1, self.Person.objects.count())

    subject.delete()
    self.assertEqual(0, self.Person.objects.count())
def test_save_custom_id(self):
    """Ensure that a document may be saved with a custom _id."""
    custom_id = '497ce96f395f2f052a494fd4'

    # Create person object and save it to the database
    person = self.Person(name='Test User', age=30, id=custom_id)
    person.save()

    # Fetch the raw record and check the stored _id round-trips.
    collection = self.db[self.Person._get_collection_name()]
    raw_person = collection.find_one({'name': 'Test User'})
    self.assertEqual(custom_id, str(raw_person['_id']))
def test_save_custom_pk(self):
    """
    Ensure that a document may be saved with a custom _id using pk alias.
    """
    # Create person object and save it to the database, setting the id
    # through the 'pk' alias rather than 'id'.
    person = self.Person(name='Test User', age=30,
                         pk='497ce96f395f2f052a494fd4')
    person.save()

    # Ensure that the object is in the database with the correct _id
    collection = self.db[self.Person._get_collection_name()]
    person_obj = collection.find_one({'name': 'Test User'})
    self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
def test_save_list(self):
    """Ensure that a list field may be properly saved.
    """
    class Comment(EmbeddedDocument):
        content = StringField()

    class BlogPost(Document):
        content = StringField()
        comments = ListField(EmbeddedDocumentField(Comment))
        tags = ListField(StringField())

    BlogPost.drop_collection()

    post = BlogPost(content='Went for a walk today...')
    post.tags = tags = ['fun', 'leisure']
    comments = [Comment(content='Good for you'), Comment(content='Yay.')]
    post.comments = comments
    post.save()

    # Verify via the raw pymongo collection that both the scalar list
    # and the embedded-document list were stored correctly.
    collection = self.db[BlogPost._get_collection_name()]
    post_obj = collection.find_one()
    self.assertEqual(post_obj['tags'], tags)
    for comment_obj, comment in zip(post_obj['comments'], comments):
        self.assertEqual(comment_obj['content'], comment['content'])

    BlogPost.drop_collection()
def test_list_search_by_embedded(self):
    """Querying by a reference inside a list of embedded documents
    (comments__user) returns every page containing that user."""
    class User(Document):
        username = StringField(required=True)

        meta = {'allow_inheritance': False}

    class Comment(EmbeddedDocument):
        comment = StringField()
        user = ReferenceField(User,
                              required=True)

        meta = {'allow_inheritance': False}

    class Page(Document):
        comments = ListField(EmbeddedDocumentField(Comment))
        meta = {'allow_inheritance': False,
                'indexes': [
                    {'fields': ['comments.user']}
                ]}

    User.drop_collection()
    Page.drop_collection()

    u1 = User(username="wilson")
    u1.save()

    u2 = User(username="rozza")
    u2.save()

    u3 = User(username="hmarr")
    u3.save()

    p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
                        Comment(user=u2, comment="Hello world"),
                        Comment(user=u3, comment="Ping Pong"),
                        Comment(user=u1, comment="I like a beer")])
    p1.save()

    p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
                        Comment(user=u2, comment="Hello world")])
    p2.save()

    p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
    p3.save()

    p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
    p4.save()

    # Each user should match exactly the pages they commented on.
    self.assertEqual(
        [p1, p2],
        list(Page.objects.filter(comments__user=u1)))
    self.assertEqual(
        [p1, p2, p4],
        list(Page.objects.filter(comments__user=u2)))
    self.assertEqual(
        [p1, p3],
        list(Page.objects.filter(comments__user=u3)))
def test_save_embedded_document(self):
    """Ensure that a document with an embedded document field may be
    saved in the database.
    """
    class EmployeeDetails(EmbeddedDocument):
        position = StringField()

    class Employee(self.Person):
        salary = IntField()
        details = EmbeddedDocumentField(EmployeeDetails)

    # Create employee object and save it to the database
    employee = Employee(name='Test Employee', age=50, salary=20000)
    employee.details = EmployeeDetails(position='Developer')
    employee.save()

    # Ensure that the object is in the database
    collection = self.db[self.Person._get_collection_name()]
    employee_obj = collection.find_one({'name': 'Test Employee'})
    self.assertEqual(employee_obj['name'], 'Test Employee')
    self.assertEqual(employee_obj['age'], 50)

    # Ensure that the 'details' embedded object saved correctly
    self.assertEqual(employee_obj['details']['position'], 'Developer')
def test_embedded_update_after_save(self):
    """
    Test update of `EmbeddedDocumentField` attached to a newly saved
    document.
    """
    class Page(EmbeddedDocument):
        log_message = StringField(verbose_name="Log message",
                                  required=True)

    class Site(Document):
        page = EmbeddedDocumentField(Page)

    Site.drop_collection()
    site = Site(page=Page(log_message="Warning: Dummy message"))
    site.save()

    # Update the embedded field on the same in-memory instance that was
    # just saved (no intermediate fetch).
    site.page.log_message = "Error: Dummy message"
    site.save()

    site = Site.objects.first()
    self.assertEqual(site.page.log_message, "Error: Dummy message")
def test_updating_an_embedded_document(self):
    """Ensure that a document with an embedded document field may be
    saved in the database.
    """
    class EmployeeDetails(EmbeddedDocument):
        position = StringField()

    class Employee(self.Person):
        salary = IntField()
        details = EmbeddedDocumentField(EmployeeDetails)

    # Create employee object and save it to the database
    employee = Employee(name='Test Employee', age=50, salary=20000)
    employee.details = EmployeeDetails(position='Developer')
    employee.save()

    # Test updating an embedded document
    promoted_employee = Employee.objects.get(name='Test Employee')
    promoted_employee.details.position = 'Senior Developer'
    promoted_employee.save()

    promoted_employee.reload()
    self.assertEqual(promoted_employee.name, 'Test Employee')
    self.assertEqual(promoted_employee.age, 50)

    # Ensure that the 'details' embedded object saved correctly
    self.assertEqual(
        promoted_employee.details.position, 'Senior Developer')

    # Test removal: setting the embedded field to None unsets it.
    promoted_employee.details = None
    promoted_employee.save()

    promoted_employee.reload()
    self.assertEqual(promoted_employee.details, None)
def test_object_mixins(self):
    """Fields declared on a plain-object mixin are collected by both
    embedded and top-level documents."""
    class NameMixin(object):
        name = StringField()

    class Foo(EmbeddedDocument, NameMixin):
        quantity = IntField()

    self.assertEqual(sorted(Foo._fields.keys()), ['name', 'quantity'])

    class Bar(Document, NameMixin):
        widgets = StringField()

    # Documents additionally get the automatic 'id' field.
    self.assertEqual(sorted(Bar._fields.keys()), ['id', 'name', 'widgets'])
def test_mixin_inheritance(self):
    """Fields inherited through a chain of plain-object mixins are all
    available on the final Document class."""
    class BaseMixIn(object):
        count = IntField()
        data = StringField()

    class DoubleMixIn(BaseMixIn):
        comment = StringField()

    class TestDoc(Document, DoubleMixIn):
        age = IntField()

    TestDoc.drop_collection()
    t = TestDoc(count=12, data="test",
                comment="great!", age=19)

    t.save()
    t = TestDoc.objects.first()

    self.assertEqual(t.age, 19)
    self.assertEqual(t.comment, "great!")
    self.assertEqual(t.data, "test")
    self.assertEqual(t.count, 12)
def test_save_reference(self):
    """Ensure that a document reference field may be saved in the database.
    """
    class BlogPost(Document):
        meta = {'collection': 'blogpost_1'}
        content = StringField()
        author = ReferenceField(self.Person)

    BlogPost.drop_collection()

    author = self.Person(name='Test User')
    author.save()

    post = BlogPost(content='Watched some TV today... how exciting.')
    # Should only reference author when saving
    post.author = author
    post.save()

    post_obj = BlogPost.objects.first()

    # Test laziness: raw data holds a DBRef until the attribute is
    # accessed, which dereferences it to a Person.
    self.assertTrue(isinstance(post_obj._data['author'],
                               bson.DBRef))
    self.assertTrue(isinstance(post_obj.author, self.Person))
    self.assertEqual(post_obj.author.name, 'Test User')

    # Ensure that the dereferenced object may be changed and saved
    post_obj.author.age = 25
    post_obj.author.save()

    author = list(self.Person.objects(name='Test User'))[-1]
    self.assertEqual(author.age, 25)

    BlogPost.drop_collection()
def test_duplicate_db_fields_raise_invalid_document_error(self):
    """Ensure a InvalidDocumentError is thrown if duplicate fields
    declare the same db_field"""
    def define_clashing_fields():
        # Both attributes map onto the stored field 'name'.
        class Foo(Document):
            name = StringField()
            name2 = StringField(db_field='name')

    self.assertRaises(InvalidDocumentError, define_clashing_fields)
def test_invalid_son(self):
    """Raise an error if loading invalid data"""
    class Occurrence(EmbeddedDocument):
        number = IntField()

    class Word(Document):
        stem = StringField()
        count = IntField(default=1)
        forms = ListField(StringField(), default=list)
        occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)

    def raise_invalid_document():
        # Every field is given a value of the wrong type/shape.
        Word._from_son({'stem': [1, 2, 3], 'forms': 1, 'count': 'one',
                        'occurs': {"hello": None}})

    self.assertRaises(InvalidDocumentError, raise_invalid_document)
def test_reverse_delete_rule_cascade_and_nullify(self):
    """Ensure that a referenced document is also deleted upon deletion.
    """
    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
        reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)

    self.Person.drop_collection()
    BlogPost.drop_collection()

    author = self.Person(name='Test User')
    author.save()

    reviewer = self.Person(name='Re Viewer')
    reviewer.save()

    post = BlogPost(content='Watched some TV')
    post.author = author
    post.reviewer = reviewer
    post.save()

    # NULLIFY: deleting the reviewer clears the field but keeps the post.
    reviewer.delete()
    # No effect on the BlogPost
    self.assertEqual(BlogPost.objects.count(), 1)
    self.assertEqual(BlogPost.objects.get().reviewer, None)

    # Delete the Person, which should lead to deletion of the BlogPost, too
    author.delete()
    self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_with_document_inheritance(self):
    """Ensure that a referenced document is also deleted upon deletion
    of a child document.
    """
    # Rules are declared against self.Person but exercised via the
    # Writer subclass.
    class Writer(self.Person):
        pass

    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
        reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)

    self.Person.drop_collection()
    BlogPost.drop_collection()

    author = Writer(name='Test User')
    author.save()

    reviewer = Writer(name='Re Viewer')
    reviewer.save()

    post = BlogPost(content='Watched some TV')
    post.author = author
    post.reviewer = reviewer
    post.save()

    reviewer.delete()
    self.assertEqual(BlogPost.objects.count(), 1)
    self.assertEqual(BlogPost.objects.get().reviewer, None)

    # Delete the Writer should lead to deletion of the BlogPost
    author.delete()
    self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
    """Ensure that a referenced document is also deleted upon deletion for
    complex fields.
    """
    # Same rules as the scalar test, but wrapped in ListFields.
    class BlogPost(Document):
        content = StringField()
        authors = ListField(ReferenceField(
            self.Person, reverse_delete_rule=CASCADE))
        reviewers = ListField(ReferenceField(
            self.Person, reverse_delete_rule=NULLIFY))

    self.Person.drop_collection()

    BlogPost.drop_collection()

    author = self.Person(name='Test User')
    author.save()

    reviewer = self.Person(name='Re Viewer')
    reviewer.save()

    post = BlogPost(content='Watched some TV')
    post.authors = [author]
    post.reviewers = [reviewer]
    post.save()

    # Deleting the reviewer should have no effect on the BlogPost
    reviewer.delete()
    self.assertEqual(BlogPost.objects.count(), 1)
    self.assertEqual(BlogPost.objects.get().reviewers, [])

    # Delete the Person, which should lead to deletion of the BlogPost, too
    author.delete()
    self.assertEqual(BlogPost.objects.count(), 0)
def test_reverse_delete_rule_cascade_triggers_pre_delete_signal(self):
    """ ensure the pre_delete signal is triggered upon a cascading deletion
    setup a blog post with content, an author and editor
    delete the author which triggers deletion of blogpost via cascade
    blog post's pre_delete signal alters an editor attribute
    """
    class Editor(self.Person):
        review_queue = IntField(default=0)

    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
        editor = ReferenceField(Editor)

        @classmethod
        def pre_delete(cls, sender, document, **kwargs):
            # decrement the docs-to-review count
            document.editor.update(dec__review_queue=1)

    signals.pre_delete.connect(BlogPost.pre_delete, sender=BlogPost)

    self.Person.drop_collection()
    BlogPost.drop_collection()
    Editor.drop_collection()

    author = self.Person(name='Will S.').save()
    editor = Editor(name='Max P.', review_queue=1).save()
    BlogPost(content='wrote some books', author=author,
             editor=editor).save()

    # delete the author, the post is also deleted due to the CASCADE rule
    author.delete()
    # the pre-delete signal should have decremented the editor's queue
    editor = Editor.objects(name='Max P.').get()
    self.assertEqual(editor.review_queue, 0)
def test_two_way_reverse_delete_rule(self):
    """Ensure that Bi-Directional relationships work with
    reverse_delete_rule
    """
    class Bar(Document):
        content = StringField()
        foo = ReferenceField('Foo')

    class Foo(Document):
        content = StringField()
        bar = ReferenceField(Bar)

    # Rules must be registered manually since each class references the
    # other (one side is only resolvable after both are defined).
    Bar.register_delete_rule(Foo, 'bar', NULLIFY)
    Foo.register_delete_rule(Bar, 'foo', NULLIFY)

    Bar.drop_collection()
    Foo.drop_collection()

    b = Bar(content="Hello")
    b.save()

    f = Foo(content="world", bar=b)
    f.save()

    b.foo = f
    b.save()

    f.delete()

    self.assertEqual(Bar.objects.count(), 1)  # No effect on the BlogPost
    self.assertEqual(Bar.objects.get().foo, None)
def test_invalid_reverse_delete_rule_raise_errors(self):
    """Declaring reverse_delete_rule on references inside map/dict fields
    or inside EmbeddedDocuments must raise InvalidDocumentError."""
    def throw_invalid_document_error():
        class Blog(Document):
            content = StringField()
            authors = MapField(ReferenceField(
                self.Person, reverse_delete_rule=CASCADE))
            reviewers = DictField(
                field=ReferenceField(
                    self.Person,
                    reverse_delete_rule=NULLIFY))

    self.assertRaises(InvalidDocumentError, throw_invalid_document_error)

    def throw_invalid_document_error_embedded():
        class Parents(EmbeddedDocument):
            father = ReferenceField('Person', reverse_delete_rule=DENY)
            mother = ReferenceField('Person', reverse_delete_rule=DENY)

    self.assertRaises(
        InvalidDocumentError, throw_invalid_document_error_embedded)
def test_reverse_delete_rule_cascade_recurs(self):
    """Ensure that a chain of documents is also deleted upon cascaded
    deletion.
    """
    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)

    class Comment(Document):
        text = StringField()
        post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)

    self.Person.drop_collection()
    BlogPost.drop_collection()
    Comment.drop_collection()

    author = self.Person(name='Test User')
    author.save()

    post = BlogPost(content='Watched some TV')
    post.author = author
    post.save()

    comment = Comment(text='Kudos.')
    comment.post = post
    comment.save()

    # Delete the Person, which should lead to deletion of the BlogPost,
    # and, recursively to the Comment, too
    author.delete()
    self.assertEqual(Comment.objects.count(), 0)

    self.Person.drop_collection()
    BlogPost.drop_collection()
    Comment.drop_collection()
def test_reverse_delete_rule_deny(self):
    """Ensure that a document cannot be referenced if there are still
    documents referring to it.
    """
    class BlogPost(Document):
        content = StringField()
        author = ReferenceField(self.Person, reverse_delete_rule=DENY)

    self.Person.drop_collection()
    BlogPost.drop_collection()

    author = self.Person(name='Test User')
    author.save()

    post = BlogPost(content='Watched some TV')
    post.author = author
    post.save()

    # Delete the Person should be denied
    self.assertRaises(OperationError, author.delete)  # Should raise denied error
    self.assertEqual(BlogPost.objects.count(), 1)  # No objects may have been deleted
    self.assertEqual(self.Person.objects.count(), 1)

    # Other users, that don't have BlogPosts must be removable, like normal
    author = self.Person(name='Another User')
    author.save()

    self.assertEqual(self.Person.objects.count(), 2)
    author.delete()
    self.assertEqual(self.Person.objects.count(), 1)

    self.Person.drop_collection()
    BlogPost.drop_collection()
def subclasses_and_unique_keys_works(self):
    # NOTE(review): this method is missing the 'test_' prefix, so the
    # unittest runner never discovers or executes it.  Rename to
    # 'test_subclasses_and_unique_keys_works' to activate it.
    class A(Document):
        pass

    class B(A):
        foo = BooleanField(unique=True)

    A.drop_collection()
    B.drop_collection()

    # A unique index declared on subclass B must not prevent multiple
    # plain A documents from being saved.
    A().save()
    A().save()
    B(foo=True).save()

    self.assertEqual(A.objects.count(), 2)
    self.assertEqual(B.objects.count(), 1)
    A.drop_collection()
    B.drop_collection()
def test_document_hash(self):
    """Test document in list, dict, set
    """
    class User(Document):
        pass

    class BlogPost(Document):
        pass

    # Clear old data
    User.drop_collection()
    BlogPost.drop_collection()

    u1 = User.objects.create()
    u2 = User.objects.create()
    u3 = User.objects.create()
    u4 = User()  # New object

    b1 = BlogPost.objects.create()
    b2 = BlogPost.objects.create()

    # in List: membership uses __eq__/__hash__, which must distinguish
    # unsaved documents and documents of a different class.
    all_user_list = list(User.objects.all())

    self.assertTrue(u1 in all_user_list)
    self.assertTrue(u2 in all_user_list)
    self.assertTrue(u3 in all_user_list)
    self.assertFalse(u4 in all_user_list)  # New object
    self.assertFalse(b1 in all_user_list)  # Other object
    self.assertFalse(b2 in all_user_list)  # Other object

    # in Dict
    all_user_dic = {}
    for u in User.objects.all():
        all_user_dic[u] = "OK"

    self.assertEqual(all_user_dic.get(u1, False), "OK")
    self.assertEqual(all_user_dic.get(u2, False), "OK")
    self.assertEqual(all_user_dic.get(u3, False), "OK")
    self.assertEqual(all_user_dic.get(u4, False), False)  # New object
    self.assertEqual(all_user_dic.get(b1, False), False)  # Other object
    self.assertEqual(all_user_dic.get(b2, False), False)  # Other object

    # in Set
    all_user_set = set(User.objects.all())

    self.assertTrue(u1 in all_user_set)
def test_picklable(self):
    """Documents survive a pickle round-trip before and after save, and
    changes made to the resurrected copy persist back to the database.
    """
    pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
    pickle_doc.embedded = PickleEmbedded()
    pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved
    pickle_doc.save()

    pickled_doc = pickle.dumps(pickle_doc)
    resurrected = pickle.loads(pickled_doc)

    self.assertEqual(resurrected, pickle_doc)

    # Test pickling changed data
    pickle_doc.lists.append("3")
    pickled_doc = pickle.dumps(pickle_doc)
    resurrected = pickle.loads(pickled_doc)

    self.assertEqual(resurrected, pickle_doc)

    # The resurrected copy is a fully functional document: it can be
    # modified and saved, and the change is visible on a fresh fetch.
    resurrected.string = "Two"
    resurrected.save()

    pickle_doc = PickleTest.objects.first()
    self.assertEqual(resurrected, pickle_doc)
    self.assertEqual(pickle_doc.string, "Two")
    self.assertEqual(pickle_doc.lists, ["1", "2", "3"])
def test_regular_document_pickle(self):
    """Unpickling resolves the document class by name at load time, so a
    redefined class (new fields) is used for the resurrected object.
    """
    pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
    pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved
    pickle_doc.save()
    pickled_doc = pickle.dumps(pickle_doc)

    # Test that when a document's definition changes the new
    # definition is used
    fixtures.PickleTest = fixtures.NewDocumentPickleTest

    resurrected = pickle.loads(pickled_doc)
    self.assertEqual(resurrected.__class__,
                     fixtures.NewDocumentPickleTest)
    self.assertEqual(resurrected._fields_ordered,
                     fixtures.NewDocumentPickleTest._fields_ordered)
    self.assertNotEqual(resurrected._fields_ordered,
                        pickle_doc._fields_ordered)

    # The local PickleTest is still a ref to the original
    fixtures.PickleTest = PickleTest
def test_dynamic_document_pickle(self):
    """Dynamic documents (and dynamic embedded documents) preserve both
    declared and dynamic fields through a pickle round-trip.
    """
    pickle_doc = PickleDynamicTest(
        name="test", number=1, string="One", lists=['1', '2'])
    pickle_doc.embedded = PickleDyanmicEmbedded(foo="Bar")
    pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved

    pickle_doc.save()

    pickled_doc = pickle.dumps(pickle_doc)
    resurrected = pickle.loads(pickled_doc)

    self.assertEqual(resurrected, pickle_doc)
    self.assertEqual(resurrected._fields_ordered,
                     pickle_doc._fields_ordered)
    self.assertEqual(resurrected._dynamic_fields.keys(),
                     pickle_doc._dynamic_fields.keys())

    # The dynamic embedded document also keeps its dynamic fields.
    self.assertEqual(resurrected.embedded, pickle_doc.embedded)
    self.assertEqual(resurrected.embedded._fields_ordered,
                     pickle_doc.embedded._fields_ordered)
    self.assertEqual(resurrected.embedded._dynamic_fields.keys(),
                     pickle_doc.embedded._dynamic_fields.keys())
def test_picklable_on_signals(self):
    """Save/delete must not blow up for a document with an embedded doc.

    NOTE(review): presumably this class has signal handlers registered
    in the fixtures that pickle the document — confirm in fixtures.
    """
    pickle_doc = PickleSignalsTest(
        number=1, string="One", lists=['1', '2'])
    pickle_doc.embedded = PickleEmbedded()
    pickle_doc.save()
    pickle_doc.delete()
def test_throw_invalid_document_error(self):
    """Defining a field named ``validate`` (which shadows the document's
    validate() method) raises InvalidDocumentError at class creation.
    """
    # test handles people trying to upsert
    def throw_invalid_document_error():
        class Blog(Document):
            validate = DictField()

    self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
def test_mutating_documents(self):
    """Redefining an embedded document class with an extra nested field
    still allows loading old records and mutating the new nested field.
    """
    class B(EmbeddedDocument):
        field1 = StringField(default='field1')

    class A(Document):
        b = EmbeddedDocumentField(B, default=lambda: B())

    A.drop_collection()
    a = A()
    a.save()
    a.reload()
    self.assertEqual(a.b.field1, 'field1')

    # Redefine B with an additional embedded field2, reusing the same
    # class names so the registry points at the new definitions.
    class C(EmbeddedDocument):
        c_field = StringField(default='cfield')

    class B(EmbeddedDocument):
        field1 = StringField(default='field1')
        field2 = EmbeddedDocumentField(C, default=lambda: C())

    class A(Document):
        b = EmbeddedDocumentField(B, default=lambda: B())

    # Load the record saved under the OLD schema and mutate the new field.
    a = A.objects()[0]
    a.b.field2.c_field = 'new value'
    a.save()

    a.reload()
    self.assertEqual(a.b.field2.c_field, 'new value')
def test_can_save_false_values(self):
    """A required BooleanField explicitly set to False must round-trip
    through save() and remain queryable as False."""
    class Doc(Document):
        foo = StringField()
        archived = BooleanField(default=False, required=True)

    Doc.drop_collection()

    record = Doc()
    record.save()

    # Explicitly assigning False (the falsy default) must still persist.
    record.archived = False
    record.save()

    self.assertEqual(1, Doc.objects(archived=False).count())
def test_can_save_false_values_dynamic(self):
    """A dynamic (undeclared) attribute set to False must be persisted
    and queryable on a DynamicDocument."""
    class Doc(DynamicDocument):
        foo = StringField()

    Doc.drop_collection()

    record = Doc()
    record.save()

    # ``archived`` is not declared on the class — purely dynamic.
    record.archived = False
    record.save()

    self.assertEqual(1, Doc.objects(archived=False).count())
def test_do_not_save_unchanged_references(self):
    """Ensures cascading saves dont auto update: saving a document with
    an unchanged reference must not issue an update for the referenced
    document.
    """
    class Job(Document):
        name = StringField()

    class Person(Document):
        name = StringField()
        age = IntField()
        job = ReferenceField(Job)

    Job.drop_collection()
    Person.drop_collection()

    job = Job(name="Job 1")
    # job should not have any changed fields after the save
    job.save()

    person = Person(name="name", age=10, job=job)

    from pymongo.collection import Collection
    orig_update = Collection.update
    try:
        # Patch the driver-level update so ANY update during the save
        # fails the test; an insert is still allowed.
        def fake_update(*args, **kwargs):
            self.fail("Unexpected update for %s" % args[0].name)
            return orig_update(*args, **kwargs)

        Collection.update = fake_update
        person.save()
    finally:
        # Always restore the real method, even if the test fails.
        Collection.update = orig_update
def test_db_alias_tests(self):
    """ DB Alias tests: documents with different db_alias metas read and
    write to their own databases, and cross-db references dereference.
    """
    # mongoenginetest - Is default connection alias from setUp()
    # Register Aliases
    register_connection('testdb-1', 'mongoenginetest2')
    register_connection('testdb-2', 'mongoenginetest3')
    register_connection('testdb-3', 'mongoenginetest4')

    class User(Document):
        name = StringField()
        meta = {"db_alias": "testdb-1"}

    class Book(Document):
        name = StringField()
        meta = {"db_alias": "testdb-2"}

    # Drops
    User.drop_collection()
    Book.drop_collection()

    # Create
    bob = User.objects.create(name="Bob")
    hp = Book.objects.create(name="Harry Potter")

    # Selects
    self.assertEqual(User.objects.first(), bob)
    self.assertEqual(Book.objects.first(), hp)

    # DeReference: references across aliases (User on testdb-1, Book on
    # testdb-2, the linking doc on testdb-3)
    class AuthorBooks(Document):
        author = ReferenceField(User)
        book = ReferenceField(Book)
        meta = {"db_alias": "testdb-3"}

    # Drops
    AuthorBooks.drop_collection()

    ab = AuthorBooks.objects.create(author=bob, book=hp)

    # select
    self.assertEqual(AuthorBooks.objects.first(), ab)
    self.assertEqual(AuthorBooks.objects.first().book, hp)
    self.assertEqual(AuthorBooks.objects.first().author, bob)
    self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
    self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)

    # DB Alias
    self.assertEqual(User._get_db(), get_db("testdb-1"))
    self.assertEqual(Book._get_db(), get_db("testdb-2"))
    self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))

    # Collections
    self.assertEqual(
        User._get_collection(),
        get_db("testdb-1")[User._get_collection_name()])
    self.assertEqual(
        Book._get_collection(),
        get_db("testdb-2")[Book._get_collection_name()])
    self.assertEqual(
        AuthorBooks._get_collection(),
        get_db("testdb-3")[AuthorBooks._get_collection_name()])
def test_db_alias_overrides(self):
    """A subclass's db_alias can be overridden: the subclass writes to
    its own database while the parent stays on the default connection.
    """
    # Register a connection with db_alias testdb-2
    register_connection('testdb-2', 'mongoenginetest2')

    class A(Document):
        """Uses default db_alias
        """
        name = StringField()
        meta = {"allow_inheritance": True}

    class B(A):
        """Uses testdb-2 db_alias
        """
        meta = {"db_alias": "testdb-2"}

    A.objects.all()

    self.assertEqual('testdb-2', B._meta.get('db_alias'))
    self.assertEqual('mongoenginetest',
                     A._get_collection().database.name)
    self.assertEqual('mongoenginetest2',
                     B._get_collection().database.name)
def test_db_alias_propagates(self):
    """db_alias propagates? A subclass without its own db_alias inherits
    the parent's.
    """
    register_connection('testdb-1', 'mongoenginetest2')

    class A(Document):
        name = StringField()
        meta = {"db_alias": "testdb-1", "allow_inheritance": True}

    class B(A):
        pass

    self.assertEqual('testdb-1', B._meta.get('db_alias'))
def test_db_ref_usage(self):
    """ DB Ref usage in dict_fields: DBRefs stored inside a DictField
    can be queried with document instances, DBRefs, and __all operators.
    """
    class User(Document):
        name = StringField()

    class Book(Document):
        name = StringField()
        author = ReferenceField(User)
        extra = DictField()
        meta = {
            'ordering': ['+name']
        }

        def __unicode__(self):
            return self.name

        def __str__(self):
            return self.name

    # Drops
    User.drop_collection()
    Book.drop_collection()

    # Authors
    bob = User.objects.create(name="Bob")
    jon = User.objects.create(name="Jon")

    # Redactors
    karl = User.objects.create(name="Karl")
    susan = User.objects.create(name="Susan")
    peter = User.objects.create(name="Peter")

    # Bob — books with DBRefs stored in the free-form ``extra`` dict
    Book.objects.create(name="1", author=bob, extra={
        "a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
    Book.objects.create(name="2", author=bob, extra={
        "a": bob.to_dbref(), "b": karl.to_dbref()})
    Book.objects.create(name="3", author=bob, extra={
        "a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
    Book.objects.create(name="4", author=bob)

    # Jon
    Book.objects.create(name="5", author=jon)
    Book.objects.create(name="6", author=peter)
    Book.objects.create(name="7", author=jon)
    Book.objects.create(name="8", author=jon)
    Book.objects.create(name="9", author=jon,
                        extra={"a": peter.to_dbref()})

    # Checks — relies on meta ordering ['+name'] above
    self.assertEqual(",".join([str(b) for b in Book.objects.all()]),
                     "1,2,3,4,5,6,7,8,9")

    # bob related books: querying dict entries with a document instance
    self.assertEqual(",".join([str(b) for b in Book.objects.filter(
                     Q(extra__a=bob) |
                     Q(author=bob) |
                     Q(extra__b=bob))]),
                     "1,2,3,4")

    # Susan & Karl related books
    self.assertEqual(",".join([str(b) for b in Book.objects.filter(
                     Q(extra__a__all=[karl, susan]) |
                     Q(author__all=[karl, susan]) |
                     Q(extra__b__all=[
                         karl.to_dbref(), susan.to_dbref()]))
                     ]), "1")

    # $Where — raw javascript predicate still composes with the queryset
    self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
                     __raw__={
                         "$where": """
                            function(){
                                return this.name == '1' ||
                                       this.name == '2';}"""
                     })]),
                     "1,2")
def test_switch_db_instance(self):
    """Instance-level switch_db() lets one object save/update/delete
    against another database while the default-db copy is untouched.
    """
    register_connection('testdb-1', 'mongoenginetest2')

    class Group(Document):
        name = StringField()

    Group.drop_collection()
    with switch_db(Group, 'testdb-1') as Group:
        Group.drop_collection()

    Group(name="hello - default").save()
    self.assertEqual(1, Group.objects.count())

    group = Group.objects.first()
    group.switch_db('testdb-1')
    group.name = "hello - testdb!"
    group.save()

    with switch_db(Group, 'testdb-1') as Group:
        group = Group.objects.first()
        self.assertEqual("hello - testdb!", group.name)

    group = Group.objects.first()
    self.assertEqual("hello - default", group.name)

    # Slightly contrived now - perform an update
    # Only works as they have the same object_id
    group.switch_db('testdb-1')
    group.update(set__name="hello - update")

    with switch_db(Group, 'testdb-1') as Group:
        group = Group.objects.first()
        self.assertEqual("hello - update", group.name)
        Group.drop_collection()
        self.assertEqual(0, Group.objects.count())

    group = Group.objects.first()
    self.assertEqual("hello - default", group.name)

    # Totally contrived now - perform a delete
    # Only works as they have the same object_id
    group.switch_db('testdb-1')
    group.delete()

    with switch_db(Group, 'testdb-1') as Group:
        self.assertEqual(0, Group.objects.count())

    group = Group.objects.first()
    self.assertEqual("hello - default", group.name)
def test_load_undefined_fields(self):
    """Loading a raw record containing fields not declared on the
    document raises FieldDoesNotExist by default (strict mode).
    """
    class User(Document):
        name = StringField()

    User.drop_collection()

    # Insert via the raw pymongo collection to bypass document validation.
    User._get_collection().save({
        'name': 'John',
        'foo': 'Bar',
        'data': [1, 2, 3]
    })

    self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_with_strict_false(self):
    """With meta strict=False, undeclared fields load into _data but are
    not exposed as attributes.
    """
    class User(Document):
        name = StringField()

        meta = {'strict': False}

    User.drop_collection()

    # Raw insert bypasses document validation.
    User._get_collection().save({
        'name': 'John',
        'foo': 'Bar',
        'data': [1, 2, 3]
    })

    user = User.objects.first()
    self.assertEqual(user.name, 'John')
    self.assertFalse(hasattr(user, 'foo'))
    self.assertEqual(user._data['foo'], 'Bar')
    self.assertFalse(hasattr(user, 'data'))
    self.assertEqual(user._data['data'], [1, 2, 3])
def test_load_undefined_fields_on_embedded_document(self):
    """Undeclared fields inside an embedded document also raise
    FieldDoesNotExist on load in default (strict) mode.
    """
    class Thing(EmbeddedDocument):
        name = StringField()

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

    User.drop_collection()

    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false_on_doc(self):
    """strict=False on the PARENT document does not relax strictness for
    the embedded document: loading still raises FieldDoesNotExist.
    """
    class Thing(EmbeddedDocument):
        name = StringField()

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

        # strict applies only to User's own fields, not Thing's
        meta = {'strict': False}

    User.drop_collection()

    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false(self):
    """strict=False on the EMBEDDED document class lets its undeclared
    fields load into _data without becoming attributes.
    """
    class Thing(EmbeddedDocument):
        name = StringField()

        meta = {'strict': False}

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

    User.drop_collection()

    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    user = User.objects.first()
    self.assertEqual(user.name, 'John')
    self.assertEqual(user.thing.name, 'My thing')
    self.assertFalse(hasattr(user.thing, 'foo'))
    self.assertEqual(user.thing._data['foo'], 'Bar')
    self.assertFalse(hasattr(user.thing, 'data'))
    self.assertEqual(user.thing._data['data'], [1, 2, 3])
def test_spaces_in_keys(self):
    """Dynamic field names containing spaces can be saved and queried.

    Such attributes can only be set via setattr() and queried via a
    kwargs dict, since they are not valid Python identifiers.
    (The unused local ``Embedded`` class from the original was removed.)
    """
    class Doc(DynamicDocument):
        pass

    Doc.drop_collection()
    doc = Doc()
    setattr(doc, 'hello world', 1)
    doc.save()

    one = Doc.objects.filter(**{'hello world': 1}).count()
    self.assertEqual(1, one)
def test_shard_key(self):
    """A field listed in meta['shard_key'] becomes immutable once the
    document is saved; changing it raises OperationError.
    """
    class LogEntry(Document):
        machine = StringField()
        log = StringField()

        meta = {
            'shard_key': ('machine',)
        }

    LogEntry.drop_collection()

    log = LogEntry()
    log.machine = "Localhost"
    log.save()

    self.assertTrue(log.id is not None)
    # Non-shard-key fields remain mutable.
    log.log = "Saving"
    log.save()

    def change_shard_key():
        log.machine = "127.0.0.1"

    self.assertRaises(OperationError, change_shard_key)
def test_shard_key_in_embedded_document(self):
    """A dotted shard key ('foo.foo') protects a field nested inside an
    embedded document from modification after save.
    """
    class Foo(EmbeddedDocument):
        foo = StringField()

    class Bar(Document):
        meta = {
            'shard_key': ('foo.foo',)
        }
        foo = EmbeddedDocumentField(Foo)
        bar = StringField()

    foo_doc = Foo(foo='hello')
    bar_doc = Bar(foo=foo_doc, bar='world')
    bar_doc.save()

    self.assertTrue(bar_doc.id is not None)

    # Non-shard-key fields remain mutable.
    bar_doc.bar = 'baz'
    bar_doc.save()

    def change_shard_key():
        bar_doc.foo.foo = 'something'
        bar_doc.save()

    self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
    """Shard-key immutability also holds when the shard key field is the
    primary key.
    """
    class LogEntry(Document):
        machine = StringField(primary_key=True)
        log = StringField()

        meta = {
            'shard_key': ('machine',)
        }

    LogEntry.drop_collection()

    log = LogEntry()
    log.machine = "Localhost"
    log.save()

    self.assertTrue(log.id is not None)
    log.log = "Saving"
    log.save()

    def change_shard_key():
        log.machine = "127.0.0.1"

    self.assertRaises(OperationError, change_shard_key)
def test_kwargs_simple(self):
    """Constructing with a plain dict for an embedded field is
    equivalent to passing an embedded document instance.
    """
    class Embedded(EmbeddedDocument):
        name = StringField()

    class Doc(Document):
        doc_name = StringField()
        doc = EmbeddedDocumentField(Embedded)

        # Equality by value so the two construction styles can be compared.
        def __eq__(self, other):
            return (self.doc_name == other.doc_name and
                    self.doc == other.doc)

    classic_doc = Doc(doc_name="my doc", doc=Embedded(name="embedded doc"))
    dict_doc = Doc(**{"doc_name": "my doc",
                      "doc": {"name": "embedded doc"}})

    self.assertEqual(classic_doc, dict_doc)
    self.assertEqual(classic_doc._data, dict_doc._data)
def test_kwargs_complex(self):
    """Constructing with a list of dicts for a ListField of embedded
    documents is equivalent to passing embedded document instances.
    """
    class Embedded(EmbeddedDocument):
        name = StringField()

    class Doc(Document):
        doc_name = StringField()
        docs = ListField(EmbeddedDocumentField(Embedded))

        # Equality by value so the two construction styles can be compared.
        def __eq__(self, other):
            return (self.doc_name == other.doc_name and
                    self.docs == other.docs)

    classic_doc = Doc(doc_name="my doc", docs=[
        Embedded(name="embedded doc1"),
        Embedded(name="embedded doc2")])
    dict_doc = Doc(**{"doc_name": "my doc",
                      "docs": [{"name": "embedded doc1"},
                               {"name": "embedded doc2"}]})

    self.assertEqual(classic_doc, dict_doc)
    self.assertEqual(classic_doc._data, dict_doc._data)
def test_positional_creation(self):
    """Documents accept positional constructor arguments, filling the
    declared fields in definition order."""
    created = self.Person("Test User", 42)
    self.assertEqual("Test User", created.name)
    self.assertEqual(42, created.age)
def test_mixed_creation(self):
    """Documents accept a mix of positional and keyword constructor
    arguments."""
    created = self.Person("Test User", age=42)
    self.assertEqual("Test User", created.name)
    self.assertEqual(42, created.age)
def test_mixed_creation_dynamic(self):
    """DynamicDocuments also accept mixed positional/keyword constructor
    arguments; undeclared kwargs become dynamic fields."""
    class Person(DynamicDocument):
        name = StringField()

    created = Person("Test User", age=42)
    self.assertEqual("Test User", created.name)
    self.assertEqual(42, created.age)
def test_bad_mixed_creation(self):
    """Supplying the same field both positionally and by keyword raises
    TypeError.

    assertRaises forwards extra args/kwargs to the callable, so the
    wrapper function from the original is unnecessary.
    """
    # 'name' arrives positionally ("Test User") AND as a keyword.
    self.assertRaises(
        TypeError, self.Person, "Test User", 42, name="Bad User")
def test_data_contains_id_field(self):
    """_data on a loaded document includes the 'id' key with the
    document's primary key value.
    """
    class Person(Document):
        name = StringField()

    Person.drop_collection()
    Person(name="Harry Potter").save()

    person = Person.objects.first()
    # Membership test on the dict directly; '.keys()' was redundant.
    self.assertIn('id', person._data)
    self.assertEqual(person._data.get('id'), person.id)
def test_complex_nesting_document_and_embedded_document(self):
    """Mutations made through MapFields of embedded documents and
    references during a cascading custom save() are persisted.

    Fix: the original used dict.iteritems(), which does not exist on
    Python 3; dict.items() behaves identically here on both versions.
    """
    class Macro(EmbeddedDocument):
        value = DynamicField(default="UNDEFINED")

    class Parameter(EmbeddedDocument):
        macros = MapField(EmbeddedDocumentField(Macro))

        def expand(self):
            self.macros["test"] = Macro()

    class Node(Document):
        parameters = MapField(EmbeddedDocumentField(Parameter))

        def expand(self):
            self.flattened_parameter = {}
            for parameter_name, parameter in self.parameters.items():
                parameter.expand()

    class NodesSystem(Document):
        name = StringField(required=True)
        nodes = MapField(ReferenceField(Node, dbref=False))

        def save(self, *args, **kwargs):
            # Expand and save each referenced node before saving self.
            for node_name, node in self.nodes.items():
                node.expand()
                node.save(*args, **kwargs)
            super(NodesSystem, self).save(*args, **kwargs)

    NodesSystem.drop_collection()
    Node.drop_collection()

    system = NodesSystem(name="system")
    system.nodes["node"] = Node()
    system.save()
    system.nodes["node"].parameters["param"] = Parameter()
    system.save()

    system = NodesSystem.objects.first()
    self.assertEqual(
        "UNDEFINED",
        system.nodes["node"].parameters["param"].macros["test"].value)
def test_embedded_document_equality(self):
    """Two embedded documents hydrated from the same SON compare equal,
    and lazily dereferencing a ReferenceField does not break equality.
    """
    class Test(Document):
        field = StringField(required=True)

    class Embedded(EmbeddedDocument):
        ref = ReferenceField(Test)

    Test.drop_collection()
    test = Test(field='123').save()  # has id

    e = Embedded(ref=test)
    f1 = Embedded._from_son(e.to_mongo())
    f2 = Embedded._from_son(e.to_mongo())

    self.assertEqual(f1, f2)
    # Dereferencing mutates f1's internal state (DBRef -> document)...
    f1.ref  # Dereferences lazily
    # ...but must not affect equality with the undereferenced f2.
    self.assertEqual(f1, f2)
def test_dbref_equality(self):
    """DBRef vs document equality is symmetric and collection-aware:
    a DBRef equals a document with the same id and collection, but two
    DBRefs with the same id in different collections are unequal.
    """
    class Test2(Document):
        name = StringField()

    class Test3(Document):
        name = StringField()

    class Test(Document):
        name = StringField()
        test2 = ReferenceField('Test2')
        test3 = ReferenceField('Test3')

    Test.drop_collection()
    Test2.drop_collection()
    Test3.drop_collection()

    t2 = Test2(name='a')
    t2.save()

    # Deliberately give t3 the SAME id as t2 — same id, different
    # collection — to exercise the collection-aware comparisons below.
    t3 = Test3(name='x')
    t3.id = t2.id
    t3.save()

    t = Test(name='b', test2=t2, test3=t3)

    f = Test._from_son(t.to_mongo())

    dbref2 = f._data['test2']
    obj2 = f.test2
    self.assertTrue(isinstance(dbref2, DBRef))
    self.assertTrue(isinstance(obj2, Test2))
    self.assertTrue(obj2.id == dbref2.id)
    self.assertTrue(obj2 == dbref2)
    self.assertTrue(dbref2 == obj2)

    dbref3 = f._data['test3']
    obj3 = f.test3
    self.assertTrue(isinstance(dbref3, DBRef))
    self.assertTrue(isinstance(obj3, Test3))
    self.assertTrue(obj3.id == dbref3.id)
    self.assertTrue(obj3 == dbref3)
    self.assertTrue(dbref3 == obj3)

    # Same ids...
    self.assertTrue(obj2.id == obj3.id)
    self.assertTrue(dbref2.id == dbref3.id)
    # ...but different collections, so the refs are unequal.
    self.assertFalse(dbref2 == dbref3)
    self.assertFalse(dbref3 == dbref2)
    self.assertTrue(dbref2 != dbref3)
    self.assertTrue(dbref3 != dbref2)

    self.assertFalse(obj2 == dbref3)
    self.assertFalse(dbref3 == obj2)
    self.assertTrue(obj2 != dbref3)
    self.assertTrue(dbref3 != obj2)

    self.assertFalse(obj3 == dbref2)
    self.assertFalse(dbref2 == obj3)
    self.assertTrue(obj3 != dbref2)
    self.assertTrue(dbref2 != obj3)
def test_default_values(self):
    """Partial (only()) loads and saves must not overwrite fields with
    their defaults; a redefined class's new default applies lazily.

    Fix: deprecated unittest alias assertEquals replaced by assertEqual.
    """
    class Person(Document):
        created_on = DateTimeField(default=lambda: datetime.utcnow())
        name = StringField()

    p = Person(name='alon')
    p.save()
    orig_created_on = Person.objects().only('created_on')[0].created_on

    # Saving a partially loaded doc must not reset created_on to a new
    # default value.
    p2 = Person.objects().only('name')[0]
    p2.name = 'alon2'
    p2.save()
    p3 = Person.objects().only('created_on')[0]
    self.assertEqual(orig_created_on, p3.created_on)

    # Redefine the class with an extra defaulted field.
    class Person(Document):
        created_on = DateTimeField(default=lambda: datetime.utcnow())
        name = StringField()
        height = IntField(default=189)

    p4 = Person.objects()[0]
    p4.save()
    self.assertEqual(p4.height, 189)

    self.assertEqual(Person.objects(height=189).count(), 1)
def test_from_son(self):
    """Regression #771: documents built via from_json/_from_son with
    created=True have no id, and the shard key may then be assigned.

    Fix: deprecated assertEquals replaced with assertIsNone.
    """
    class MyPerson(self.Person):
        meta = dict(shard_key=["id"])

    p = MyPerson.from_json('{"name": "name", "age": 27}', created=True)
    self.assertIsNone(p.id)
    p.id = "12345"  # in case it is not working: "OperationError: Shard Keys are immutable..." will be raised here

    p = MyPerson._from_son({"name": "name", "age": 27}, created=True)
    self.assertIsNone(p.id)
    p.id = "12345"  # in case it is not working: "OperationError: Shard Keys are immutable..." will be raised here
def test_null_field(self):
    """Regressions #734/#864/#735: null=True fields accept and persist
    None, including when set via update_one(..., upsert=True), and a
    defaulted field set to None stays None.

    Fix: deprecated assertEquals replaced by assertEqual/assertIsNone.
    """
    # 734
    class User(Document):
        name = StringField()
        height = IntField(default=184, null=True)
        str_fld = StringField(null=True)
        int_fld = IntField(null=True)
        flt_fld = FloatField(null=True)
        dt_fld = DateTimeField(null=True)
        cdt_fld = ComplexDateTimeField(null=True)

    User.objects.delete()
    u = User(name='user')
    u.save()
    u_from_db = User.objects.get(name='user')
    u_from_db.height = None
    u_from_db.save()
    self.assertIsNone(u_from_db.height)

    # 864 — null fields default to None, not a field-type default
    self.assertIsNone(u_from_db.str_fld)
    self.assertIsNone(u_from_db.int_fld)
    self.assertIsNone(u_from_db.flt_fld)
    self.assertIsNone(u_from_db.dt_fld)
    self.assertIsNone(u_from_db.cdt_fld)

    # 735 — setting None through an atomic update also persists
    User.objects.delete()
    u = User(name='user')
    u.save()
    User.objects(name='user').update_one(set__height=None, upsert=True)
    u_from_db = User.objects.get(name='user')
    self.assertIsNone(u_from_db.height)
def test_not_saved_eq(self):
    """Two distinct unsaved documents compare unequal, while any
    document compares equal to itself."""
    class Person(Document):
        pass

    first = Person()
    second = Person()
    self.assertNotEqual(first, second)
    self.assertEqual(first, first)
def test_list_iter(self):
    """Regression #914: items yielded while iterating a ListField of
    embedded documents keep their _instance back-reference to the owner.
    """
    class B(EmbeddedDocument):
        v = StringField()

    class A(Document):
        l = ListField(EmbeddedDocumentField(B))

    A.objects.delete()
    A(l=[B(v='1'), B(v='2'), B(v='3')]).save()
    a = A.objects.get()
    self.assertEqual(a.l._instance, a)
    for idx, b in enumerate(a.l):
        self.assertEqual(b._instance, a)
    # Loop ran over all three elements (enumerate ends at index 2).
    self.assertEqual(idx, 2)
if __name__ == '__main__':
unittest.main()
| mit |
magfest/ubersystem | tests/uber/models/test_attendee.py | 1 | 33034 | from datetime import datetime
import pytest
import pytz
from mock import Mock
from pytz import UTC
from uber import config
from uber.config import c
from uber.models import Attendee, Department, DeptMembership, DeptMembershipRequest, DeptRole, FoodRestrictions, \
Group, Job, Session, Shift
from uber.models.commerce import StripeTransaction, StripeTransactionAttendee
from uber.model_checks import extra_donation_valid, _invalid_phone_number
@pytest.fixture()
def dept():
    # A plain (shifted) department with a fixed id for deterministic tests.
    yield Department(
        id='97cc0050-11e0-42eb-9a1b-83f27a1acf76',
        name='Console Challenges',
        description='Console Challenges')
@pytest.fixture()
def shiftless_dept():
    # A department flagged is_shiftless, for tests that depend on shiftless
    # behavior.
    yield Department(
        id='27152595-2ea8-43ee-8edb-a68cefb2b2ac',
        name='Con Ops',
        description='Con Ops',
        is_shiftless=True)
@pytest.fixture()
def trusted_role(dept):
    # A 'Trusted' role attached to the ``dept`` fixture's department.
    yield DeptRole(
        id='45c3fd2a-df1d-46bd-a10c-7289bbfd1167',
        name='Trusted',
        description='Trusted',
        department=dept)
class TestCosts:
    """Badge/total cost calculations with mocked price config
    (one-day price 10, attendee price 20).
    """

    @pytest.fixture(autouse=True)
    def mocked_prices(self, monkeypatch):
        # Pin config prices so the arithmetic below is deterministic.
        monkeypatch.setattr(c, 'get_oneday_price', Mock(return_value=10))
        monkeypatch.setattr(c, 'get_attendee_price', Mock(return_value=20))

    def test_badge_cost(self):
        assert 10 == Attendee(badge_type=c.ONE_DAY_BADGE).badge_cost
        assert 20 == Attendee().badge_cost
        assert 30 == Attendee(overridden_price=30).badge_cost
        assert 0 == Attendee(paid=c.NEED_NOT_PAY).badge_cost
        assert 20 == Attendee(paid=c.PAID_BY_GROUP).badge_cost
        assert 30 == Attendee(base_badge_price=30).badge_cost

    def test_total_cost(self):
        # total_cost = badge_cost + amount_extra
        assert 20 == Attendee().total_cost
        assert 25 == Attendee(amount_extra=5).total_cost

    def test_amount_unpaid(self, monkeypatch):
        monkeypatch.setattr(Attendee, 'total_cost', 50)
        assert 50 == Attendee().amount_unpaid
        assert 10 == Attendee(amount_paid=40).amount_unpaid
        assert 0 == Attendee(amount_paid=50).amount_unpaid
        # Overpayment clamps at zero rather than going negative.
        assert 0 == Attendee(amount_paid=51).amount_unpaid

    def test_age_discount(self, monkeypatch):
        monkeypatch.setattr(Attendee, 'age_group_conf', {'discount': 5})
        assert 15 == Attendee().total_cost
        assert 20 == Attendee(amount_extra=5).total_cost
        # overridden_price suppresses the age discount.
        assert 10 == Attendee(overridden_price=10).total_cost
        assert 15 == Attendee(overridden_price=10, amount_extra=5).total_cost

    def test_age_free(self, monkeypatch):
        # makes badge_cost free unless overridden_price is set
        monkeypatch.setattr(Attendee, 'age_group_conf', {'discount': 999})
        assert 0 == Attendee().total_cost
        assert 5 == Attendee(amount_extra=5).total_cost
        assert 10 == Attendee(overridden_price=10).total_cost
        assert 15 == Attendee(overridden_price=10, amount_extra=5).total_cost

    def test_age_discount_doesnt_stack(self, monkeypatch):
        # One-day badge keeps its own price; the age discount does not stack.
        monkeypatch.setattr(Attendee, 'age_group_conf', {'discount': 5})
        assert 10 == Attendee(badge_type=c.ONE_DAY_BADGE).badge_cost
class TestHalfPriceAgeDiscountCosts:
    """Interaction of the under-13 half-price rule with explicit age
    group discounts (attendee price mocked to 40, half price = 20).
    """

    @pytest.fixture(autouse=True)
    def mocked_prices(self, monkeypatch):
        monkeypatch.setattr(c, 'get_oneday_price', Mock(return_value=10))
        monkeypatch.setattr(c, 'get_attendee_price', Mock(return_value=40))

    def test_half_price_discount(self):
        # Age group discount not set: badge is half off
        assert 20 == Attendee(age_group=c.UNDER_13).badge_cost

    def test_half_price_overrides_age_discount(self, monkeypatch):
        # Age group discount is less than half off: badge is half off
        monkeypatch.setattr(Attendee, 'age_group_conf', {'val': c.UNDER_13, 'discount': 5})
        assert 20 == Attendee(age_group=c.UNDER_13).badge_cost

    def test_age_discount_overrides_half_price(self, monkeypatch):
        # Age group discount is greater than half off: badge price based on age discount instead
        monkeypatch.setattr(Attendee, 'age_group_conf', {'val': c.UNDER_13, 'discount': 30})
        assert 10 == Attendee(age_group=c.UNDER_13).badge_cost
def test_is_unpaid():
    # A fresh attendee (default status) and an explicit NOT_PAID are unpaid;
    # every other payment status is not.
    assert Attendee().is_unpaid
    assert Attendee(paid=c.NOT_PAID).is_unpaid
    for status in [c.NEED_NOT_PAY, c.PAID_BY_GROUP, c.REFUNDED]:
        assert not Attendee(paid=status).is_unpaid

# we may eventually want to make this a little more explicit;
# at the moment I'm basically just testing an implementation detail
def test_is_unassigned():
    # An attendee with no first name counts as an unassigned badge.
    assert Attendee().is_unassigned
    assert not Attendee(first_name='x').is_unassigned
def test_is_dealer():
    # Dealer status comes from the ribbon or the pseudo-dealer badge type...
    assert not Attendee().is_dealer
    assert Attendee(ribbon=c.DEALER_RIBBON).is_dealer
    assert Attendee(badge_type=c.PSEUDO_DEALER_BADGE).is_dealer

    # not all attendees in a dealer group are necessarily dealers
    dealer_group = Group(tables=1)
    assert not Attendee(group=dealer_group).is_dealer
    # ...or from being paid-by-group in a group that has tables.
    assert Attendee(group=dealer_group, paid=c.PAID_BY_GROUP).is_dealer
def test_is_dept_head():
    # Department-head status is derived from dept memberships, not a flag
    # on the attendee itself.
    assert not Attendee().is_dept_head
    dept_membership = DeptMembership(is_dept_head=True)
    assert Attendee(dept_memberships=[dept_membership]).is_dept_head
def test_dept_head_ribbon_label_from_ribbon_attr():
    # ribbon_labels reflects the (comma-separated) ribbon attribute directly.
    a = Attendee()
    assert a.ribbon_labels == []
    a.ribbon = '{}'.format(c.DEPT_HEAD_RIBBON)
    assert a.ribbon_labels == ['Department Head']
    a.ribbon = '{},{}'.format(c.VOLUNTEER_RIBBON, c.DEPT_HEAD_RIBBON)
    assert a.ribbon_labels == ['Department Head', 'Volunteer']
    a.ribbon = '{}'.format(c.VOLUNTEER_RIBBON)
    assert a.ribbon_labels == ['Volunteer']
def test_dept_head_ribbon_label_from_dept_membership():
    # presave_adjustments derives the Department Head ribbon from dept
    # memberships; repeated calls must be idempotent (no duplicate labels).
    with Session() as session:
        a = Attendee()
        session.add(a)
        a.presave_adjustments()
        assert a.ribbon_labels == []

        a.dept_memberships = [DeptMembership(is_dept_head=True)]
        a.presave_adjustments()
        assert a.ribbon_labels == ['Department Head']
        a.presave_adjustments()
        assert a.ribbon_labels == ['Department Head']

        a.badge_type = c.ATTENDEE_BADGE
        a.staffing = True
        a.ribbon = '{}'.format(c.DEALER_RIBBON)
        a.presave_adjustments()
        assert set(a.ribbon_labels) == set(['Department Head', 'Shopkeep'])
        a.presave_adjustments()
        assert set(a.ribbon_labels) == set(['Department Head', 'Shopkeep'])

        # NOTE(review): once set, the ribbon is retained even after the
        # dept-head membership goes away — apparently by design here.
        a.dept_memberships = [DeptMembership(is_dept_head=False)]
        a.presave_adjustments()
        assert set(a.ribbon_labels) == set(['Department Head', 'Shopkeep'])
        a.presave_adjustments()
        assert set(a.ribbon_labels) == set(['Department Head', 'Shopkeep'])

        # Detach everything so nothing is flushed to the test database.
        session.expunge_all()
def test_unassigned_name(monkeypatch):
    # unassigned_name only applies to group badges with no first name.
    monkeypatch.setattr(Attendee, 'badge', 'BadgeType')
    assert not Attendee().unassigned_name
    assert not Attendee(group_id=1, first_name='x').unassigned_name
    assert '[Unassigned BadgeType]' == Attendee(group_id=1).unassigned_name
def test_full_name(monkeypatch):
    # full_name is "first last", unless the badge is unassigned, in which
    # case the unassigned placeholder wins.
    assert 'x y' == Attendee(first_name='x', last_name='y').full_name
    monkeypatch.setattr(Attendee, 'unassigned_name', 'xxx')
    assert 'xxx' == Attendee(first_name='x', last_name='y').full_name
def test_last_first(monkeypatch):
    # last_first is "last, first", again overridden by unassigned_name.
    assert 'y, x' == Attendee(first_name='x', last_name='y').last_first
    monkeypatch.setattr(Attendee, 'unassigned_name', 'xxx')
    assert 'xxx' == Attendee(first_name='x', last_name='y').last_first
def test_legal_name_same_as_full_name():
    # A legal name identical to the full name is redundant and is cleared
    # by _misc_adjustments().
    same_legal_name = Attendee(first_name='First', last_name='Last', legal_name='First Last')
    same_legal_name._misc_adjustments()
    assert '' == same_legal_name.legal_name
def test_legal_name_diff_from_full_name():
    # A genuinely different legal name is preserved by _misc_adjustments().
    diff_legal_name = Attendee(first_name='first', last_name='last', legal_name='diff name')
    diff_legal_name._misc_adjustments()
    assert 'diff name' == diff_legal_name.legal_name
def test_badge():
    # The badge label combines paid status, badge number, and ribbon.
    assert Attendee().badge == 'Unpaid Attendee'
    assert Attendee(paid=c.HAS_PAID).badge == 'Attendee'
    # A badge number is only shown once the attendee has paid.
    assert Attendee(badge_num=123).badge == 'Unpaid Attendee'
    assert Attendee(badge_num=123, paid=c.HAS_PAID).badge == 'Attendee #123'
    assert Attendee(ribbon=c.VOLUNTEER_RIBBON).badge == 'Unpaid Attendee (Volunteer)'
def test_is_transferable(monkeypatch):
    # New attendees are never transferable, even if paid.
    assert not Attendee(paid=c.HAS_PAID).is_transferable
    monkeypatch.setattr(Attendee, 'is_new', False)

    assert Attendee(paid=c.HAS_PAID).is_transferable
    assert Attendee(paid=c.PAID_BY_GROUP).is_transferable
    assert not Attendee(paid=c.NOT_PAID).is_transferable

    # Checked-in badges and staff/guest badge types cannot be transferred.
    assert not Attendee(paid=c.HAS_PAID, checked_in=datetime.now(UTC)).is_transferable
    assert not Attendee(paid=c.HAS_PAID, badge_type=c.STAFF_BADGE).is_transferable
    assert not Attendee(paid=c.HAS_PAID, badge_type=c.GUEST_BADGE).is_transferable
def test_is_not_transferable_trusted(monkeypatch, dept, trusted_role):
    # An otherwise-transferable badge becomes non-transferable once the
    # attendee holds a department role.
    monkeypatch.setattr(Attendee, 'is_new', False)
    with Session() as session:
        attendee = Attendee(paid=c.HAS_PAID)
        dept_membership = DeptMembership(
            attendee=attendee,
            department=dept,
            dept_roles=[trusted_role])
        session.add_all([attendee, dept, trusted_role, dept_membership])
        session.flush()
        assert not attendee.is_transferable
@pytest.mark.parametrize('open,expected', [
    (lambda s: False, False),
    (lambda s: True, True),
])
def test_self_service_refunds_if_on(monkeypatch, open, expected):
    # Self-service refund eligibility tracks the SELF_SERVICE_REFUNDS_OPEN
    # config window, all other conditions being satisfied.
    monkeypatch.setattr(config.Config, 'SELF_SERVICE_REFUNDS_OPEN',
                        property(open))
    attendee = Attendee(paid=c.HAS_PAID, amount_paid=10)
    txn = StripeTransaction(amount=1000)
    attendee.stripe_txn_share_logs = [
        StripeTransactionAttendee(attendee_id=attendee.id, txn_id=txn.id, share=1000)]
    assert attendee.can_self_service_refund_badge == expected
@pytest.mark.parametrize('paid,expected', [
    (c.NEED_NOT_PAY, False),
    (c.REFUNDED, False),
    (c.NOT_PAID, True),
    (c.PAID_BY_GROUP, True),
    (c.HAS_PAID, True)
])
def test_self_service_refunds_payment_status(monkeypatch, paid, expected):
    # Comped (NEED_NOT_PAY) and already-refunded badges are not refundable.
    monkeypatch.setattr(config.Config, 'SELF_SERVICE_REFUNDS_OPEN',
                        property(lambda s: True))
    attendee = Attendee(paid=paid, amount_paid=10)
    txn = StripeTransaction(amount=1000)
    attendee.stripe_txn_share_logs = [
        StripeTransactionAttendee(attendee_id=attendee.id, txn_id=txn.id, share=1000)]
    assert attendee.can_self_service_refund_badge == expected
@pytest.mark.parametrize('amount_paid,checked_in,expected', [
    (0, False, False),
    (-10, False, False),
    (None, False, None),
    (10, True, False),
    (10, False, True),
])
def test_self_service_refunds_misc(monkeypatch, amount_paid, checked_in, expected):
    # A refund needs a positive amount_paid and an attendee who has not
    # already checked in.
    monkeypatch.setattr(config.Config, 'SELF_SERVICE_REFUNDS_OPEN',
                        property(lambda s: True))
    attendee = Attendee(paid=c.HAS_PAID, amount_paid=amount_paid)
    txn = StripeTransaction(amount=1000)
    attendee.stripe_txn_share_logs = [
        StripeTransactionAttendee(attendee_id=attendee.id, txn_id=txn.id, share=1000)]
    attendee.checked_in = checked_in
    assert attendee.can_self_service_refund_badge == expected
def test_self_service_refunds_no_stripe(monkeypatch):
    """With no Stripe transaction on record there is nothing to refund."""
    monkeypatch.setattr(
        config.Config, 'SELF_SERVICE_REFUNDS_OPEN', property(lambda s: True))
    attendee = Attendee(paid=c.HAS_PAID, amount_paid=10)
    # Explicitly empty share log: payment exists but no Stripe charge backs it.
    attendee.stripe_txn_share_logs = []
    assert not attendee.can_self_service_refund_badge
def test_self_service_refunds_group_leader(monkeypatch):
    """A group leader may not self-service refund their own badge."""
    monkeypatch.setattr(
        config.Config, 'SELF_SERVICE_REFUNDS_OPEN', property(lambda s: True))
    attendee = Attendee(paid=c.HAS_PAID, amount_paid=10)
    # Make this attendee the leader of their own group.
    attendee.group = Group(leader_id=attendee.id)
    txn = StripeTransaction(amount=1000)
    share_log = StripeTransactionAttendee(
        attendee_id=attendee.id, txn_id=txn.id, share=1000)
    attendee.stripe_txn_share_logs = [share_log]
    assert not attendee.can_self_service_refund_badge
def test_has_role_somewhere(dept, trusted_role):
    """has_role_somewhere is True iff some dept membership carries a role."""
    with Session() as session:
        attendee = Attendee(paid=c.HAS_PAID)
        dept_membership = DeptMembership(
            attendee=attendee,
            department=dept,
            dept_roles=[trusted_role])
        session.add_all([attendee, dept, trusted_role, dept_membership])
        session.flush()
        assert attendee.has_role_somewhere
        # Removing the role should flip the flag once ORM state is reloaded.
        dept_membership.dept_roles = []
        session.flush()
        session.refresh(attendee)
        assert not attendee.has_role_somewhere
def test_requested_any_dept():
    """A membership request naming no department means "any department"."""
    volunteer = Attendee(paid=c.HAS_PAID, first_name='V', last_name='One')
    # The request deliberately omits a department.
    volunteer.dept_membership_requests = [
        DeptMembershipRequest(attendee=volunteer)]
    departments = [
        Department(name='Dept1', description='Dept1'),
        Department(name='Dept2', description='Dept2'),
    ]
    with Session() as session:
        session.add_all(departments + [volunteer])
        session.commit()
        session.refresh(volunteer)
        expected = session.query(Department).order_by(Department.name).all()
        assert volunteer.requested_depts == expected
def test_must_contact():
    """must_contact lists each worked department's POCs, one '(Dept) Name / Name'
    segment per department, joined with '<br/>'.
    """
    dept1 = Department(name='Dept1', description='Dept1')
    dept2 = Department(name='Dept2', description='Dept2')
    # One dedicated POC per department, plus one POC shared by both.
    poc_dept1 = Attendee(
        paid=c.NEED_NOT_PAY, first_name='Poc', last_name='Dept1')
    poc_dept2 = Attendee(
        paid=c.NEED_NOT_PAY, first_name='Poc', last_name='Dept2')
    poc_both = Attendee(
        paid=c.NEED_NOT_PAY, first_name='Poc', last_name='Both')
    poc_dept1.dept_memberships = [DeptMembership(
        department=dept1,
        is_poc=True)]
    poc_dept2.dept_memberships = [DeptMembership(
        department=dept2,
        is_poc=True)]
    poc_both.dept_memberships = [
        DeptMembership(
            department=dept1,
            is_poc=True),
        DeptMembership(
            department=dept2,
            is_poc=True)]
    start_time = datetime.now(tz=pytz.UTC)
    # One job in each department; the volunteer works a shift on both.
    job1 = Job(
        name='Job1',
        description='Job1',
        start_time=start_time,
        duration=1,
        weight=1,
        slots=1,
        department=dept1)
    job2 = Job(
        name='Job2',
        description='Job2',
        start_time=start_time,
        duration=1,
        weight=1,
        slots=1,
        department=dept2)
    volunteer = Attendee(paid=c.HAS_PAID, first_name='V', last_name='One')
    job1.shifts = [Shift(attendee=volunteer, job=job1)]
    job2.shifts = [Shift(attendee=volunteer, job=job2)]
    with Session() as session:
        session.add_all([
            dept1, dept2, poc_dept1, poc_dept2, poc_both, job1, job2,
            volunteer])
        session.commit()
        # POC names within a department appear alphabetically, '/'-separated.
        assert volunteer.must_contact == '(Dept1) Poc Both / Poc Dept1<br/>(Dept2) Poc Both / Poc Dept2'
def test_has_personalized_badge():
    """Only staff and contractor badge types get a personalized badge."""
    assert not Attendee().has_personalized_badge
    for personalized in (c.STAFF_BADGE, c.CONTRACTOR_BADGE):
        assert Attendee(badge_type=personalized).has_personalized_badge
    for generic in (c.ATTENDEE_BADGE, c.ONE_DAY_BADGE, c.GUEST_BADGE):
        assert not Attendee(badge_type=generic).has_personalized_badge
def test_takes_shifts(dept, shiftless_dept):
    """An attendee takes shifts iff staffing and assigned to a dept with shifts."""
    # Neither staffing nor assigned: no shifts.
    assert not Attendee().takes_shifts
    # Staffing but unassigned: still no shifts.
    assert not Attendee(staffing=True).takes_shifts
    # At least one assigned department must actually schedule shifts.
    assert Attendee(staffing=True, assigned_depts=[dept]).takes_shifts
    assert not Attendee(staffing=True, assigned_depts=[shiftless_dept]).takes_shifts
    assert Attendee(staffing=True, assigned_depts=[dept, shiftless_dept]).takes_shifts
class TestAttendeeFoodRestrictionsFilledOut:
    """Tests for food_restrictions_filled_out and shift_prereqs_complete,
    under both settings of the STAFF_GET_FOOD config flag.
    """
    @pytest.fixture
    def staff_get_food_true(self, monkeypatch):
        """Patch config so staff are fed at this event."""
        monkeypatch.setattr(config.Config, 'STAFF_GET_FOOD', property(lambda x: True))
        assert c.STAFF_GET_FOOD
    @pytest.fixture
    def staff_get_food_false(self, monkeypatch):
        """Patch config so staff are NOT fed at this event."""
        monkeypatch.setattr(config.Config, 'STAFF_GET_FOOD', property(lambda x: False))
        assert not c.STAFF_GET_FOOD
    def test_food_restrictions_filled_out(self, staff_get_food_true):
        assert Attendee(food_restrictions=FoodRestrictions()).food_restrictions_filled_out
    def test_food_restrictions_not_filled_out(self, staff_get_food_true):
        assert not Attendee().food_restrictions_filled_out
    def test_food_restrictions_not_needed(self, staff_get_food_false):
        # When staff aren't fed, the form counts as filled out by default.
        assert Attendee().food_restrictions_filled_out
    def test_shift_prereqs_complete(self, staff_get_food_true):
        # Prereqs: not a placeholder, shirt size chosen, food form filled out.
        assert Attendee(placeholder=False, shirt=1, food_restrictions=FoodRestrictions()).shift_prereqs_complete
    def test_shift_prereqs_placeholder(self, staff_get_food_true):
        assert not Attendee(placeholder=True, shirt=1, food_restrictions=FoodRestrictions()).shift_prereqs_complete
    def test_shift_prereqs_no_shirt(self, staff_get_food_true):
        # Both "no shirt" and "unknown size" fail the shirt prerequisite.
        assert not Attendee(
            placeholder=False, shirt=c.NO_SHIRT, food_restrictions=FoodRestrictions()).shift_prereqs_complete
        assert not Attendee(
            placeholder=False, shirt=c.SIZE_UNKNOWN, food_restrictions=FoodRestrictions()).shift_prereqs_complete
    def test_shift_prereqs_no_food(self, staff_get_food_true):
        assert not Attendee(placeholder=False, shirt=1).shift_prereqs_complete
    def test_shift_prereqs_food_not_needed(self, staff_get_food_false):
        assert Attendee(placeholder=False, shirt=1).shift_prereqs_complete
class TestUnsetVolunteer:
    """Tests for Attendee.unset_volunteering().

    NOTE(review): the methods from test_affiliate_with_extra onward exercise
    Attendee._misc_adjustments(), not unset_volunteering() — they look like
    they belong in a separate test class; confirm against upstream history.
    """
    def test_basic(self, dept, trusted_role):
        """unset_volunteering() clears all staffing-related state."""
        a = Attendee(
            staffing=True,
            requested_depts=[dept],
            ribbon=c.VOLUNTEER_RIBBON,
            shifts=[Shift()])
        a.dept_memberships = [DeptMembership(
            attendee=a,
            department=dept,
            dept_roles=[trusted_role])]
        a.assigned_depts = [dept]
        a.unset_volunteering()
        assert not a.staffing
        assert not a.has_role_somewhere
        assert not a.requested_depts
        assert not a.dept_memberships
        assert not a.shifts
        assert a.ribbon == ''
    def test_different_ribbon(self):
        """Non-volunteer ribbons are preserved."""
        a = Attendee(ribbon=c.DEALER_RIBBON)
        a.unset_volunteering()
        assert c.DEALER_RIBBON in a.ribbon_ints
    def test_staff_badge(self, monkeypatch):
        """Staff badges are downgraded to attendee badges and renumbered."""
        with Session() as session:
            assert session
            # Stub the session so badge-number bookkeeping needs no DB.
            monkeypatch.setattr(Attendee, 'session', Mock())
            a = Attendee(badge_type=c.STAFF_BADGE, badge_num=123)
            a.unset_volunteering()
            assert a.badge_type == c.ATTENDEE_BADGE and a.badge_num is None
    def test_affiliate_with_extra(self):
        """An affiliate is kept when the attendee pays extra."""
        a = Attendee(affiliate='xxx', amount_extra=1)
        a._misc_adjustments()
        assert a.affiliate == 'xxx'
    def test_affiliate_without_extra(self):
        """The affiliate is cleared when nothing extra is paid."""
        a = Attendee(affiliate='xxx')
        a._misc_adjustments()
        assert a.affiliate == ''
    def test_amount_refunded_when_refunded(self):
        a = Attendee(amount_refunded=123, paid=c.REFUNDED)
        a._misc_adjustments()
        assert a.amount_refunded == 123
    def test_amount_refunded_when_not_refunded(self):
        # amount_refunded is zeroed unless the badge is actually refunded.
        a = Attendee(amount_refunded=123)
        a._misc_adjustments()
        assert not a.amount_refunded
    def test_badge_precon(self):
        """Before the event, a badge number does not imply checked in."""
        a = Attendee(badge_num=1)
        a._misc_adjustments()
        assert not a.checked_in
    def test_badge_at_con(self, monkeypatch, at_con):
        """At-con, a new non-preassigned attendee with a badge number is
        treated as checked in; preassigned types and existing rows are not."""
        a = Attendee()
        a._misc_adjustments()
        assert not a.checked_in
        a = Attendee(badge_num=1)
        a._misc_adjustments()
        assert a.checked_in
        a = Attendee(badge_num=1, badge_type=c.PREASSIGNED_BADGE_TYPES[0])
        a._misc_adjustments()
        assert not a.checked_in
        # Existing (non-new) attendees are never auto-checked-in.
        monkeypatch.setattr(Attendee, 'is_new', False)
        a = Attendee(badge_num=1)
        a._misc_adjustments()
        assert not a.checked_in
    def test_names(self):
        """Names are title-cased, but an existing mixed-case name is kept."""
        a = Attendee(first_name='nac', last_name='mac Feegle')
        a._misc_adjustments()
        assert a.full_name == 'Nac mac Feegle'
        a = Attendee(first_name='NAC', last_name='mac feegle')
        a._misc_adjustments()
        assert a.full_name == 'Nac Mac Feegle'
class TestStaffingAdjustments:
    """Tests for Attendee._staffing_adjustments()."""
    @pytest.fixture(autouse=True)
    def unset_volunteering(self, monkeypatch):
        """Replace unset_volunteering with a Mock so calls can be asserted."""
        monkeypatch.setattr(Attendee, 'unset_volunteering', Mock())
        return Attendee.unset_volunteering
    @pytest.fixture(autouse=True)
    def prevent_presave_adjustments(self, monkeypatch):
        """Prevent some tests from crashing on exit by not invoking presave_adjustments()."""
        monkeypatch.setattr(Attendee, 'presave_adjustments', Mock())
        return Attendee.presave_adjustments
    def test_dept_head_invariants(self, dept):
        """Dept heads are always staffing with a staff badge."""
        dept_membership = DeptMembership(
            department=dept,
            is_dept_head=True)
        a = Attendee(dept_memberships=[dept_membership])
        a._staffing_adjustments()
        assert a.staffing
        assert a.badge_type == c.STAFF_BADGE
    def test_staffing_still_trusted_assigned(self, dept, shiftless_dept):
        """
        After applying staffing adjustments, any depts you are both trusted
        and assigned to should remain unchanged.
        """
        a = Attendee(staffing=True)
        dept_memberships = [
            DeptMembership(
                attendee=a,
                attendee_id=a.id,
                department=dept,
                department_id=dept.id,
                is_dept_head=True),
            DeptMembership(
                attendee=a,
                attendee_id=a.id,
                department=shiftless_dept,
                department_id=shiftless_dept.id,
                dept_roles=[DeptRole()])]
        a.assigned_depts = [dept, shiftless_dept]
        a.dept_memberships_with_role = dept_memberships
        a._staffing_adjustments()
        assert a.assigned_to(dept) and a.trusted_in(dept)
        assert a.assigned_to(shiftless_dept) and a.trusted_in(shiftless_dept)
    def test_unpaid_dept_head(self, dept):
        """Dept heads need not pay for their badge."""
        dept_membership = DeptMembership(
            department=dept,
            is_dept_head=True)
        a = Attendee(dept_memberships=[dept_membership])
        a._staffing_adjustments()
        assert a.paid == c.NEED_NOT_PAY
    def test_under_18_at_con(self, at_con, unset_volunteering):
        # At-con, minors are not automatically de-volunteered.
        a = Attendee(age_group=c.UNDER_18)
        a._staffing_adjustments()
        assert not unset_volunteering.called
    def test_staffers_need_no_volunteer_ribbon(self):
        a = Attendee(badge_type=c.STAFF_BADGE, ribbon=c.VOLUNTEER_RIBBON)
        a._staffing_adjustments()
        assert a.ribbon == ''
    def test_staffers_can_have_other_ribbons(self):
        a = Attendee(badge_type=c.STAFF_BADGE, ribbon=c.DEALER_RIBBON)
        a._staffing_adjustments()
        assert c.DEALER_RIBBON in a.ribbon_ints
    def test_no_to_yes_ribbon(self, unset_volunteering, prevent_presave_adjustments):
        """Adding the volunteer ribbon turns staffing on."""
        with Session() as session:
            a = session.attendee(first_name='Regular', last_name='Attendee')
            a.ribbon = c.VOLUNTEER_RIBBON
            a._staffing_adjustments()
            assert a.staffing
            assert not unset_volunteering.called
    def test_no_to_yes_volunteering(self, unset_volunteering, prevent_presave_adjustments):
        """Turning staffing on adds the volunteer ribbon."""
        with Session() as session:
            a = session.attendee(first_name='Regular', last_name='Attendee')
            a.staffing = True
            a._staffing_adjustments()
            assert a.ribbon_ints == [c.VOLUNTEER_RIBBON]
            assert not unset_volunteering.called
    def test_yes_to_no_ribbon(self, unset_volunteering, prevent_presave_adjustments):
        """Removing the ribbon from a volunteer unsets volunteering."""
        with Session() as session:
            a = session.attendee(first_name='Regular', last_name='Volunteer')
            a.ribbon = ''
            a._staffing_adjustments()
            assert unset_volunteering.called
    def test_yes_to_no_volunteering(self, unset_volunteering, prevent_presave_adjustments):
        """Turning staffing off unsets volunteering."""
        with Session() as session:
            a = session.attendee(first_name='Regular', last_name='Volunteer')
            a.staffing = False
            a._staffing_adjustments()
            assert unset_volunteering.called
class TestBadgeAdjustments:
    """Tests for Attendee._badge_adjustments() pseudo-badge resolution."""
    @pytest.fixture(autouse=True)
    def mock_attendee_session(self, monkeypatch):
        """Stub the session so badge numbers can be assigned without a DB."""
        session_stub = Mock()
        session_stub.get_next_badge_num = Mock(return_value=123)
        monkeypatch.setattr(Attendee, 'session', session_stub)
    @pytest.fixture
    def fully_paid(self, monkeypatch):
        """Make every attendee appear fully paid."""
        monkeypatch.setattr(Attendee, 'paid', c.HAS_PAID)
        monkeypatch.setattr(Attendee, 'amount_unpaid', 0)
    def test_group_to_attendee(self):
        # Pseudo group badges collapse to plain attendee badges, no ribbon.
        attendee = Attendee(badge_type=c.PSEUDO_GROUP_BADGE)
        attendee._badge_adjustments()
        assert attendee.badge_type == c.ATTENDEE_BADGE
        assert attendee.ribbon == ''
    def test_dealer_to_attendee(self):
        # Pseudo dealer badges become attendee badges with a dealer ribbon.
        attendee = Attendee(badge_type=c.PSEUDO_DEALER_BADGE)
        attendee._badge_adjustments()
        assert attendee.badge_type == c.ATTENDEE_BADGE
        assert attendee.ribbon_ints == [c.DEALER_RIBBON]
class TestStatusAdjustments:
    """Tests for Attendee._status_adjustments() badge-status promotion."""
    def test_set_paid_to_complete(self):
        """A paid, assigned, non-placeholder badge is promoted to COMPLETED."""
        a = Attendee(paid=c.HAS_PAID, badge_status=c.NEW_STATUS, first_name='Paid', placeholder=False)
        a._status_adjustments()
        assert a.badge_status == c.COMPLETED_STATUS
    def test_set_comped_to_complete(self):
        """Comped (NEED_NOT_PAY) badges are also promoted."""
        a = Attendee(paid=c.NEED_NOT_PAY, badge_status=c.NEW_STATUS, first_name='Paid', placeholder=False)
        a._status_adjustments()
        assert a.badge_status == c.COMPLETED_STATUS
    def test_set_group_paid_to_complete(self, monkeypatch):
        """Group-paid badges complete once the group owes nothing."""
        monkeypatch.setattr(Group, 'amount_unpaid', 0)
        g = Group()
        a = Attendee(
            paid=c.PAID_BY_GROUP,
            badge_status=c.NEW_STATUS,
            first_name='Paid',
            placeholder=False,
            group=g,
            group_id=g.id)
        a._status_adjustments()
        assert a.badge_status == c.COMPLETED_STATUS
    def test_unpaid_group_not_completed(self, monkeypatch):
        """A badge stays NEW while its group still owes money."""
        monkeypatch.setattr(Group, 'amount_unpaid', 100)
        g = Group()
        a = Attendee(paid=c.PAID_BY_GROUP, badge_status=c.NEW_STATUS, first_name='Paid', placeholder=False, group=g)
        a._status_adjustments()
        assert a.badge_status == c.NEW_STATUS
    def test_placeholder_not_completed(self):
        a = Attendee(paid=c.NEED_NOT_PAY, badge_status=c.NEW_STATUS, first_name='Paid', placeholder=True)
        a._status_adjustments()
        assert a.badge_status == c.NEW_STATUS
    def test_unassigned_not_completed(self):
        # An empty first name marks an unassigned badge.
        a = Attendee(paid=c.NEED_NOT_PAY, badge_status=c.NEW_STATUS, first_name='')
        a._status_adjustments()
        assert a.badge_status == c.NEW_STATUS
    def test_banned_to_deferred(self, monkeypatch):
        """Banned attendees land on the watchlist instead of completing."""
        a = Attendee(paid=c.HAS_PAID, badge_status=c.NEW_STATUS, first_name='Paid', placeholder=False)
        monkeypatch.setattr(Attendee, 'banned', True)
        a._status_adjustments()
        assert a.badge_status == c.WATCHED_STATUS
class TestLookupAttendee:
    """Tests for Session.lookup_attendee(first, last, email, zip)."""
    @pytest.fixture(autouse=True)
    def searchable(self):
        """Seed searchable attendees; returns the primary attendee's id."""
        with Session() as session:
            attendee = Attendee(
                placeholder=True,
                first_name='Searchable',
                last_name='Attendee',
                email='searchable@example.com',
                zip_code='12345'
            )
            session.add(attendee)
            # Names containing spaces, to exercise multi-word matching.
            session.add(Attendee(
                placeholder=True,
                first_name='Two First',
                last_name='Names',
                email='searchable@example.com',
                zip_code='12345'
            ))
            session.add(Attendee(
                placeholder=True,
                first_name='Two',
                last_name='Last Names',
                email='searchable@example.com',
                zip_code='12345'
            ))
            # For each non-completed status, add a duplicate pair so lookup
            # ordering by badge status can be verified.
            for status in [c.NEW_STATUS, c.INVALID_STATUS, c.REFUNDED_STATUS]:
                session.add(Attendee(
                    placeholder=True,
                    first_name='Duplicate',
                    last_name=c.BADGE_STATUS[status],
                    email='duplicate@example.com',
                    zip_code='12345',
                    badge_status=status
                ))
                session.add(Attendee(
                    placeholder=True,
                    first_name='Duplicate',
                    last_name=c.BADGE_STATUS[status],
                    email='duplicate@example.com',
                    zip_code='12345',
                    badge_status=c.COMPLETED_STATUS
                ))
        return attendee.id
    def test_search_not_found(self):
        """Any single mismatched field raises ValueError."""
        with Session() as session:
            pytest.raises(
                ValueError, session.lookup_attendee, 'Searchable', 'Attendee', 'searchable@example.com', 'xxxxx')
            pytest.raises(ValueError, session.lookup_attendee, 'XXX', 'XXX', 'searchable@example.com', '12345')
            pytest.raises(ValueError, session.lookup_attendee, 'Searchable', 'Attendee', 'xxx', '12345')
    def test_search_basic(self, searchable):
        with Session() as session:
            assert str(searchable) == session.lookup_attendee(
                'Searchable', 'Attendee', 'searchable@example.com', '12345').id
    def test_search_case_insensitive(self, searchable):
        with Session() as session:
            assert str(searchable) == session.lookup_attendee(
                'searchablE', 'attendeE', 'seArchAble@exAmple.com', '12345').id
    def test_search_multi_word_names(self):
        with Session() as session:
            assert session.lookup_attendee('Two First', 'Names', 'searchable@example.com', '12345')
            assert session.lookup_attendee('Two', 'Last Names', 'searchable@example.com', '12345')
    def test_search_ordered_by_badge_status(self):
        """When duplicates exist, the completed badge wins the lookup."""
        with Session() as session:
            for status in [c.NEW_STATUS, c.INVALID_STATUS, c.REFUNDED_STATUS]:
                attendee = session.lookup_attendee(
                    'Duplicate', c.BADGE_STATUS[status], 'duplicate@example.com', '12345')
                assert attendee.badge_status == c.COMPLETED_STATUS
class TestExtraDonationValidations:
    """Tests for the extra_donation_valid() attendee validation."""
    def test_extra_donation_nan(self):
        message = extra_donation_valid(Attendee(extra_donation="blah"))
        assert message == "What you entered for Extra Donation (blah) isn't even a number"
    def test_extra_donation_below_zero(self):
        message = extra_donation_valid(Attendee(extra_donation=-10))
        assert message == "Extra Donation must be a number that is 0 or higher."
    def test_extra_donation_valid(self):
        # A valid donation produces no error message.
        assert extra_donation_valid(Attendee(extra_donation=10)) is None
class TestPhoneNumberValidations:
    """Tests for the _invalid_phone_number() validation helper."""
    @pytest.mark.parametrize('number', [
        # valid US numbers
        '7031234567',
        '703 123 4567',
        '(641) 123 4567',
        '803-123-4567',
        '(210)123-4567',
        '12071234567',
        '(202)fox-trot',
        '+1 (202) 123-4567',
        # valid international numbers
        # all international numbers must have a leading +
        '+44 20 7946 0974',
        '+442079460974',
        '+44 7700 900927',
        '+61 491 570 156',
        '+36 55 889 752',
        '+353 20 914 9510',
        '+49 033933-88213'
    ])
    def test_valid_number(self, number):
        assert not _invalid_phone_number(number)
    @pytest.mark.parametrize('number', [
        # invalid US numbers
        # missing digits
        '304123456',
        '(864) 123 456',
        '228-12-4567',
        # too many digits
        '405 123 45678',
        '701 1234 4567',
        # invalid characters
        'f',
        '404\\404 4040',
        # normally a valid US number, but we want the area code
        '123-4567',
        # invalid international numbers
        '+1234567890',
        '+41458d98e5',
        '+44,4930222'
    ])
    def test_invalid_number(self, number):
        # BUG FIX: the first parameter was misspelled `selfself`; pytest still
        # bound the instance to it, but the signature was misleading.
        assert _invalid_phone_number(number)
class TestNormalizedEmail:
    """normalized_email lowercases the address and drops the dots."""
    def test_good_email(self):
        assert Attendee(email='joe@gmail.com').normalized_email == 'joe@gmailcom'
    def test_dots(self):
        # Dots are stripped everywhere, including before the @.
        assert Attendee(email='j.o.e@gmail.com').normalized_email == 'joe@gmailcom'
    def test_capitalized_beginning(self):
        assert Attendee(email='JOE@gmail.com').normalized_email == 'joe@gmailcom'
    def test_capitalized_end(self):
        assert Attendee(email='joe@GMAIL.COM').normalized_email == 'joe@gmailcom'
    def test_alternating_caps(self):
        assert Attendee(email='jOe@GmAiL.cOm').normalized_email == 'joe@gmailcom'
    def test_empty_string(self):
        assert Attendee(email='').normalized_email == ''
| agpl-3.0 |
achang97/YouTunes | lib/python2.7/site-packages/youtube_dl/extractor/urplay.py | 50 | 2304 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class URPlayIE(InfoExtractor):
    """Extractor for urplay.se / urskola.se (Swedish educational broadcaster UR)."""
    _VALID_URL = r'https?://(?:www\.)?ur(?:play|skola)\.se/(?:program|Produkter)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://urplay.se/program/190031-tripp-trapp-trad-sovkudde',
        'md5': 'ad5f0de86f16ca4c8062cd103959a9eb',
        'info_dict': {
            'id': '190031',
            'ext': 'mp4',
            'title': 'Tripp, Trapp, Träd : Sovkudde',
            'description': 'md5:b86bffdae04a7e9379d1d7e5947df1d1',
        },
    }, {
        'url': 'http://urskola.se/Produkter/155794-Smasagor-meankieli-Grodan-i-vida-varlden',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The page embeds the player config as a JS object: urPlayer.init({...}).
        urplayer_data = self._parse_json(self._search_regex(
            r'urPlayer\.init\(({.+?})\);', webpage, 'urplayer data'), video_id)
        # The load balancer returns the streaming host under 'redirect'.
        host = self._download_json('http://streaming-loadbalancer.ur.se/loadbalancer.json', video_id)['redirect']
        formats = []
        # Try the plain and '_hd' variants; fall back to the '_sub' file
        # (presumably a variant with burned-in subtitles — TODO confirm).
        # NOTE(review): `quality` and `preference` are unused in the loop body.
        for quality_attr, quality, preference in (('', 'sd', 0), ('_hd', 'hd', 1)):
            file_http = urplayer_data.get('file_http' + quality_attr) or urplayer_data.get('file_http_sub' + quality_attr)
            if file_http:
                formats.extend(self._extract_wowza_formats(
                    'http://%s/%splaylist.m3u8' % (host, file_http), video_id, skip_protocols=['rtmp', 'rtsp']))
        self._sort_formats(formats)
        subtitles = {}
        # Keep only caption tracks (or tracks with no declared kind).
        for subtitle in urplayer_data.get('subtitles', []):
            subtitle_url = subtitle.get('file')
            kind = subtitle.get('kind')
            if not subtitle_url or (kind and kind != 'captions'):
                continue
            subtitles.setdefault(subtitle.get('label', 'Svenska'), []).append({
                'url': subtitle_url,
            })
        return {
            'id': video_id,
            'title': urplayer_data['title'],
            'description': self._og_search_description(webpage),
            'thumbnail': urplayer_data.get('image'),
            'series': urplayer_data.get('series_title'),
            'subtitles': subtitles,
            'formats': formats,
        }
| mit |
eduNEXT/edx-platform | lms/djangoapps/course_api/blocks/transformers/tests/test_extra_fields.py | 4 | 1859 | """
Tests for ExtraFieldsTransformer.
"""
from django.test import override_settings
# pylint: disable=protected-access
from openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..extra_fields import ExtraFieldsTransformer
@override_settings(COURSE_BLOCKS_API_EXTRA_FIELDS=[('course', 'other_course_settings')])
class TestExtraFieldsTransformer(ModuleStoreTestCase):
    """
    Test proper behavior for ExtraFieldsTransformer: a course-level field
    listed in COURSE_BLOCKS_API_EXTRA_FIELDS must survive collect/transform
    and be readable from the transformed block data.
    """
    shard = 4
    # Arbitrary nested value used as the course's other_course_settings.
    OTHER_COURSE_SETTINGS_DEFAULT = {
        'test key': 'test value',
        'jackson 5': [
            ['a', 'b', 'c'],
            'it\'s easy as',
            [1, 2, 3],
            'as simple as',
            ['do', 're', 'mi']
        ]
    }
    def setUp(self):
        """Create a sample course and its block structure."""
        super().setUp()
        self.course = SampleCourseFactory.create(
            other_course_settings=self.OTHER_COURSE_SETTINGS_DEFAULT
        )
        self.course_key = self.course.id
        self.course_usage_key = self.store.make_course_usage_key(self.course_key)
        self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
    def test_transform(self):
        """The collected extra field round-trips through the transformer."""
        # collect phase
        ExtraFieldsTransformer.collect(self.block_structure)
        self.block_structure._collect_requested_xblock_fields()
        # transform phase
        ExtraFieldsTransformer().transform(
            usage_info=None,
            block_structure=self.block_structure,
        )
        block_data = self.block_structure.get_transformer_block_data(
            self.course_usage_key, ExtraFieldsTransformer,
        )
        assert block_data.other_course_settings == self.OTHER_COURSE_SETTINGS_DEFAULT
| agpl-3.0 |
nomadcube/scikit-learn | sklearn/covariance/robust_covariance.py | 198 | 29735 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """Run the C-step procedure used by the FastMCD algorithm.

    Iteratively refines a support of `n_support` observations whose
    empirical scatter matrix has (locally) minimal determinant, following
    Rousseeuw & Van Driessen [Rouseeuw1999]_.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which to search for the `n_support` observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations from which the robust location and
        covariance estimates are computed.

    remaining_iterations : int, optional
        Maximum number of refinement iterations; per [Rouseeuw1999]_ two
        iterations get close to the optimum and 30 is always enough.

    initial_estimates : 2-tuple, optional
        Optional starting point for the procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : boolean, optional
        If True, print convergence information.

    cov_computation_method : callable, default empirical_covariance
        Function used to compute the covariance of the support set.
        Must return shape (n_features, n_features).

    random_state : integer or numpy.RandomState, optional
        Source of randomness for the initial random support. An integer
        fixes the seed; None uses the global numpy generator.

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimate.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimate.

    det : float
        Log-determinant of the final covariance estimate.

    support : array-like, shape (n_samples,)
        Boolean mask of the `n_support` observations whose scatter matrix
        has minimum determinant.

    dist : array-like, shape (n_samples,)
        Mahalanobis-type distances of all samples to the final estimates.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Normalize the inputs once, then delegate to the private implementation.
    rng = check_random_state(random_state)
    data = np.asarray(X)
    return _c_step(data, n_support, remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates, verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=rng)
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    """Private C-step worker: returns (location, covariance, det, support, dist)."""
    n_samples, n_features = X.shape
    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support):
        # rank all samples by Mahalanobis distance to the given estimates and
        # keep the n_support closest ones.
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates
        support[np.argsort(dist)[:n_support]] = True
    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)
    # Iterative procedure for Minimum Covariance Determinant computation:
    # each pass keeps the n_support samples closest (in Mahalanobis distance)
    # to the current estimates; the determinant decreases monotonically.
    det = fast_logdet(covariance)
    previous_det = np.inf
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1
        previous_dist = dist
        # NOTE(review): `precision` still corresponds to the *previous*
        # covariance here, so these distances mix the new location with the
        # old precision — this matches the historical implementation; confirm
        # before changing.
        dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist
    # Check early stopping
    # NOTE(review): when remaining_iterations hits 0 this overwrites the
    # `results` chosen above (including the det-increased rollback) with the
    # current estimates — matches upstream behavior; confirm before changing.
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist
    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Finds the best pure subset of observations to compute MCD from it.
    The purpose of this function is to find the best sets of n_support
    observations with respect to a minimization of their covariance
    matrix determinant. Equivalently, it removes n_samples-n_support
    observations to construct what we call a pure data set (i.e. not
    containing outliers). The list of the observations of the pure
    data set is referred to as the `support`.
    Starting from a random support, the pure data set is found by the
    c_step procedure introduced by Rousseeuw and Van Driessen in
    [Rouseeuw1999]_.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.
    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.
    select : int, int > 0
        Number of best candidates results to return.
    n_trials : int, nb_trials > 0 or 2-tuple
        Number of different initial sets of observations from which to
        run the algorithm.
        Instead of giving a number of trials to perform, one can provide a
        list of initial estimates that will be used to iteratively run
        c_step procedures. In this case:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          is the list of `n_trials` initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          is the list of `n_trials` initial covariances estimates
    n_iter : int, nb_iter > 0
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).
    random_state : integer or numpy.RandomState, default None
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.
    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)
    verbose : boolean, default False
        Control the output verbosity.
    See Also
    ---------
    c_step
    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        The `select` location estimates computed from the `select` best
        supports found in the data set (`X`).
    best_covariances : array-like, shape (select, n_features, n_features)
        The `select` covariance estimates computed from the `select`
        best supports found in the data set (`X`).
    best_supports : array-like, shape (select, n_samples)
        The `select` best supports found in the data set (`X`).
    best_ds : array-like, shape (select, n_samples)
        The distances of all samples for the `select` best candidates.
    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    n_samples, n_features = X.shape
    # An integer n_trials means "run that many random restarts"; a tuple
    # means "refine these precomputed (locations, covariances) estimates".
    if isinstance(n_trials, numbers.Integral):
        run_from_estimates = False
    elif isinstance(n_trials, tuple):
        run_from_estimates = True
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        # NOTE(review): the message contains a doubled space ("or  integer");
        # left untouched since it is runtime text.
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))
    # compute `n_trials` location and shape estimates candidates in the subset
    all_estimates = []
    if not run_from_estimates:
        # perform `n_trials` computations from random initial supports
        for j in range(n_trials):
            all_estimates.append(
                _c_step(
                    X, n_support, remaining_iterations=n_iter, verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state))
    else:
        # perform computations from every given initial estimates
        for j in range(n_trials):
            initial_estimates = (estimates_list[0][j], estimates_list[1][j])
            all_estimates.append(_c_step(
                X, n_support, remaining_iterations=n_iter,
                initial_estimates=initial_estimates, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state))
    # Unzip the 5-tuples returned by _c_step into parallel sequences.
    all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
        zip(*all_estimates)
    # find the `n_best` best results among the `n_trials` ones
    # (candidates with the smallest log-determinant win)
    index_best = np.argsort(all_dets_sub)[:select]
    best_locations = np.asarray(all_locs_sub)[index_best]
    best_covariances = np.asarray(all_covs_sub)[index_best]
    best_supports = np.asarray(all_supports_sub)[index_best]
    best_ds = np.asarray(all_ds_sub)[index_best]
    return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features).

    random_state : integer or numpy.RandomState, optional
        The generator used to randomly subsample. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.

    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : array-like, shape (n_samples,)
        Mahalanobis distances of the observations with respect to the
        raw robust location and covariance estimates.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into a larger subset, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
    """
    random_state = check_random_state(random_state)
    X = np.asarray(X)
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))
        warnings.warn("Only one sample available. "
                      "You may want to reshape your data array")
    n_samples, n_features = X.shape
    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)
    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    #  Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # the support covers all observations: plain mean / variance
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                               (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal).  The number of retained candidates must be
            # shrunk *before* re-allocating, otherwise the second allocation
            # is just as large and fails again.
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
            n_best_sub = 2
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        #    (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                               (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # 1. Find the 10 best couples (location, covariance)
        #    considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]
    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.

    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.

    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.

    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2

    random_state : integer or numpy.RandomState, optional
        The random generator used. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.

    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.

    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.

    location_ : array-like, shape (n_features,)
        Estimated robust location

    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix

    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.

    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.

    References
    ----------
    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
    """
    # non-robust estimator used on candidate support subsets
    _nonrobust_covariance = staticmethod(empirical_covariance)

    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state

    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.

        y : not used, present for API consistence purpose.

        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X)
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # discard the estimated location: recompute covariance and
            # distances about the origin, using the raw support only
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)
        return self

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1999]_.

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.
        """
        # rescale so the median squared distance matches the chi2 median
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # distances are rescaled accordingly, in place
        self.dist_ /= correction
        return covariance_corrected

    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.

        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1999]_

        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.

        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.

        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.

        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.
        """
        n_samples, n_features = data.shape
        # keep observations whose squared Mahalanobis distance is below
        # the chi2 97.5% quantile
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # recompute distances with the re-weighted estimates
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
cyberphox/MissionPlanner | Lib/site-packages/numpy/distutils/fcompiler/lahey.py | 94 | 1368 | import os
from numpy.distutils.fcompiler import FCompiler
compilers = ['LaheyFCompiler']
class LaheyFCompiler(FCompiler):
    """numpy.distutils hooks for the Lahey/Fujitsu Fortran 95 compiler (lf95)."""

    compiler_type = 'lahey'
    description = 'Lahey/Fujitsu Fortran 95 Compiler'
    version_pattern = r'Lahey/Fujitsu Fortran 95 Compiler Release (?P<version>[^\s*]*)'

    executables = {
        'version_cmd': ["<F90>", "--version"],
        'compiler_f77': ["lf95", "--fix"],
        'compiler_fix': ["lf95", "--fix"],
        'compiler_f90': ["lf95"],
        'linker_so': ["lf95", "-shared"],
        'archiver': ["ar", "-cr"],
        'ranlib': ["ranlib"],
    }

    module_dir_switch = None  # XXX Fix me
    module_include_switch = None  # XXX Fix me

    def get_flags_opt(self):
        # Basic optimization only.
        return ['-O']

    def get_flags_debug(self):
        # Debug symbols plus local and global argument checking.
        return ['-g', '--chk', '--chkglobal']

    def get_library_dirs(self):
        # The LAHEY environment variable points at the installation root.
        lahey_root = os.environ.get('LAHEY')
        return [os.path.join(lahey_root, 'lib')] if lahey_root else []

    def get_libraries(self):
        # Fujitsu runtime support libraries.
        return ['fj9f6', 'fj9i6', 'fj9ipp', 'fj9e6']
if __name__ == '__main__':
    # Manual smoke test: locate the Lahey compiler and print its version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils.fcompiler import new_fcompiler
    lf95 = new_fcompiler(compiler='lahey')
    lf95.customize()
    print(lf95.get_version())
| gpl-3.0 |
pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/alembic/testing/exclusions.py | 3 | 12818 | # testing/exclusions.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
import operator
from .plugin.plugin_base import SkipTest
from sqlalchemy.util import decorator
from . import config
from sqlalchemy import util
from ..util import compat
import inspect
import contextlib
from .compat import get_url_driver_name, get_url_backend_name
def skip_if(predicate, reason=None):
    """Build a rule that skips the test when *predicate* holds."""
    rule = compound()
    rule.skips.add(_as_predicate(predicate, reason))
    return rule
def fails_if(predicate, reason=None):
    """Build a rule marking the test as an expected failure under *predicate*."""
    rule = compound()
    rule.fails.add(_as_predicate(predicate, reason))
    return rule
class compound(object):
    """Aggregate of skip/fail predicates plus tags attached to a test.

    Instances are combined with ``add``/``+``, applied to test functions
    as decorators, or used as a context manager via ``fail_if``.
    """
    def __init__(self):
        self.fails = set()
        self.skips = set()
        self.tags = set()
    def __add__(self, other):
        return self.add(other)
    def add(self, *others):
        # combination is non-destructive: a fresh compound is returned
        copy = compound()
        copy.fails.update(self.fails)
        copy.skips.update(self.skips)
        copy.tags.update(self.tags)
        for other in others:
            copy.fails.update(other.fails)
            copy.skips.update(other.skips)
            copy.tags.update(other.tags)
        return copy
    def not_(self):
        # invert every predicate; tags carry over unchanged
        copy = compound()
        copy.fails.update(NotPredicate(fail) for fail in self.fails)
        copy.skips.update(NotPredicate(skip) for skip in self.skips)
        copy.tags.update(self.tags)
        return copy
    @property
    def enabled(self):
        return self.enabled_for_config(config._current)
    def enabled_for_config(self, config):
        # enabled only when no skip/fail predicate matches the config
        for predicate in self.skips.union(self.fails):
            if predicate(config):
                return False
        else:
            return True
    def matching_config_reasons(self, config):
        """Human-readable reasons for every predicate matching *config*."""
        return [
            predicate._as_string(config) for predicate
            in self.skips.union(self.fails)
            if predicate(config)
        ]
    def include_test(self, include_tags, exclude_tags):
        # excluded tags always win; include_tags empty means "include all"
        return bool(
            not self.tags.intersection(exclude_tags) and
            (not include_tags or self.tags.intersection(include_tags))
        )
    def _extend(self, other):
        self.skips.update(other.skips)
        self.fails.update(other.fails)
        self.tags.update(other.tags)
    def __call__(self, fn):
        # decorator form; stacked exclusions extend the existing wrapper
        # instead of wrapping the function a second time
        if hasattr(fn, '_sa_exclusion_extend'):
            fn._sa_exclusion_extend._extend(self)
            return fn
        @decorator
        def decorate(fn, *args, **kw):
            return self._do(config._current, fn, *args, **kw)
        decorated = decorate(fn)
        decorated._sa_exclusion_extend = self
        return decorated
    @contextlib.contextmanager
    def fail_if(self):
        # context-manager form: both skips and fails are treated as
        # expected failures around the enclosed block
        all_fails = compound()
        all_fails.fails.update(self.skips.union(self.fails))
        try:
            yield
        except Exception as ex:
            all_fails._expect_failure(config._current, ex)
        else:
            all_fails._expect_success(config._current)
    def _do(self, config, fn, *args, **kw):
        # run fn under this compound's rules: skip, expect-fail, or pass
        for skip in self.skips:
            if skip(config):
                msg = "'%s' : %s" % (
                    fn.__name__,
                    skip._as_string(config)
                )
                raise SkipTest(msg)
        try:
            return_value = fn(*args, **kw)
        except Exception as ex:
            self._expect_failure(config, ex, name=fn.__name__)
        else:
            self._expect_success(config, name=fn.__name__)
            return return_value
    def _expect_failure(self, config, ex, name='block'):
        # swallow the exception only when a fail predicate matches;
        # otherwise re-raise with the original cause preserved
        for fail in self.fails:
            if fail(config):
                print(("%s failed as expected (%s): %s " % (
                    name, fail._as_string(config), str(ex))))
                break
        else:
            compat.raise_from_cause(ex)
    def _expect_success(self, config, name='block'):
        # success is an error when every fail predicate matched
        if not self.fails:
            return
        for fail in self.fails:
            if not fail(config):
                break
        else:
            raise AssertionError(
                "Unexpected success for '%s' (%s)" %
                (
                    name,
                    " and ".join(
                        fail._as_string(config)
                        for fail in self.fails
                    )
                )
            )
def requires_tag(tagname):
    """Shortcut for tagging a test with a single tag."""
    return tags([tagname])


def tags(tagnames):
    """Build a compound carrying the given collection of tags."""
    tagged = compound()
    tagged.tags.update(tagnames)
    return tagged
def only_if(predicate, reason=None):
    """Skip the test unless *predicate* holds (inverse of skip_if)."""
    return skip_if(NotPredicate(_as_predicate(predicate)), reason)


def succeeds_if(predicate, reason=None):
    """Expect failure unless *predicate* holds (inverse of fails_if)."""
    return fails_if(NotPredicate(_as_predicate(predicate)), reason)
class Predicate(object):
    """Base class for configuration predicates.

    ``as_predicate`` coerces the various accepted spec formats
    (compound, Predicate, list/set, tuple, "db op version" string,
    callable) into a Predicate instance.
    """
    @classmethod
    def as_predicate(cls, predicate, description=None):
        if isinstance(predicate, compound):
            return cls.as_predicate(predicate.fails.union(predicate.skips))
        elif isinstance(predicate, Predicate):
            if description and predicate.description is None:
                predicate.description = description
            return predicate
        elif isinstance(predicate, (list, set)):
            return OrPredicate(
                [cls.as_predicate(pred) for pred in predicate],
                description)
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, compat.string_types):
            # string form: "db", "db op", or "db op version"
            tokens = predicate.split(" ", 2)
            op = spec = None
            db = tokens.pop(0)
            if tokens:
                op = tokens.pop(0)
            if tokens:
                spec = tuple(int(d) for d in tokens.pop(0).split("."))
            return SpecPredicate(db, op, spec, description=description)
        elif util.callable(predicate):
            return LambdaPredicate(predicate, description)
        else:
            assert False, "unknown predicate type: %s" % predicate
    def _format_description(self, config, negate=False):
        bool_ = self(config)
        if negate:
            # NOTE(review): `not negate` is always False here; `not bool_`
            # looks intended -- confirm against upstream SQLAlchemy before
            # changing.
            bool_ = not negate
        return self.description % {
            "driver": get_url_driver_name(config.db.url),
            "database": get_url_backend_name(config.db.url),
            "doesnt_support": "doesn't support" if bool_ else "does support",
            "does_support": "does support" if bool_ else "doesn't support"
        }
    def _as_string(self, config=None, negate=False):
        raise NotImplementedError()
class BooleanPredicate(Predicate):
    """A predicate that always evaluates to a fixed truth value."""

    def __init__(self, value, description=None):
        if not description:
            description = "boolean %s" % value
        self.value = value
        self.description = description

    def __call__(self, config):
        # The configuration is irrelevant; the answer is constant.
        return self.value

    def _as_string(self, config, negate=False):
        return self._format_description(config, negate=negate)
class SpecPredicate(Predicate):
    """Predicate matching a database backend/driver and optional version.

    ``db`` is either a bare dialect name or a "dialect+driver" pair;
    ``op``/``spec`` optionally compare the server version tuple,
    e.g. ``(">=", (9, 2))``.
    """
    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description
    # comparison operators accepted as the ``op`` string
    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }
    def __call__(self, config):
        engine = config.db
        # split "dialect+driver" if a driver was specified
        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None
        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False
        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"
            version = _server_version(engine)
            # ``op`` may itself be a callable instead of an operator key
            oper = hasattr(self.op, '__call__') and self.op \
                or self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True
    def _as_string(self, config, negate=False):
        if self.description is not None:
            return self._format_description(config)
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
class LambdaPredicate(Predicate):
    """Predicate evaluating a user-supplied callable against the config.

    Zero-argument callables are wrapped so they can still be invoked with
    the config argument; the callable's docstring is used as the default
    description.
    """
    def __init__(self, lambda_, description=None, args=None, kw=None):
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec() and fall back only where it is unavailable
        # (Python 2).  Element [0] is the positional-argument list in both.
        try:
            spec = inspect.getfullargspec(lambda_)
        except AttributeError:
            spec = inspect.getargspec(lambda_)
        if not spec[0]:
            # no positional args: accept (and ignore) the config argument
            self.lambda_ = lambda db: lambda_()
        else:
            self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        if description:
            self.description = description
        elif lambda_.__doc__:
            self.description = lambda_.__doc__
        else:
            self.description = "custom function"
    def __call__(self, config):
        return self.lambda_(config)
    def _as_string(self, config, negate=False):
        return self._format_description(config)
class NotPredicate(Predicate):
    """Logical negation of another predicate."""

    def __init__(self, predicate, description=None):
        self.predicate = predicate
        self.description = description

    def __call__(self, config):
        return not self.predicate(config)

    def _as_string(self, config, negate=False):
        # Negation flips the "negate" flag when delegating downward.
        if self.description:
            return self._format_description(config, not negate)
        return self.predicate._as_string(config, not negate)
class OrPredicate(Predicate):
    """True when any one of the wrapped predicates matches the config."""

    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description

    def __call__(self, config):
        return any(pred(config) for pred in self.predicates)

    def _eval_str(self, config, negate=False):
        # De Morgan: the negated disjunction reads as a conjunction.
        conjunction = " and " if negate else " or "
        return conjunction.join(
            pred._as_string(config, negate=negate)
            for pred in self.predicates)

    def _negation_str(self, config):
        if self.description is not None:
            return "Not " + self._format_description(config)
        return self._eval_str(config, negate=True)

    def _as_string(self, config, negate=False):
        if negate:
            return self._negation_str(config)
        if self.description is not None:
            return self._format_description(config)
        return self._eval_str(config)
# Shorthand used by the module-level helper functions in this module.
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
    """Return True if the current configuration matches ``db op spec``."""
    return SpecPredicate(db, op, spec)(config._current)
def _server_version(engine):
    """Return a server_version_info tuple for *engine*'s dialect."""
    # Opening a connection forces dialect initialization so that the
    # attribute is populated before we read it.
    connection = engine.connect()
    info = getattr(engine.dialect, 'server_version_info', ())
    connection.close()
    return info
def db_spec(*dbs):
    """Build a predicate matching any of the given database specs."""
    return OrPredicate([Predicate.as_predicate(db) for db in dbs])
def open():
    """A rule that never skips: the test always executes."""
    return skip_if(BooleanPredicate(False, "mark as execute"))


def closed():
    """A rule that unconditionally skips the test."""
    return skip_if(BooleanPredicate(True, "marked as skip"))


def fails(msg=None):
    """A rule that unconditionally expects the test to fail."""
    reason = msg if msg else "expected to fail"
    return fails_if(BooleanPredicate(True, reason))


@decorator
def future(fn, *arg):
    """Mark a test as exercising a future feature (expected failure)."""
    return fails_if(LambdaPredicate(fn), "Future feature")
def fails_on(db, reason=None):
    """Expect failure on the given database spec."""
    return fails_if(SpecPredicate(db), reason)


def fails_on_everything_except(*dbs):
    """Expect failure on every backend other than those given."""
    matching = [Predicate.as_predicate(db) for db in dbs]
    return succeeds_if(OrPredicate(matching))


def skip(db, reason=None):
    """Skip the test on the given database spec."""
    return skip_if(SpecPredicate(db), reason)


def only_on(dbs, reason=None):
    """Run the test only on the given database spec(s)."""
    matching = [Predicate.as_predicate(db) for db in util.to_list(dbs)]
    return only_if(OrPredicate(matching))


def exclude(db, op, spec, reason=None):
    """Skip the test when ``db op spec`` matches the configuration."""
    return skip_if(SpecPredicate(db, op, spec), reason)
def against(config, *queries):
    """Evaluate one or more predicate specs against *config*."""
    assert queries, "no queries sent!"
    combined = OrPredicate(
        [Predicate.as_predicate(query) for query in queries])
    return combined(config)
| mit |
mbrubeck/servo | tests/wpt/web-platform-tests/tools/pywebsocket/mod_pywebsocket/memorizingfile.py | 14 | 3902 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
    """MemorizingFile wraps a file and memorizes lines read by readline.

    Note that data read by other methods are not memorized. This behavior
    is good enough for memorizing lines SimpleHTTPServer reads before
    the control reaches WebSocketRequestHandler.
    """

    # sys.maxint was removed in Python 3; sys.maxsize exists on
    # Python 2.6+ and Python 3 and serves the same "unbounded" purpose.
    def __init__(self, file_, max_memorized_lines=sys.maxsize):
        """Construct an instance.

        Args:
            file_: the file object to wrap.
            max_memorized_lines: the maximum number of lines to memorize.
                Only the first max_memorized_lines are memorized.
                Default: sys.maxsize.
        """
        self._file = file_
        self._memorized_lines = []
        self._max_memorized_lines = max_memorized_lines
        self._buffered = False
        self._buffered_line = None

    def __getattribute__(self, name):
        """Return a file attribute.

        Returns the value overridden by this class for some attributes,
        and forwards the call to _file for the other attributes.
        """
        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
                    '_buffered', '_buffered_line', 'readline',
                    'get_memorized_lines'):
            return object.__getattribute__(self, name)
        return self._file.__getattribute__(name)

    def readline(self, size=-1):
        """Override file.readline and memorize the line read.

        Note that even if size is specified and smaller than actual size,
        the whole line will be read out from underlying file object by
        subsequent readline calls.
        """
        if self._buffered:
            # serve the remainder of a previous size-limited read
            line = self._buffered_line
            self._buffered = False
        else:
            line = self._file.readline()
            if line and len(self._memorized_lines) < self._max_memorized_lines:
                self._memorized_lines.append(line)
        if size >= 0 and size < len(line):
            # hand back only `size` characters; keep the rest buffered
            # for the next call
            self._buffered = True
            self._buffered_line = line[size:]
            return line[:size]
        return line

    def get_memorized_lines(self):
        """Get lines memorized so far."""
        return self._memorized_lines
# vi:sts=4 sw=4 et
| mpl-2.0 |
bww/webasm | resources/tags/pygments/pygments/lexers/web.py | 1 | 63329 | # -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, include, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Punctuation
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches
from pygments.lexers.agile import RubyLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ObjectiveJLexer']
class JavascriptLexer(RegexLexer):
    """
    For JavaScript source code.
    """

    name = 'JavaScript'
    aliases = ['js', 'javascript']
    filenames = ['*.js']
    mimetypes = ['application/x-javascript', 'text/x-javascript', 'text/javascript']

    flags = re.DOTALL
    tokens = {
        # whitespace and comment rules shared by several states
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'<!--', Comment),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline)
        ],
        # entered where a "/" would begin a regex literal (after operators,
        # opening punctuation or at line start) rather than a division sign
        'slashstartsregex': [
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop')
        ],
        # recovery state for a "/" that could not be lexed as a regex
        'badregex': [
            ('\n', Text, '#pop')
        ],
        'root': [
            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
            include('commentsandwhitespace'),
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
             r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'this)\b', Keyword, 'slashstartsregex'),
            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
            # words reserved for future use by the language specification
            (r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
             r'extends|final|float|goto|implements|import|int|interface|long|native|'
             r'package|private|protected|public|short|static|super|synchronized|throws|'
             r'transient|volatile)\b', Keyword.Reserved),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }
class ActionScriptLexer(RegexLexer):
    """
    For ActionScript source code.

    *New in Pygments 0.9.*
    """
    name = 'ActionScript'
    aliases = ['as', 'actionscript']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']

    flags = re.DOTALL
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            # Regex literal (must come before the operator rule below).
            (r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
            (r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
            (r'[{}\[\]();.]+', Punctuation),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
             r'switch)\b', Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
             Keyword.Constant),
            # The Flash player API class library, as one big alternation.
            (r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
             r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
             r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
             r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
             r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
             r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
             r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
             r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
             r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
             r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
             r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
             r'EventPhase|ExternalInterface|FileFilter|FileReference|'
             r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
             r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
             r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
             r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
             r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
             r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
             r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
             r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
             r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
             r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
             r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
             r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
             r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
             r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
             r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
             r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
             r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
             r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
             r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
             r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
             r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
             r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
             r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
             r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
             r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
             r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
             Name.Builtin),
            # Global functions.
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ]
    }

    def analyse_text(text):
        # Weak, constant confidence: *.as files are shared with AS3, so
        # defer to ActionScript3Lexer's stronger heuristic.
        return 0.05
class ActionScript3Lexer(RegexLexer):
    """
    For ActionScript 3 source code.

    *New in Pygments 0.11.*
    """
    name = 'ActionScript 3'
    aliases = ['as3', 'actionscript3']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript',
                 'text/actionscript']

    # Reused fragment for identifiers throughout the token table.
    identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            (r'\s+', Text),
            # function declaration; parameters handled in 'funcparams'.
            (r'(function\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword.Declaration, Name.Function, Text, Operator),
             'funcparams'),
            # typed variable/constant declaration: var x : Type
            (r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' + identifier + r')',
             bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
                      Keyword.Type)),
            (r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
             bygroups(Keyword, Text, Name.Namespace, Text)),
            (r'(new)(\s+)(' + identifier + r')(\s*)(\()',
             bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
            (r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
            (r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
             r'switch|import|include|as|is)\b',
             Keyword),
            (r'(class|public|final|internal|native|override|private|protected|'
             r'static|import|extends|implements|interface|intrinsic|return|super|'
             r'dynamic|function|const|get|namespace|package|set)\b',
             Keyword.Declaration),
            (r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
             Keyword.Constant),
            (r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
             r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
             r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
             r'unescape)\b', Name.Function),
            (identifier, Name),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]();.-]+', Operator),
        ],
        # Inside a function's parameter list: "name : Type" pairs.
        'funcparams': [
            (r'\s+', Text),
            (r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
             identifier + r'|\*)(\s*)',
             bygroups(Text, Punctuation, Name, Text, Operator, Text,
                      Keyword.Type, Text), 'defval'),
            (r'\)', Operator, 'type')
        ],
        # Optional return-type annotation after the closing paren;
        # '#pop:2' leaves both 'type' and 'funcparams' at once.
        'type': [
            (r'(\s*)(:)(\s*)(' + identifier + r'|\*)',
             bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
            (r'\s*', Text, '#pop:2')
        ],
        # Optional default value for a parameter; 'using(this)' re-lexes
        # the value with this same lexer.
        'defval': [
            (r'(=)(\s*)([^(),]+)(\s*)(,?)',
             bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
            (r',?', Operator, '#pop')
        ]
    }

    def analyse_text(text):
        # Typed declarations ("x : Type") are a strong AS3 marker.
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
        return 0.1
class CssLexer(RegexLexer):
    """
    For CSS (Cascading Style Sheets).

    Selectors are lexed in 'basics'; declaration blocks (between braces)
    in 'content'; @-rules get their own 'atrule'/'atcontent' states so
    that nested braces pop correctly.
    """
    name = 'CSS'
    aliases = ['css']
    filenames = ['*.css']
    mimetypes = ['text/css']

    tokens = {
        'root': [
            include('basics'),
        ],
        # Selector-level constructs, shared by 'root' and the @-rule states.
        'basics': [
            (r'\s+', Text),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'{', Punctuation, 'content'),
            (r'\:[a-zA-Z0-9_-]+', Name.Decorator),
            (r'\.[a-zA-Z0-9_-]+', Name.Class),
            (r'\#[a-zA-Z0-9_-]+', Name.Function),
            (r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
            (r'[a-zA-Z0-9_-]+', Name.Tag),
            (r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single)
        ],
        # After '@media', '@import', etc.; either a block or a ';'.
        'atrule': [
            (r'{', Punctuation, 'atcontent'),
            (r';', Punctuation, '#pop'),
            include('basics'),
        ],
        'atcontent': [
            include('basics'),
            # Pop both 'atcontent' and the enclosing 'atrule'.
            (r'}', Punctuation, '#pop:2'),
        ],
        # Inside a declaration block: properties and values.
        'content': [
            (r'\s+', Text),
            (r'}', Punctuation, '#pop'),
            (r'url\(.*?\)', String.Other),
            (r'^@.*?$', Comment.Preproc),
            # CSS 2.1 property names and keyword values, one alternation.
            (r'(azimuth|background-attachment|background-color|'
             r'background-image|background-position|background-repeat|'
             r'background|border-bottom-color|border-bottom-style|'
             r'border-bottom-width|border-left-color|border-left-style|'
             r'border-left-width|border-right|border-right-color|'
             r'border-right-style|border-right-width|border-top-color|'
             r'border-top-style|border-top-width|border-bottom|'
             r'border-collapse|border-left|border-width|border-color|'
             r'border-spacing|border-style|border-top|border|caption-side|'
             r'clear|clip|color|content|counter-increment|counter-reset|'
             r'cue-after|cue-before|cue|cursor|direction|display|'
             r'elevation|empty-cells|float|font-family|font-size|'
             r'font-size-adjust|font-stretch|font-style|font-variant|'
             r'font-weight|font|height|letter-spacing|line-height|'
             r'list-style-type|list-style-image|list-style-position|'
             r'list-style|margin-bottom|margin-left|margin-right|'
             r'margin-top|margin|marker-offset|marks|max-height|max-width|'
             r'min-height|min-width|opacity|orphans|outline|outline-color|'
             r'outline-style|outline-width|overflow(?:-x|-y|)|padding-bottom|'
             r'padding-left|padding-right|padding-top|padding|page|'
             r'page-break-after|page-break-before|page-break-inside|'
             r'pause-after|pause-before|pause|pitch|pitch-range|'
             r'play-during|position|quotes|richness|right|size|'
             r'speak-header|speak-numeral|speak-punctuation|speak|'
             r'speech-rate|stress|table-layout|text-align|text-decoration|'
             r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
             r'vertical-align|visibility|voice-family|volume|white-space|'
             r'widows|width|word-spacing|z-index|bottom|left|'
             r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
             r'behind|below|bidi-override|blink|block|bold|bolder|both|'
             r'capitalize|center-left|center-right|center|circle|'
             r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
             r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
             r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
             r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
             r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
             r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
             r'inherit|inline-table|inline|inset|inside|invert|italic|'
             r'justify|katakana-iroha|katakana|landscape|larger|large|'
             r'left-side|leftwards|level|lighter|line-through|list-item|'
             r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
             r'lower|low|medium|message-box|middle|mix|monospace|'
             r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
             r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
             r'open-quote|outset|outside|overline|pointer|portrait|px|'
             r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
             r'rightwards|s-resize|sans-serif|scroll|se-resize|'
             r'semi-condensed|semi-expanded|separate|serif|show|silent|'
             r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
             r'spell-out|square|static|status-bar|super|sw-resize|'
             r'table-caption|table-cell|table-column|table-column-group|'
             r'table-footer-group|table-header-group|table-row|'
             r'table-row-group|text|text-bottom|text-top|thick|thin|'
             r'transparent|ultra-condensed|ultra-expanded|underline|'
             r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
             r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
             r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
            # Named colors from the HTML/X11 palette.
            (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
             r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
             r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
             r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
             r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
             r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
             r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
             r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
             r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
             r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
             r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
             r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
             r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
             r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
             r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
             r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
             r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
             r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
             r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
             r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
             r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
             r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
             r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
             r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
             r'blueviolet|peachpuff)\b', Name.Builtin),
            (r'\!important', Comment.Preproc),
            (r'/\*(?:.|\n)*?\*/', Comment),
            (r'\#[a-zA-Z0-9]{1,6}', Number),
            # Dimension: number with a unit suffix.
            (r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex)', Number),
            (r'-?[0-9]+', Number),
            (r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
            (r'[\[\]();]+', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z][a-zA-Z0-9]+', Name)
        ]
    }
class ObjectiveJLexer(RegexLexer):
    """
    For Objective-J source code with preprocessor directives.

    Combines a JavaScript-style statement lexer with Objective-C-like
    message/selector syntax (``@implementation``, ``- (type)name:...``).

    *New in Pygments 1.3.*
    """
    name = 'Objective-J'
    aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
    filenames = ['*.j']
    mimetypes = ['text/x-objective-j']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'

    flags = re.DOTALL | re.MULTILINE
    tokens = {
        'root': [
            include('whitespace'),

            # function definition
            (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
             bygroups(using(this), using(this, state='function_signature'),
                      using(this))),

            # class definition
            (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
             'classname'),
            (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
             'forward_classname'),
            (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),

            include('statements'),
            ('[{\(\)}]', Punctuation),
            (';', Punctuation),
        ],
        # Whitespace, comments, and preprocessor/import directives.
        'whitespace': [
            (r'(@import)(\s+)("(\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(@import)(\s+)(<(\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)("(\\\\|\\"|[^"])*")',
             bygroups(Comment.Preproc, Text, String.Double)),
            (r'(#(?:include|import))(\s+)(<(\\\\|\\>|[^>])*>)',
             bygroups(Comment.Preproc, Text, String.Double)),

            (r'#if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),

            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'<!--', Comment),
        ],
        # Same regex-vs-division disambiguation as in JavascriptLexer.
        'slashstartsregex': [
            include('whitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            (r'', Text, '#pop'),
        ],
        'badregex': [
            ('\n', Text, '#pop'),
        ],
        'statements': [
            (r'(L|@)?"', String, 'string'),
            (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
             String.Char),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
            (r'0[0-7]+[Ll]?', Number.Oct),
            (r'\d+[Ll]?', Number.Integer),

            (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),

            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
             Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),

            (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
             r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
             r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),

            (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),

            (r'(@selector|@private|@protected|@public|@encode|'
             r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
             r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),

            (r'(int|long|float|short|double|char|unsigned|signed|void|'
             r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
             Keyword.Type),

            (r'(self|super)\b', Name.Builtin),

            (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
            (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
            (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
             r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
             r'SQRT2)\b', Keyword.Constant),

            (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
             r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
             r'decodeURIComponent|encodeURI|encodeURIComponent|'
             r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
             r'window)\b', Name.Builtin),

            # name followed by '(' => plain function call.
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
             bygroups(Name.Function, using(this))),

            (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname' : [
            # interface definition that inherits
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
             r')([a-zA-Z_][a-zA-Z0-9_]*)?',
             bygroups(Name.Class, using(this), Name.Class), '#pop'),
            # interface definition for a category
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
             bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
            # simple interface / implementation
            (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
        ],
        # Comma-separated class list after @class/@protocol.
        'forward_classname' : [
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
             bygroups(Name.Class, Text), '#push'),
            (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
             bygroups(Name.Class, Text), '#pop'),
        ],
        'function_signature': [
            include('whitespace'),

            # start of a selector w/ parameters
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), 'function_parameters'),

            # no-param function
            (r'(\(' + _ws + r')'                # open paren
             r'([a-zA-Z_][a-zA-Z0-9_]+)'        # return type
             r'(' + _ws + r'\)' + _ws + r')'    # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(using(this), Keyword.Type, using(this),
                      Name.Function), "#pop"),

            # no return type given, start of a selector w/ parameters
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
             bygroups (Name.Function), 'function_parameters'),

            # no return type given, no-param function
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # function name
             bygroups(Name.Function), "#pop"),

            ('', Text, '#pop'),
        ],
        'function_parameters': [
            include('whitespace'),

            # parameters
            (r'(\(' + _ws + ')'                 # open paren
             r'([^\)]+)'                        # type
             r'(' + _ws + r'\)' + _ws + r')+'   # close paren
             r'([$a-zA-Z_][a-zA-Z0-9_]+)',      # param name
             bygroups(using(this), Keyword.Type, using(this), Text)),

            # one piece of a selector name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
             Name.Function),

            # smallest possible selector piece
            (r'(:)', Name.Function),

            # var args
            (r'(,' + _ws + r'...)', using(this)),

            # param name
            (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
        ],
        'expression' : [
            (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
                                                        Punctuation)),
            (r'(\))', Punctuation, "#pop"),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        # C-preprocessor line after '#'.
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        # Skip over "#if 0 ... #endif" regions, nesting-aware.
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
            (r'.*?\n', Comment),
        ]
    }

    def analyse_text(text):
        if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
            # special directive found in most Objective-J files
            return True
        return False
class HtmlLexer(RegexLexer):
    """
    For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
    by the appropriate lexer.
    """
    name = 'HTML'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            # <script>/<style>: lex attrs in 'tag', then hand the body to
            # the JavaScript/CSS lexer.
            (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
            (r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'[a-zA-Z0-9_:-]+', Name.Attribute),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'script-content': [
            (r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
        ],
        'style-content': [
            (r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # Returns None implicitly when no doctype is found, which the
        # guessing machinery treats as "no opinion".
        if html_doctype_matches(text):
            return 0.5
class PhpLexer(RegexLexer):
    """
    For `PHP <http://www.php.net/>`_ source code.
    For PHP embedded in HTML, use the `HtmlPhpLexer`.

    Additional options accepted:

    `startinline`
        If given and ``True`` the lexer starts highlighting with
        php code (i.e.: no starting ``<?php`` required).  The default
        is ``False``.
    `funcnamehighlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabledmodules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted
        except the special ``'unknown'`` module that includes functions
        that are known to php but are undocumented.

        To get a list of allowed modules have a look into the
        `_phpbuiltins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._phpbuiltins import MODULES
            >>> MODULES.keys()
            ['PHP Options/Info', 'Zip', 'dba', ...]

        In fact the names of those modules match the module names from
        the php documentation.
    """

    name = 'PHP'
    aliases = ['php', 'php3', 'php4', 'php5']
    filenames = ['*.php', '*.php[345]']
    mimetypes = ['text/x-php']

    flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
    tokens = {
        # Everything outside '<?php ... ?>' is passed through as Other.
        'root': [
            (r'<\?(php)?', Comment.Preproc, 'php'),
            (r'[^<]+', Other),
            (r'<', Other)
        ],
        'php': [
            (r'\?>', Comment.Preproc, '#pop'),
            # heredoc: <<<LABEL ... LABEL;  (\1 back-references the label)
            (r'<<<([a-zA-Z_][a-zA-Z0-9_]*)\n.*?\n\1\;?\n', String),
            (r'\s+', Text),
            (r'#.*?\n', Comment.Single),
            (r'//.*?\n', Comment.Single),
            # put the empty comment here, it is otherwise seen as
            # the start of a docstring
            (r'/\*\*/', Comment.Multiline),
            (r'/\*\*.*?\*/', String.Doc),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Attribute)),
            (r'[~!%^&*+=|:.<>/?@-]+', Operator),
            (r'[\[\]{}();,]+', Punctuation),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(function)(\s+)(&?)(\s*)',
             bygroups(Keyword, Text, Operator, Text), 'functionname'),
            (r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Constant)),
            (r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
             r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
             r'FALSE|print|for|require|continue|foreach|require_once|'
             r'declare|return|default|static|do|switch|die|stdClass|'
             r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
             r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
             r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
             r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
             r'implements|public|private|protected|abstract|clone|try|'
             r'catch|throw|this)\b', Keyword),
            # FIX: this pattern was a non-raw string, so '\b' was a literal
            # backspace (\x08) rather than a word-boundary assertion.
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
            (r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
            # FIX: the exponent part was written '(eE[+-][0-9])?', which
            # matched the literal text "eE"; it is now a proper optional
            # exponent with an optional sign ('1e3', '1.5E-2', ...).
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
            (r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
            (r'"', String.Double, 'string'),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'functionname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        # Double-quoted string body with variable interpolation.
        'string': [
            (r'"', String.Double, '#pop'),
            (r'[^{$"\\]+', String.Double),
            (r'\\([nrt\"$]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
             String.Interpol),
            (r'(\{\$\{)(.*?)(\}\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\{)(\$.*?)(\})',
             bygroups(String.Interpol, using(this, _startinline=True),
                      String.Interpol)),
            (r'(\$\{)(\S+)(\})',
             bygroups(String.Interpol, Name.Variable, String.Interpol)),
            (r'[${\\]+', String.Double)
        ],
    }

    def __init__(self, **options):
        """Read lexer options and pre-build the builtin-function set."""
        self.funcnamehighlighting = get_bool_opt(
            options, 'funcnamehighlighting', True)
        self.disabledmodules = get_list_opt(
            options, 'disabledmodules', ['unknown'])
        self.startinline = get_bool_opt(options, 'startinline', False)

        # private option argument for the lexer itself
        if '_startinline' in options:
            self.startinline = options.pop('_startinline')

        # collect activated functions in a set
        self._functions = set()
        if self.funcnamehighlighting:
            from pygments.lexers._phpbuiltins import MODULES
            # FIX: dict.iteritems() does not exist on Python 3 and raised
            # AttributeError here; items() is the equivalent call.
            for key, value in MODULES.items():
                if key not in self.disabledmodules:
                    self._functions.update(value)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Upgrade Name.Other tokens to Name.Builtin for known functions."""
        stack = ['root']
        if self.startinline:
            stack.append('php')
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text, stack):
            if token is Name.Other:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
            yield index, token, value

    def analyse_text(text):
        rv = 0.0
        # '<?' not followed by 'xml' strongly suggests a PHP open tag.
        if re.search(r'<\?(?!xml)', text):
            rv += 0.3
        if '?>' in text:
            rv += 0.1
        return rv
class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml',
                 'application/xsl+xml', 'application/xslt+xml']

    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        # Inside an opening tag: attributes until '>' or '/>'.
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        # Implicit None when it doesn't look like XML ("no opinion").
        if looks_like_xml(text):
            return 0.5
class XsltLexer(XmlLexer):
    """
    A lexer for XSLT stylesheets: XML lexing with well-known ``xsl:``
    element names promoted to keywords.

    *New in Pygments 0.10.*
    """
    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt']

    # XSLT element names that are re-tagged from Name.Tag to Keyword.
    EXTRA_KEYWORDS = {
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param',
    }

    def get_tokens_unprocessed(self, text):
        """Delegate to XmlLexer, upgrading known xsl: tags to Keyword."""
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            if token is Name.Tag:
                match = re.match('</?xsl:([^>]*)/?>?', value)
                if match is not None and match.group(1) in self.EXTRA_KEYWORDS:
                    yield index, Keyword, value
                    continue
            yield index, token, value

    def analyse_text(text):
        # Strong signal: looks like XML and mentions an <xsl element.
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8
class MxmlLexer(RegexLexer):
    """
    For MXML markup.
    Nested AS3 in <script> tags is highlighted by the appropriate lexer.
    """
    flags = re.MULTILINE | re.DOTALL
    name = 'MXML'
    aliases = ['mxml']
    filenames = ['*.mxml']
    # FIX: the attribute was misspelled 'mimetimes', so these MIME types
    # were never registered with pygments' lexer-lookup machinery.
    mimetypes = ['text/xml', 'application/xml']
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            # CDATA body is handed to the ActionScript 3 lexer.
            (r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
             bygroups(String, using(ActionScript3Lexer), String)),
            ('<!--', Comment, 'comment'),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
        ],
        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],
        'tag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('\s+', Text),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
class HaxeLexer(RegexLexer):
    """
    For haXe source code (http://haxe.org/).

    Organized as small composable states: shared fragments (whitespace,
    comments, literals), instance/block-level constructs, then top-level
    constructs (class/enum/typedef/interface definitions).
    """
    name = 'haXe'
    aliases = ['hx', 'haXe']
    filenames = ['*.hx']
    mimetypes = ['text/haxe']

    # Reused regex fragments.
    ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
    typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
    key_prop = r'(?:default|null|never)'
    key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'root': [
            include('whitespace'),
            include('comments'),
            (key_decl_mod, Keyword.Declaration),
            include('enumdef'),
            include('typedef'),
            include('classdef'),
            include('imports'),
        ],

        # General constructs
        'comments': [
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'#[^\n]*', Comment.Preproc),
        ],
        'whitespace': [
            include('comments'),
            (r'\s+', Text),
        ],
        'codekeywords': [
            (r'\b(if|else|while|do|for|in|break|continue|'
             r'return|switch|case|try|catch|throw|null|trace|'
             r'new|this|super|untyped|cast|callback|here)\b',
             Keyword.Reserved),
        ],
        'literals': [
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'~/([^\n])*?/[gisx]*', String.Regex),
            (r'\b(true|false|null)\b', Keyword.Constant),
        ],
        # Statement-level code inside braces; '{' pushes, '}' pops.
        'codeblock': [
            include('whitespace'),
            include('new'),
            include('case'),
            include('anonfundef'),
            include('literals'),
            include('vardef'),
            include('codekeywords'),
            (r'[();,\[\]]', Punctuation),
            (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
             r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>|>>>|\+|\-|\*|/|%|'
             r'!|\+\+|\-\-|~|\.|\?|\:)',
             Operator),
            (ident, Name),

            (r'}', Punctuation,'#pop'),
            (r'{', Punctuation,'#push'),
        ],

        # Instance/Block level constructs
        'propertydef': [
            # property access pair, e.g. (default, null)
            (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
             bygroups(Punctuation, Keyword.Reserved, Punctuation,
                      Keyword.Reserved, Punctuation)),
        ],
        'new': [
            (r'\bnew\b', Keyword, 'typedecl'),
        ],
        'case': [
            (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
             bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
             'funargdecl'),
        ],
        'vardef': [
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
        ],
        'vardecl': [
            include('whitespace'),
            include('typelabel'),
            (r'=', Operator,'#pop'),
            (r';', Punctuation,'#pop'),
        ],
        'instancevardef': [
            (key_decl_mod,Keyword.Declaration),
            (r'\b(var)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
             'instancevardecl'),
        ],
        'instancevardecl': [
            include('vardecl'),
            include('propertydef'),
        ],

        'anonfundef': [
            (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
        ],
        'instancefundef': [
            (key_decl_mod, Keyword.Declaration),
            (r'\b(function)(\s+)(' + ident + ')',
             bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
        ],
        'fundecl': [
            include('whitespace'),
            include('typelabel'),
            include('generictypedecl'),
            (r'\(',Punctuation,'funargdecl'),
            (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
            (r'{',Punctuation,('#pop','codeblock')),
            (r';',Punctuation,'#pop'),
        ],
        'funargdecl': [
            include('whitespace'),
            (ident, Name.Variable),
            include('typelabel'),
            include('literals'),
            (r'=', Operator),
            (r',', Punctuation),
            (r'\?', Punctuation),
            (r'\)', Punctuation, '#pop'),
        ],

        # ':' introduces a type annotation.
        'typelabel': [
            (r':', Punctuation, 'type'),
        ],
        'typedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'(?=[{}()=,a-z])', Text,'#pop'),
        ],
        'type': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, 'generictypedecl'),
            (r'->', Keyword.Type),
            (r'(?=[{}(),;=])', Text, '#pop'),
        ],
        # Nested generics: '<' pushes again, '>' pops.
        'generictypedecl': [
            include('whitespace'),
            (typeid, Name.Class),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r',', Punctuation),
        ],

        # Top level constructs
        'imports': [
            (r'(package|import|using)(\s+)([^;]+)(;)',
             bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
        ],
        'typedef': [
            (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
        ],
        'typedefprebody': [
            include('whitespace'),
            (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
             ('#pop', 'typedefbody')),
        ],
        'enumdef': [
            (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
        ],
        'enumdefprebody': [
            include('whitespace'),
            (r'{', Punctuation, ('#pop','enumdefbody')),
        ],
        'classdef': [
            (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
        ],
        'classdefprebody': [
            include('whitespace'),
            (r'(extends|implements)', Keyword.Declaration,'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],
        'interfacedef': [
            (r'interface', Keyword.Declaration,
             ('interfacedefprebody', 'typedecl')),
        ],
        'interfacedefprebody': [
            include('whitespace'),
            (r'(extends)', Keyword.Declaration, 'typedecl'),
            (r'{', Punctuation, ('#pop', 'classdefbody')),
        ],

        'typedefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'>', Punctuation, 'typedecl'),
            (r',', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'enumdefbody': [
            include('whitespace'),
            (ident, Name.Variable.Instance),
            (r'\(', Punctuation, 'funargdecl'),
            (r';', Punctuation),
            (r'}', Punctuation, '#pop'),
        ],
        'classdefbody': [
            include('whitespace'),
            include('instancevardef'),
            include('instancefundef'),
            (r'}', Punctuation, '#pop'),
            include('codeblock'),
        ],
    }

    def analyse_text(text):
        # Typed declarations ("x : Type") hint at haXe; implicit None
        # otherwise ("no opinion").
        if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
*New in Pygments 1.3.*
"""
name = 'Haml'
aliases = ['haml', 'HAML']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accomodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + '*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + '*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + '*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + '*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + '*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _dot + '*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + '*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
*New in Pygments 1.3.*
"""
name = 'Sass'
aliases = ['sass', 'SASS']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|if|while)', Keyword, 'script'),
(r'@[a-z0-9_-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'script'),
(r'\+[\w-]+', Name.Decorator, 'script'),
(r'(![a-z_]\w*)([ \t]*(?:\|\|)?=)',
bygroups(Name.Variable, Operator), 'script'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=[^\s:"\[]+\s*[=:]([ \t]|$))', Name.Attribute, 'new-style-attr'),
(r'', Text, 'selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'[^\s]+', String),
(r'\n', Text, 'root'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('script'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'script'),
(r'', Text, 'value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'script'),
(r':', Name.Attribute, 'value'),
],
'value': [
(r'[ \t]+', Text),
(r'url\(', String.Other, 'string-url'),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
(r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
r'blueviolet|peachpuff)\b', Name.Entity),
(r'\!important', Name.Exception),
(r'/\*', Comment, 'inline-comment'),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#{', String.Interpol, 'interpolation'),
(r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z][\w-]*', Name),
(r'\n', Text, 'root'),
],
'script': [
(r'[ \t]+', Text),
(r'![\w_]+', Name.Variable),
(r'[+\-*/%=(),!><]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'(\\.|[^\s\\+*\/%(),=!])+(?=[ \t]*\()', Name.Function),
(r'(\\.|[^\s\\+*\/%(),=!])+', Name),
(r'\n', Text, 'root'),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('script'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'\n', Text, 'root'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
}
| bsd-3-clause |
HenryTheHamster/cloud-init | tests/unittests/test_handler/test_handler_apt_configure.py | 5 | 4036 | from cloudinit import util
from cloudinit.config import cc_apt_configure
from ..helpers import TestCase
import os
import re
import shutil
import tempfile
def load_tfile_or_url(*args, **kwargs):
return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents))
class TestAptProxyConfig(TestCase):
def setUp(self):
super(TestAptProxyConfig, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
self.pfile = os.path.join(self.tmp, "proxy.cfg")
self.cfile = os.path.join(self.tmp, "config.cfg")
def _search_apt_config(self, contents, ptype, value):
return re.search(
r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
contents, flags=re.IGNORECASE)
def test_apt_proxy_written(self):
cfg = {'apt_proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
cfg = {'apt_http_proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
cfg = {'apt_http_proxy': 'myproxy_http_proxy',
'apt_https_proxy': 'myproxy_https_proxy',
'apt_ftp_proxy': 'myproxy_ftp_proxy'}
values = {'http': cfg['apt_http_proxy'],
'https': cfg['apt_https_proxy'],
'ftp': cfg['apt_ftp_proxy'],
}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
for ptype, pval in values.items():
self.assertTrue(self._search_apt_config(contents, ptype, pval))
def test_proxy_deleted(self):
util.write_file(self.cfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
self.assertFalse(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
def test_proxy_replaced(self):
util.write_file(self.cfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
payload = 'this is my apt config'
cfg = {'apt_config': payload}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
self.assertFalse(os.path.isfile(self.pfile))
self.assertEqual(load_tfile_or_url(self.cfile), payload)
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'apt_config': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
self.assertEqual(load_tfile_or_url(self.cfile), "foo")
def test_config_deleted(self):
# if no 'apt_config' is provided, delete any previously written file
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
self.assertFalse(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
# vi: ts=4 expandtab
| gpl-3.0 |
lunatyq/hunalign | scripts/ladder2text.py | 4 | 2120 | #!/usr/bin/python
import sys
import itertools
'''file -> array holding the lines of the file'''
def readfile(name):
# Open the input files and read lines
infile = file(name, 'r')
lines = map( lambda s : s.strip("\n"), infile.readlines() )
return lines
'''s -> (s0,s1), (s1,s2), (s2, s3), ...
see http://docs.python.org/library/itertools.html'''
def pairwise(iterable):
a, b = itertools.tee(iterable)
b.next()
return itertools.izip(a, b)
'''Create aligned text from two sentence files and hunalign's ladder-style output.
Usage: ladder2text.py <aligner.ladder> <hu.sen> <en.sen> > aligned.txt
See http://mokk.bme.hu/resources/hunalign for detailed format specification and more.
The output file is tab-delimited, with three columns. The first is a probability score.
The second and third columns are the chunks corresponding to each other.
" ~~~ " is the sentence delimiter inside chunks.
'''
def main() :
if len(sys.argv) == 4:
ladderlines = readfile(sys.argv[1])
hulines = readfile(sys.argv[2])
enlines = readfile(sys.argv[3])
def parseLadderLine(l) :
a = l.split()
assert len(a)==3
return ( int(a[0]), int(a[1]), a[2] ) # The score we leave as a string, to avoid small diffs caused by different numerical representations.
ladder = map( parseLadderLine, ladderlines )
# the next map() does all the work, so here are some comments...
# the map() iterates over the holes of the ladder.
# a hole is supposed to be two consecutive items in the array holding the lines of the ladder. /an array of holes is returned by pairwise(ladder)/
# the following segment returns an interval of sentences corresponding to a hole:
# hulines[int(hole[0][0]):int(hole[1][0])]
outputlines = map( lambda hole:
hole[0][2] + "\t" +
" ~~~ ".join(hulines[int(hole[0][0]):int(hole[1][0])])
+ "\t" +
" ~~~ ".join(enlines[int(hole[0][1]):int(hole[1][1])])
,
pairwise(ladder)
)
for l in outputlines :
print l
else:
print 'usage: ladder2text.py <aligned.ladder> <hu.raw> <en.raw> > aligned.txt'
sys.exit(-1)
if __name__ == "__main__" :
main()
| lgpl-2.1 |
dya2/python-for-android | python-build/python-libs/gdata/src/gdata/tlslite/utils/cryptomath.py | 172 | 11559 | """cryptomath module
This module has basic math/crypto code."""
import os
import math
import base64
import binascii
import sha
from compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
# Try to load cryptlib
try:
import cryptlib_py
try:
cryptlib_py.cryptInit()
except cryptlib_py.CryptException, e:
#If tlslite and cryptoIDlib are both present,
#they might each try to re-initialize this,
#so we're tolerant of that.
if e[0] != cryptlib_py.CRYPT_ERROR_INITED:
raise
cryptlibpyLoaded = True
except ImportError:
cryptlibpyLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Get os.urandom PRNG
try:
os.urandom(1)
def getRandomBytes(howMany):
return stringToBytes(os.urandom(howMany))
prngName = "os.urandom"
except:
# Else get cryptlib PRNG
if cryptlibpyLoaded:
def getRandomBytes(howMany):
randomKey = cryptlib_py.cryptCreateContext(cryptlib_py.CRYPT_UNUSED,
cryptlib_py.CRYPT_ALGO_AES)
cryptlib_py.cryptSetAttribute(randomKey,
cryptlib_py.CRYPT_CTXINFO_MODE,
cryptlib_py.CRYPT_MODE_OFB)
cryptlib_py.cryptGenerateKey(randomKey)
bytes = createByteArrayZeros(howMany)
cryptlib_py.cryptEncrypt(randomKey, bytes)
return bytes
prngName = "cryptlib"
else:
#Else get UNIX /dev/urandom PRNG
try:
devRandomFile = open("/dev/urandom", "rb")
def getRandomBytes(howMany):
return stringToBytes(devRandomFile.read(howMany))
prngName = "/dev/urandom"
except IOError:
#Else get Win32 CryptoAPI PRNG
try:
import win32prng
def getRandomBytes(howMany):
s = win32prng.getRandomBytes(howMany)
if len(s) != howMany:
raise AssertionError()
return stringToBytes(s)
prngName ="CryptoAPI"
except ImportError:
#Else no PRNG :-(
def getRandomBytes(howMany):
raise NotImplementedError("No Random Number Generator "\
"available.")
prngName = "None"
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(bytes):
total = 0L
multiplier = 1L
for count in range(len(bytes)-1, -1, -1):
byte = bytes[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToBytes(n):
howManyBytes = numBytes(n)
bytes = createByteArrayZeros(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
bytes[count] = int(n % 256)
n >>= 8
return bytes
def bytesToBase64(bytes):
s = bytesToString(bytes)
return stringToBase64(s)
def base64ToBytes(s):
s = base64ToString(s)
return stringToBytes(s)
def numberToBase64(n):
bytes = numberToBytes(n)
return bytesToBase64(bytes)
def base64ToNumber(s):
bytes = base64ToBytes(s)
return bytesToNumber(bytes)
def stringToNumber(s):
bytes = stringToBytes(s)
return bytesToNumber(bytes)
def numberToString(s):
bytes = numberToBytes(s)
return bytesToString(bytes)
def base64ToString(s):
try:
return base64.decodestring(s)
except binascii.Error, e:
raise SyntaxError(e)
except binascii.Incomplete, e:
raise SyntaxError(e)
def stringToBase64(s):
return base64.encodestring(s).replace("\n", "")
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
bytes = stringToBytes(mpi[4:])
return bytesToNumber(bytes)
def numberToMPI(n):
bytes = numberToBytes(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
bytes = concatArrays(createByteArrayZeros(4+ext), bytes)
bytes[0] = (length >> 24) & 0xFF
bytes[1] = (length >> 16) & 0xFF
bytes[2] = (length >> 8) & 0xFF
bytes[3] = length & 0xFF
return bytesToString(bytes)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
def hashAndBase64(s):
return stringToBase64(sha.sha(s).digest())
def getBase64Nonce(numChars=22): #defaults to an 132 bit nonce
bytes = getRandomBytes(numChars)
bytesStr = "".join([chr(b) for b in bytes])
return stringToBase64(bytesStr)[:numChars]
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
#This will break when python division changes, but we can't use // cause
#of Jython
return (a * b) / gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
#This will break when python division changes, but we can't use //
#cause of Jython
q = d / c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
#Copied from Bryan G. Olson's post to comp.lang.python
#Does left-to-right instead of pow()'s right-to-left,
#thus about 30% faster than the python built-in with small bases
def powMod(base, power, modulus):
nBitScan = 5
""" Return base**power mod modulus, using multi bit scanning
with nBitScan bits at a time."""
#TREV - Added support for negative exponents
negativeResult = False
if (power < 0):
power *= -1
negativeResult = True
exp2 = 2**nBitScan
mask = exp2 - 1
# Break power into a list of digits of nBitScan bits.
# The list is recursive so easy to read in reverse direction.
nibbles = None
while power:
nibbles = int(power & mask), nibbles
power = power >> nBitScan
# Make a table of powers of base up to 2**nBitScan - 1
lowPowers = [1]
for i in xrange(1, exp2):
lowPowers.append((lowPowers[i-1] * base) % modulus)
# To exponentiate by the first nibble, look it up in the table
nib, nibbles = nibbles
prod = lowPowers[nib]
# For the rest, square nBitScan times, then multiply by
# base^nibble
while nibbles:
nib, nibbles = nibbles
for i in xrange(nBitScan):
prod = (prod * prod) % modulus
if nib: prod = (prod * lowPowers[nib]) % modulus
#TREV - Added support for negative exponents
if negativeResult:
prodInv = invMod(prod, modulus)
#Check to make sure the inverse is correct
if (prod * prodInv) % modulus != 1:
raise AssertionError()
return prodInv
return prod
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = range(n)
for count in range(2, int(math.sqrt(n))):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print "*",
s, t = n-1, 0
while s % 2 == 0:
s, t = s/2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2L ** (bits-1)) * 3/2
high = 2L ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print ".",
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3/2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print ".",
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| apache-2.0 |
cms-externals/pyqt | examples/painting/basicdrawing/basicdrawing.py | 15 | 15260 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
# This is only needed for Python v2 but is harmless for Python v3.
import sip
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
import basicdrawing_rc
class RenderArea(QtGui.QWidget):
points = QtGui.QPolygon([
QtCore.QPoint(10, 80),
QtCore.QPoint(20, 10),
QtCore.QPoint(80, 30),
QtCore.QPoint(90, 70)
])
Line, Points, Polyline, Polygon, Rect, RoundedRect, Ellipse, Arc, Chord, \
Pie, Path, Text, Pixmap = range(13)
def __init__(self, parent=None):
super(RenderArea, self).__init__(parent)
self.pen = QtGui.QPen()
self.brush = QtGui.QBrush()
self.pixmap = QtGui.QPixmap()
self.shape = RenderArea.Polygon
self.antialiased = False
self.transformed = False
self.pixmap.load(':/images/qt-logo.png')
self.setBackgroundRole(QtGui.QPalette.Base)
self.setAutoFillBackground(True)
def minimumSizeHint(self):
return QtCore.QSize(100, 100)
def sizeHint(self):
return QtCore.QSize(400, 200)
def setShape(self, shape):
self.shape = shape
self.update()
def setPen(self, pen):
self.pen = pen
self.update()
def setBrush(self, brush):
self.brush = brush
self.update()
def setAntialiased(self, antialiased):
self.antialiased = antialiased
self.update()
def setTransformed(self, transformed):
self.transformed = transformed
self.update()
def paintEvent(self, event):
rect = QtCore.QRect(10, 20, 80, 60)
path = QtGui.QPainterPath()
path.moveTo(20, 80)
path.lineTo(20, 30)
path.cubicTo(80, 0, 50, 50, 80, 80)
startAngle = 30 * 16
arcLength = 120 * 16
painter = QtGui.QPainter(self)
painter.setPen(self.pen)
painter.setBrush(self.brush)
if self.antialiased:
painter.setRenderHint(QtGui.QPainter.Antialiasing)
for x in range(0, self.width(), 100):
for y in range(0, self.height(), 100):
painter.save()
painter.translate(x, y)
if self.transformed:
painter.translate(50, 50)
painter.rotate(60.0)
painter.scale(0.6, 0.9)
painter.translate(-50, -50)
if self.shape == RenderArea.Line:
painter.drawLine(rect.bottomLeft(), rect.topRight())
elif self.shape == RenderArea.Points:
painter.drawPoints(RenderArea.points)
elif self.shape == RenderArea.Polyline:
painter.drawPolyline(RenderArea.points)
elif self.shape == RenderArea.Polygon:
painter.drawPolygon(RenderArea.points)
elif self.shape == RenderArea.Rect:
painter.drawRect(rect)
elif self.shape == RenderArea.RoundedRect:
painter.drawRoundedRect(rect, 25, 25,
QtCore.Qt.RelativeSize)
elif self.shape == RenderArea.Ellipse:
painter.drawEllipse(rect)
elif self.shape == RenderArea.Arc:
painter.drawArc(rect, startAngle, arcLength)
elif self.shape == RenderArea.Chord:
painter.drawChord(rect, startAngle, arcLength)
elif self.shape == RenderArea.Pie:
painter.drawPie(rect, startAngle, arcLength)
elif self.shape == RenderArea.Path:
painter.drawPath(path)
elif self.shape == RenderArea.Text:
painter.drawText(rect, QtCore.Qt.AlignCenter,
"Qt by\nQt Software")
elif self.shape == RenderArea.Pixmap:
painter.drawPixmap(10, 10, self.pixmap)
painter.restore()
painter.setPen(self.palette().dark().color())
painter.setBrush(QtCore.Qt.NoBrush)
painter.drawRect(QtCore.QRect(0, 0, self.width() - 1,
self.height() - 1))
IdRole = QtCore.Qt.UserRole
# Main demo window: a RenderArea plus combo boxes / spin box / check boxes
# that reconfigure the shape, pen, brush, and render options on the fly.
class Window(QtGui.QWidget):
def __init__(self):
super(Window, self).__init__()
self.renderArea = RenderArea()
# Shape selector; each entry stores the RenderArea shape constant as item data.
self.shapeComboBox = QtGui.QComboBox()
self.shapeComboBox.addItem("Polygon", RenderArea.Polygon)
self.shapeComboBox.addItem("Rectangle", RenderArea.Rect)
self.shapeComboBox.addItem("Rounded Rectangle", RenderArea.RoundedRect)
self.shapeComboBox.addItem("Ellipse", RenderArea.Ellipse)
self.shapeComboBox.addItem("Pie", RenderArea.Pie)
self.shapeComboBox.addItem("Chord", RenderArea.Chord)
self.shapeComboBox.addItem("Path", RenderArea.Path)
self.shapeComboBox.addItem("Line", RenderArea.Line)
self.shapeComboBox.addItem("Polyline", RenderArea.Polyline)
self.shapeComboBox.addItem("Arc", RenderArea.Arc)
self.shapeComboBox.addItem("Points", RenderArea.Points)
self.shapeComboBox.addItem("Text", RenderArea.Text)
self.shapeComboBox.addItem("Pixmap", RenderArea.Pixmap)
shapeLabel = QtGui.QLabel("&Shape:")
shapeLabel.setBuddy(self.shapeComboBox)
# Pen width; width 0 is Qt's "cosmetic" (always 1-pixel) pen.
self.penWidthSpinBox = QtGui.QSpinBox()
self.penWidthSpinBox.setRange(0, 20)
self.penWidthSpinBox.setSpecialValueText("0 (cosmetic pen)")
penWidthLabel = QtGui.QLabel("Pen &Width:")
penWidthLabel.setBuddy(self.penWidthSpinBox)
# Pen line style (dash pattern), stored as Qt.PenStyle item data.
self.penStyleComboBox = QtGui.QComboBox()
self.penStyleComboBox.addItem("Solid", QtCore.Qt.SolidLine)
self.penStyleComboBox.addItem("Dash", QtCore.Qt.DashLine)
self.penStyleComboBox.addItem("Dot", QtCore.Qt.DotLine)
self.penStyleComboBox.addItem("Dash Dot", QtCore.Qt.DashDotLine)
self.penStyleComboBox.addItem("Dash Dot Dot", QtCore.Qt.DashDotDotLine)
self.penStyleComboBox.addItem("None", QtCore.Qt.NoPen)
penStyleLabel = QtGui.QLabel("&Pen Style:")
penStyleLabel.setBuddy(self.penStyleComboBox)
# Pen cap style (how line ends are drawn).
self.penCapComboBox = QtGui.QComboBox()
self.penCapComboBox.addItem("Flat", QtCore.Qt.FlatCap)
self.penCapComboBox.addItem("Square", QtCore.Qt.SquareCap)
self.penCapComboBox.addItem("Round", QtCore.Qt.RoundCap)
penCapLabel = QtGui.QLabel("Pen &Cap:")
penCapLabel.setBuddy(self.penCapComboBox)
# Pen join style (how connected line segments meet).
self.penJoinComboBox = QtGui.QComboBox()
self.penJoinComboBox.addItem("Miter", QtCore.Qt.MiterJoin)
self.penJoinComboBox.addItem("Bevel", QtCore.Qt.BevelJoin)
self.penJoinComboBox.addItem("Round", QtCore.Qt.RoundJoin)
penJoinLabel = QtGui.QLabel("Pen &Join:")
penJoinLabel.setBuddy(self.penJoinComboBox)
# Brush fill style: gradients, texture, solid, and hatch patterns.
self.brushStyleComboBox = QtGui.QComboBox()
self.brushStyleComboBox.addItem("Linear Gradient",
QtCore.Qt.LinearGradientPattern)
self.brushStyleComboBox.addItem("Radial Gradient",
QtCore.Qt.RadialGradientPattern)
self.brushStyleComboBox.addItem("Conical Gradient",
QtCore.Qt.ConicalGradientPattern)
self.brushStyleComboBox.addItem("Texture", QtCore.Qt.TexturePattern)
self.brushStyleComboBox.addItem("Solid", QtCore.Qt.SolidPattern)
self.brushStyleComboBox.addItem("Horizontal", QtCore.Qt.HorPattern)
self.brushStyleComboBox.addItem("Vertical", QtCore.Qt.VerPattern)
self.brushStyleComboBox.addItem("Cross", QtCore.Qt.CrossPattern)
self.brushStyleComboBox.addItem("Backward Diagonal",
QtCore.Qt.BDiagPattern)
self.brushStyleComboBox.addItem("Forward Diagonal",
QtCore.Qt.FDiagPattern)
self.brushStyleComboBox.addItem("Diagonal Cross",
QtCore.Qt.DiagCrossPattern)
self.brushStyleComboBox.addItem("Dense 1", QtCore.Qt.Dense1Pattern)
self.brushStyleComboBox.addItem("Dense 2", QtCore.Qt.Dense2Pattern)
self.brushStyleComboBox.addItem("Dense 3", QtCore.Qt.Dense3Pattern)
self.brushStyleComboBox.addItem("Dense 4", QtCore.Qt.Dense4Pattern)
self.brushStyleComboBox.addItem("Dense 5", QtCore.Qt.Dense5Pattern)
self.brushStyleComboBox.addItem("Dense 6", QtCore.Qt.Dense6Pattern)
self.brushStyleComboBox.addItem("Dense 7", QtCore.Qt.Dense7Pattern)
self.brushStyleComboBox.addItem("None", QtCore.Qt.NoBrush)
brushStyleLabel = QtGui.QLabel("&Brush Style:")
brushStyleLabel.setBuddy(self.brushStyleComboBox)
otherOptionsLabel = QtGui.QLabel("Other Options:")
self.antialiasingCheckBox = QtGui.QCheckBox("&Antialiasing")
self.transformationsCheckBox = QtGui.QCheckBox("&Transformations")
# Wire every control to the handler that rebuilds the pen/brush/shape.
self.shapeComboBox.activated.connect(self.shapeChanged)
self.penWidthSpinBox.valueChanged.connect(self.penChanged)
self.penStyleComboBox.activated.connect(self.penChanged)
self.penCapComboBox.activated.connect(self.penChanged)
self.penJoinComboBox.activated.connect(self.penChanged)
self.brushStyleComboBox.activated.connect(self.brushChanged)
self.antialiasingCheckBox.toggled.connect(self.renderArea.setAntialiased)
self.transformationsCheckBox.toggled.connect(self.renderArea.setTransformed)
# Grid layout: render area spans the top row, controls below it.
mainLayout = QtGui.QGridLayout()
mainLayout.setColumnStretch(0, 1)
mainLayout.setColumnStretch(3, 1)
mainLayout.addWidget(self.renderArea, 0, 0, 1, 4)
mainLayout.setRowMinimumHeight(1, 6)
mainLayout.addWidget(shapeLabel, 2, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.shapeComboBox, 2, 2)
mainLayout.addWidget(penWidthLabel, 3, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.penWidthSpinBox, 3, 2)
mainLayout.addWidget(penStyleLabel, 4, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.penStyleComboBox, 4, 2)
mainLayout.addWidget(penCapLabel, 5, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.penCapComboBox, 5, 2)
mainLayout.addWidget(penJoinLabel, 6, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.penJoinComboBox, 6, 2)
mainLayout.addWidget(brushStyleLabel, 7, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.brushStyleComboBox, 7, 2)
mainLayout.setRowMinimumHeight(8, 6)
mainLayout.addWidget(otherOptionsLabel, 9, 1, QtCore.Qt.AlignRight)
mainLayout.addWidget(self.antialiasingCheckBox, 9, 2)
mainLayout.addWidget(self.transformationsCheckBox, 10, 2)
self.setLayout(mainLayout)
# Push the initial control values into the render area.
self.shapeChanged()
self.penChanged()
self.brushChanged()
self.antialiasingCheckBox.setChecked(True)
self.setWindowTitle("Basic Drawing")
# Read the selected shape constant out of the combo box and apply it.
def shapeChanged(self):
shape = self.shapeComboBox.itemData(self.shapeComboBox.currentIndex(),
IdRole)
self.renderArea.setShape(shape)
# Rebuild the pen from the width/style/cap/join controls and apply it.
def penChanged(self):
width = self.penWidthSpinBox.value()
style = QtCore.Qt.PenStyle(self.penStyleComboBox.itemData(
self.penStyleComboBox.currentIndex(), IdRole))
cap = QtCore.Qt.PenCapStyle(self.penCapComboBox.itemData(
self.penCapComboBox.currentIndex(), IdRole))
join = QtCore.Qt.PenJoinStyle(self.penJoinComboBox.itemData(
self.penJoinComboBox.currentIndex(), IdRole))
self.renderArea.setPen(QtGui.QPen(QtCore.Qt.blue, width, style, cap, join))
# Build the brush for the selected style; gradients and textures need
# dedicated constructors, everything else is a plain green brush.
def brushChanged(self):
style = QtCore.Qt.BrushStyle(self.brushStyleComboBox.itemData(
self.brushStyleComboBox.currentIndex(), IdRole))
if style == QtCore.Qt.LinearGradientPattern:
linearGradient = QtGui.QLinearGradient(0, 0, 100, 100)
linearGradient.setColorAt(0.0, QtCore.Qt.white)
linearGradient.setColorAt(0.2, QtCore.Qt.green)
linearGradient.setColorAt(1.0, QtCore.Qt.black)
self.renderArea.setBrush(QtGui.QBrush(linearGradient))
elif style == QtCore.Qt.RadialGradientPattern:
radialGradient = QtGui.QRadialGradient(50, 50, 50, 70, 70)
radialGradient.setColorAt(0.0, QtCore.Qt.white)
radialGradient.setColorAt(0.2, QtCore.Qt.green)
radialGradient.setColorAt(1.0, QtCore.Qt.black)
self.renderArea.setBrush(QtGui.QBrush(radialGradient))
elif style == QtCore.Qt.ConicalGradientPattern:
conicalGradient = QtGui.QConicalGradient(50, 50, 150)
conicalGradient.setColorAt(0.0, QtCore.Qt.white)
conicalGradient.setColorAt(0.2, QtCore.Qt.green)
conicalGradient.setColorAt(1.0, QtCore.Qt.black)
self.renderArea.setBrush(QtGui.QBrush(conicalGradient))
elif style == QtCore.Qt.TexturePattern:
self.renderArea.setBrush(QtGui.QBrush(QtGui.QPixmap(':/images/brick.png')))
else:
self.renderArea.setBrush(QtGui.QBrush(QtCore.Qt.green, style))
if __name__ == '__main__':
    # Standalone entry point: create the Qt application, show the demo
    # window, and run the event loop until the user closes it.
    import sys

    app = QtGui.QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
| gpl-3.0 |
zhanghenry/stocks | django/db/models/sql/constants.py | 633 | 1039 | """
Constants specific to the SQL storage portion of the ORM.
"""
import re
# Valid query types (a set is used for speedy lookups). These are (currently)
# considered SQL-specific; other storage systems may choose to use different
# lookup types.
# Lookup suffixes recognized in queryset filters (e.g. field__iexact).
QUERY_TERMS = {
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'week_day', 'hour', 'minute', 'second', 'isnull', 'search',
'regex', 'iregex',
}
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Namedtuples for sql.* internal use.
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
CURSOR = 'cursor'
NO_RESULTS = 'no results'
# Valid ordering term: "?" (random) or an optionally signed dotted/word path.
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
# Maps a requested direction to (SQL keyword, inverted SQL keyword).
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC'),
}
# SQL join types.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
| bsd-3-clause |
hughsaunders/keystone | keystone/common/sql/migrate_repo/versions/037_add_region_table.py | 9 | 1314 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
    """Create the ``region`` table (id, description, optional parent, extra)."""
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # Column set mirrors the Region model; InnoDB/utf8 apply to MySQL only.
    region_table = sql.Table(
        'region',
        meta,
        sql.Column('id', sql.String(64), primary_key=True),
        sql.Column('description', sql.String(255), nullable=False),
        sql.Column('parent_region_id', sql.String(64), nullable=True),
        sql.Column('extra', sql.Text()),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    # checkfirst makes the migration idempotent if the table already exists.
    region_table.create(migrate_engine, checkfirst=True)
def downgrade(migrate_engine):
    """Drop the ``region`` table if it exists (reverse of upgrade)."""
    meta = sql.MetaData()
    meta.bind = migrate_engine
    # autoload reflects the existing table definition from the database.
    table = sql.Table('region', meta, autoload=True)
    table.drop(migrate_engine, checkfirst=True)
| apache-2.0 |
lpantano/bcbio-nextgen | bcbio/illumina/samplesheet.py | 10 | 5032 | """Converts Illumina SampleSheet CSV files to the run_info.yaml input file.
This allows running the analysis pipeline without Galaxy, using CSV input
files from Illumina SampleSheet or Genesifter.
"""
import os
import csv
import itertools
import difflib
import glob
import yaml
from bcbio.illumina import flowcell
from bcbio import utils
# ## Create samplesheets
def from_flowcell(run_folder, lane_details, out_dir=None):
    """Convert a flowcell into a samplesheet for demultiplexing.

    Writes ``<flowcell_id>.csv`` (Illumina SampleSheet layout) into
    *out_dir* (default: *run_folder*) and returns the path written.
    """
    fcid = os.path.basename(run_folder)
    if out_dir is None:
        out_dir = run_folder
    out_file = os.path.join(out_dir, "%s.csv" % fcid)
    header = ["FCID", "Lane", "Sample_ID", "SampleRef", "Index",
              "Description", "Control", "Recipe", "Operator", "SampleProject"]
    with open(out_file, "w") as out_handle:
        writer = csv.writer(out_handle)
        writer.writerow(header)
        writer.writerows(_lane_detail_to_ss(fcid, ldetail)
                         for ldetail in lane_details)
    return out_file
def _lane_detail_to_ss(fcid, ldetail):
"""Convert information about a lane into Illumina samplesheet output.
"""
return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"],
ldetail["bc_index"], ldetail["description"], "N", "", "",
ldetail["project_name"]]
# ## Use samplesheets to create YAML files
# Groups the flat (fcid, lane, sample_id, genome, barcode) rows into one
# dict per lane for YAML output; barcoded lanes get a "multiplex" sub-list.
def _organize_lanes(info_iter, barcode_ids):
"""Organize flat lane information into nested YAML structure.
"""
all_lanes = []
# NOTE(review): the grouping key repeats x[1]; given the unpacked name
# `sampleref`, the third element was probably meant to be x[2] — confirm
# against upstream before changing, since it alters how rows group.
for (fcid, lane, sampleref), info in itertools.groupby(info_iter, lambda x: (x[0], x[1], x[1])):
info = list(info)
cur_lane = dict(flowcell_id=fcid, lane=lane, genome_build=info[0][3], analysis="Standard")
if not _has_barcode(info):
# NOTE(review): info[0][1] is the lane field; the sample id info[0][2]
# looks like the intended description — verify.
cur_lane["description"] = info[0][1]
else: # barcoded sample
cur_lane["description"] = "Barcoded lane %s" % lane
multiplex = []
for (_, _, sample_id, _, bc_seq) in info:
# barcode_ids maps sequence -> (barcode_type, numeric id).
bc_type, bc_id = barcode_ids[bc_seq]
multiplex.append(dict(barcode_type=bc_type,
barcode_id=bc_id,
sequence=bc_seq,
name=sample_id))
cur_lane["multiplex"] = multiplex
all_lanes.append(cur_lane)
return all_lanes
def _has_barcode(sample):
if sample[0][4]:
return True
def _generate_barcode_ids(info_iter):
"""Create unique barcode IDs assigned to sequences
"""
bc_type = "SampleSheet"
barcodes = list(set([x[-1] for x in info_iter]))
barcodes.sort()
barcode_ids = {}
for i, bc in enumerate(barcodes):
barcode_ids[bc] = (bc_type, i+1)
return barcode_ids
def _read_input_csv(in_file):
"""Parse useful details from SampleSheet CSV file.
"""
with open(in_file, "rU") as in_handle:
reader = csv.reader(in_handle)
reader.next() # header
for line in reader:
if line: # empty lines
(fc_id, lane, sample_id, genome, barcode) = line[:5]
yield fc_id, lane, sample_id, genome, barcode
def _get_flowcell_id(in_file, require_single=True):
    """Retrieve the unique flowcell id(s) represented in the SampleSheet.

    Returns the set of flowcell ids found; with *require_single* (the
    default) a sheet containing more than one id raises ValueError.
    """
    fc_ids = {row[0] for row in _read_input_csv(in_file)}
    if require_single and len(fc_ids) > 1:
        raise ValueError("There are several FCIDs in the same samplesheet file: %s" % in_file)
    return fc_ids
def csv2yaml(in_file, out_file=None):
    """Convert a CSV SampleSheet to YAML run_info format.

    Writes next to *in_file* (with a .yaml extension) unless *out_file*
    is given; returns the output path.
    """
    if out_file is None:
        base, _ = os.path.splitext(in_file)
        out_file = "%s.yaml" % base
    bc_ids = _generate_barcode_ids(_read_input_csv(in_file))
    lane_info = _organize_lanes(_read_input_csv(in_file), bc_ids)
    with open(out_file, "w") as out_handle:
        out_handle.write(yaml.safe_dump(lane_info, default_flow_style=False))
    return out_file
def run_has_samplesheet(fc_dir, config, require_single=True):
    """Checks if there's a suitable SampleSheet.csv present for the run.

    Scans the configured ``samplesheet_directories`` for CSV sheets,
    indexes them by flowcell id, then fuzzy-matches the run's flowcell
    name against those ids. Returns the matching sheet path or None.

    Fix: ``dict.has_key`` (removed in Python 3) replaced with ``in``.
    """
    fc_name, _ = flowcell.parse_dirname(fc_dir)
    sheet_dirs = config.get("samplesheet_directories", [])
    fcid_sheet = {}
    for ss_dir in (s for s in sheet_dirs if os.path.exists(s)):
        with utils.chdir(ss_dir):
            for ss in glob.glob("*.csv"):
                fc_ids = _get_flowcell_id(ss, require_single)
                for fcid in fc_ids:
                    if fcid:
                        fcid_sheet[fcid] = os.path.join(ss_dir, ss)
    # difflib handles human errors while entering data on the SampleSheet.
    # Only one best candidate is returned (if any). 0.85 cutoff allows for
    # maximum of 2 mismatches in fcid
    potential_fcids = difflib.get_close_matches(fc_name, list(fcid_sheet), 1, 0.85)
    if potential_fcids and potential_fcids[0] in fcid_sheet:
        return fcid_sheet[potential_fcids[0]]
    return None
| mit |
davidmfinol/py3NEAT | neat/chromosome.py | 1 | 16707 | import random
import math
from neat import config
from neat import genome
# Temporary workaround - default settings
#node_gene_type = genome.NodeGene
conn_gene_type = genome.ConnectionGene
class Chromosome(object):
""" A chromosome for general recurrent neural networks. """
# Class-level counter used to hand out unique chromosome ids.
_id = 0
def __init__(self, parent1_id, parent2_id, node_gene_type, conn_gene_type):
self._id = self.__get_new_id()
self._input_nodes = config.Config.input_nodes
self._output_nodes = config.Config.output_nodes
# the type of NodeGene and ConnectionGene the chromosome carries
self._node_gene_type = node_gene_type
self._conn_gene_type = conn_gene_type
# how many genes of the previous type the chromosome has
self._connection_genes = {} # dictionary of connection genes
self._node_genes = []
self.fitness = None
self.species_id = None
# my parents id: helps in tracking chromosome's genealogy
self.parent1_id = parent1_id
self.parent2_id = parent2_id
# Read-only views over the genome's internals.
conn_genes = property(lambda self: list(self._connection_genes.values()))
node_genes = property(lambda self: self._node_genes)
sensors = property(lambda self: self._input_nodes)
actuators = property(lambda self: self._output_nodes)
id = property(lambda self: self._id)
@classmethod
def __get_new_id(cls):
cls._id += 1
return cls._id
def mutate(self):
""" Mutates this chromosome """
r = random.random
# Structural mutations are mutually exclusive with weight/bias mutation.
if r() < config.Config.prob_addnode:
self._mutate_add_node()
elif r() < config.Config.prob_addconn:
self._mutate_add_connection()
else:
for cg in list(self._connection_genes.values()):
cg.mutate() # mutate weights
for ng in self._node_genes[self._input_nodes:]:
ng.mutate() # mutate bias, response, and etc...
return self
def crossover(self, other):
""" Crosses over parents' chromosomes and returns a child. """
# This can't happen! Parents must belong to the same species.
assert self.species_id == other.species_id, 'Different parents species ID: %d vs %d' \
% (self.species_id, other.species_id)
# TODO: if they're of equal fitnesses, choose the shortest
if self.fitness > other.fitness:
parent1 = self
parent2 = other
else:
parent1 = other
parent2 = self
# creates a new child
child = self.__class__(self.id, other.id, self._node_gene_type, self._conn_gene_type)
child._inherit_genes(parent1, parent2)
child.species_id = parent1.species_id
#child._input_nodes = parent1._input_nodes
return child
# Note: `child` plays the role of self here; parent1 must be the fitter parent.
def _inherit_genes(child, parent1, parent2):
""" Applies the crossover operator. """
assert(parent1.fitness >= parent2.fitness)
# Crossover connection genes
for cg1 in list(parent1._connection_genes.values()):
try:
cg2 = parent2._connection_genes[cg1.key]
except KeyError:
# Copy excess or disjoint genes from the fittest parent
child._connection_genes[cg1.key] = cg1.copy()
else:
if cg2.is_same_innov(cg1): # Always true for *global* INs
# Homologous gene found
new_gene = cg1.get_child(cg2)
#new_gene.enable() # avoids disconnected neurons
else:
new_gene = cg1.copy()
child._connection_genes[new_gene.key] = new_gene
# Crossover node genes
for i, ng1 in enumerate(parent1._node_genes):
try:
# matching node genes: randomly selects the neuron's bias and response
child._node_genes.append(ng1.get_child(parent2._node_genes[i]))
except IndexError:
# copies extra genes from the fittest parent
child._node_genes.append(ng1.copy())
# Structural mutation: split a random connection in two, inserting a hidden node.
def _mutate_add_node(self):
# Choose a random connection to split
conn_to_split = random.choice(list(self._connection_genes.values()))
ng = self._node_gene_type(len(self._node_genes) + 1, 'HIDDEN', activation_type = config.Config.nn_activation)
self._node_genes.append(ng)
new_conn1, new_conn2 = conn_to_split.split(ng.id)
self._connection_genes[new_conn1.key] = new_conn1
self._connection_genes[new_conn2.key] = new_conn2
return (ng, conn_to_split) # the return is only used in genome_feedforward
# Structural mutation: add one currently-missing connection, chosen uniformly.
def _mutate_add_connection(self):
# Only for recurrent networks
total_possible_conns = (len(self._node_genes) - self._input_nodes) \
* len(self._node_genes)
remaining_conns = total_possible_conns - len(self._connection_genes)
# Check if new connection can be added:
if remaining_conns > 0:
n = random.randint(0, remaining_conns - 1)
count = 0
# Count connections
for in_node in self._node_genes:
for out_node in self._node_genes[self._input_nodes:]:
if (in_node.id, out_node.id) not in list(self._connection_genes.keys()):
# Free connection
if count == n: # Connection to create
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(in_node.id, out_node.id, weight, True)
self._connection_genes[cg.key] = cg
return
else:
count += 1
# compatibility function
# NEAT-style distance: weighted sum of excess genes, disjoint genes,
# and average weight difference of matching genes.
def distance(self, other):
""" Returns the distance between this chromosome and the other. """
if len(self._connection_genes) > len(other._connection_genes):
chromo1 = self
chromo2 = other
else:
chromo1 = other
chromo2 = self
weight_diff = 0
matching = 0
disjoint = 0
excess = 0
max_cg_chromo2 = max(chromo2._connection_genes.values())
for cg1 in list(chromo1._connection_genes.values()):
try:
cg2 = chromo2._connection_genes[cg1.key]
except KeyError:
if cg1 > max_cg_chromo2:
excess += 1
else:
disjoint += 1
else:
# Homologous genes
weight_diff += math.fabs(cg1.weight - cg2.weight)
matching += 1
disjoint += len(chromo2._connection_genes) - matching
assert(matching > 0) # this can't happen
distance = config.Config.excess_coeficient * excess + \
config.Config.disjoint_coeficient * disjoint + \
config.Config.weight_coeficient * (weight_diff/matching)
return distance
def size(self):
""" Defines chromosome 'complexity': number of hidden nodes plus
number of enabled connections (bias is not considered)
"""
# number of hidden nodes
num_hidden = len(self._node_genes) - self._input_nodes - self._output_nodes
# number of enabled connections
conns_enabled = sum([1 for cg in list(self._connection_genes.values()) if cg.enabled is True])
return (num_hidden, conns_enabled)
def __lt__(self, other):
""" First compare chromosomes by their fitness and then by their id.
Older chromosomes (lower ids) should be prefered if newer ones
performs the same.
"""
# NOTE(review): only fitness is compared here; the id tie-break described
# above was lost in the Python 3 port (see commented cmp line) — confirm.
#return cmp(self.fitness, other.fitness) or cmp(other.id, self.id)
return (self.fitness < other.fitness)
def __str__(self):
s = "Nodes:"
for ng in self._node_genes:
s += "\n\t" + str(ng)
s += "\nConnections:"
connections = list(self._connection_genes.values())
connections.sort()
for c in connections:
s += "\n\t" + str(c)
return s
# Append num_hidden fully-connected hidden nodes (recurrent topology).
def add_hidden_nodes(self, num_hidden):
id = len(self._node_genes)+1
for i in range(num_hidden):
node_gene = self._node_gene_type(id,
nodetype = 'HIDDEN',
activation_type = config.Config.nn_activation)
self._node_genes.append(node_gene)
id += 1
# Connect all nodes to it
for pre in self._node_genes:
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(pre.id, node_gene.id, weight, True)
self._connection_genes[cg.key] = cg
# Connect it to all nodes except input nodes
for post in self._node_genes[self._input_nodes:]:
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(node_gene.id, post.id, weight, True)
self._connection_genes[cg.key] = cg
@classmethod
def create_fully_connected(cls):
"""
Factory method
Creates a chromosome for a fully connected feedforward network with no hidden nodes.
"""
# NOTE(review): relies on module-level globals node_gene_type/conn_gene_type,
# which are only assigned in the __main__ block (the default assignment at
# the top of the module is commented out) — confirm before importing.
c = cls(0, 0, node_gene_type, conn_gene_type)
id = 1
# Create node genes
for i in range(config.Config.input_nodes):
c._node_genes.append(c._node_gene_type(id, 'INPUT'))
id += 1
#c._input_nodes += num_input
for i in range(config.Config.output_nodes):
node_gene = c._node_gene_type(id,
nodetype = 'OUTPUT',
activation_type = config.Config.nn_activation)
c._node_genes.append(node_gene)
id += 1
# Connect it to all input nodes
for input_node in c._node_genes[:config.Config.input_nodes]:
#TODO: review the initial weights distribution
#weight = random.uniform(-1, 1)*Config.random_range
weight = random.gauss(0, config.Config.weight_stdev)
cg = c._conn_gene_type(input_node.id, node_gene.id, weight, True)
c._connection_genes[cg.key] = cg
assert id == len(c._node_genes) + 1
return c
class FFChromosome(Chromosome):
""" A chromosome for feedforward neural networks. Feedforward
topologies are a particular case of Recurrent NNs.
"""
def __init__(self, parent1_id, parent2_id, node_gene_type, conn_gene_type):
super(FFChromosome, self).__init__(parent1_id, parent2_id, node_gene_type, conn_gene_type)
self.__node_order = [] # hidden node order (for feedforward networks)
node_order = property(lambda self: self.__node_order)
# Note: `child` plays the role of self (mirrors the parent-class signature).
def _inherit_genes(child, parent1, parent2):
super(FFChromosome, child)._inherit_genes(parent1, parent2)
child.__node_order = parent1.__node_order[:]
assert(len(child.__node_order) == len([n for n in child.node_genes if n.type == 'HIDDEN']))
def _mutate_add_node(self):
ng, split_conn = super(FFChromosome, self)._mutate_add_node()
# Add node to node order list: after the presynaptic node of the split connection
# and before the postsynaptic node of the split connection
if self._node_genes[split_conn.innodeid - 1].type == 'HIDDEN':
mini = self.__node_order.index(split_conn.innodeid) + 1
else:
# Presynaptic node is an input node, not hidden node
mini = 0
if self._node_genes[split_conn.outnodeid - 1].type == 'HIDDEN':
maxi = self.__node_order.index(split_conn.outnodeid)
else:
# Postsynaptic node is an output node, not hidden node
maxi = len(self.__node_order)
self.__node_order.insert(random.randint(mini, maxi), ng.id)
assert(len(self.__node_order) == len([n for n in self.node_genes if n.type == 'HIDDEN']))
return (ng, split_conn)
# Overrides the recurrent version: only connections that respect the
# feedforward ordering (see __is_connection_feedforward) are candidates.
def _mutate_add_connection(self):
# Only for feedforwad networks
num_hidden = len(self.__node_order)
num_output = len(self._node_genes) - self._input_nodes - num_hidden
total_possible_conns = (num_hidden+num_output)*(self._input_nodes+num_hidden) - \
sum(range(num_hidden+1))
remaining_conns = total_possible_conns - len(self._connection_genes)
# Check if new connection can be added:
if remaining_conns > 0:
n = random.randint(0, remaining_conns - 1)
count = 0
# Count connections
for in_node in (self._node_genes[:self._input_nodes] + self._node_genes[-num_hidden:]):
for out_node in self._node_genes[self._input_nodes:]:
if (in_node.id, out_node.id) not in list(self._connection_genes.keys()) and \
self.__is_connection_feedforward(in_node, out_node):
# Free connection
if count == n: # Connection to create
#weight = random.uniform(-config.Config.random_range, config.Config.random_range)
weight = random.gauss(0,1)
cg = self._conn_gene_type(in_node.id, out_node.id, weight, True)
self._connection_genes[cg.key] = cg
return
else:
count += 1
# A connection is feedforward iff it never points backwards in __node_order.
def __is_connection_feedforward(self, in_node, out_node):
return in_node.type == 'INPUT' or out_node.type == 'OUTPUT' or \
self.__node_order.index(in_node.id) < self.__node_order.index(out_node.id)
# Append num_hidden hidden nodes while keeping the topology feedforward.
def add_hidden_nodes(self, num_hidden):
id = len(self._node_genes)+1
for i in range(num_hidden):
node_gene = self._node_gene_type(id,
nodetype = 'HIDDEN',
activation_type = config.Config.nn_activation)
self._node_genes.append(node_gene)
self.__node_order.append(node_gene.id)
id += 1
# Connect all input nodes to it
for pre in self._node_genes[:self._input_nodes]:
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(pre.id, node_gene.id, weight, True)
self._connection_genes[cg.key] = cg
assert self.__is_connection_feedforward(pre, node_gene)
# Connect all previous hidden nodes to it
for pre_id in self.__node_order[:-1]:
assert pre_id != node_gene.id
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(pre_id, node_gene.id, weight, True)
self._connection_genes[cg.key] = cg
# Connect it to all output nodes
for post in self._node_genes[self._input_nodes:(self._input_nodes + self._output_nodes)]:
assert post.type == 'OUTPUT'
weight = random.gauss(0, config.Config.weight_stdev)
cg = self._conn_gene_type(node_gene.id, post.id, weight, True)
self._connection_genes[cg.key] = cg
assert self.__is_connection_feedforward(node_gene, post)
def __str__(self):
s = super(FFChromosome, self).__str__()
s += '\nNode order: ' + str(self.__node_order)
return s
# Demo entry point: configures the module globals, builds a feedforward
# chromosome, and renders it. Note that create_fully_connected reads the
# module-level node_gene_type/conn_gene_type assigned below.
if __name__ == '__main__':
# Example
import visualize
# define some attributes
node_gene_type = genome.NodeGene # standard neuron model
conn_gene_type = genome.ConnectionGene # and connection link
config.Config.nn_activation = 'exp' # activation function
config.Config.weight_stdev = 0.9 # weights distribution
config.Config.input_nodes = 2 # number of inputs
config.Config.output_nodes = 1 # number of outputs
# creates a chromosome for recurrent networks
#c1 = Chromosome.create_fully_connected()
# creates a chromosome for feedforward networks
c2 = FFChromosome.create_fully_connected()
# add two hidden nodes
c2.add_hidden_nodes(2)
# apply some mutations
#c2._mutate_add_node()
#c2._mutate_add_connection()
# check the result
#visualize.draw_net(c1) # for recurrent nets
visualize.draw_ff(c2) # for feedforward nets
# print the chromosome
print(c2)
| gpl-3.0 |
rabipanda/tensorflow | tensorflow/contrib/training/python/training/bucket_ops.py | 48 | 18717 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for bucketing data into groups.
The classes and functions in this module are used to queue up data into
buckets conditional on side information (e.g. sequence length).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_py
from tensorflow.python.training import queue_runner
# pylint: disable=protected-access
_as_original_type = input_py._as_original_type
_as_tensor_list = input_py._as_tensor_list
_restore_sparse_tensors = input_py._restore_sparse_tensors
_dtypes = input_py._dtypes
_store_sparse_tensors = input_py._store_sparse_tensors
_validate_keep_input = input_py._validate_keep_input
_shapes = input_py._shapes
_which_queue = input_py._which_queue
# pylint: enable=protected-access
def _validate_bucket(tensor_list):
    """Convert inputs to tensors/indexed-slices, rejecting an empty list.

    Raises:
      ValueError: if `tensor_list` is empty after conversion.
    """
    converted = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
    if not converted:
        raise ValueError("Expected at least one tensor in bucket().")
    return converted
def bucket(tensors,
           which_bucket,
           batch_size,
           num_buckets,
           num_threads=1,
           capacity=32,
           bucket_capacities=None,
           shapes=None,
           dynamic_pad=False,
           allow_smaller_final_batch=False,
           keep_input=True,
           shared_name=None,
           name=None):
  """Lazy bucketing of input tensors according to `which_bucket`.

  The argument `tensors` can be a list or a dictionary of tensors.
  The value returned by the function will be of the same type
  as `tensors`.

  The tensors entering this function are put into the bucket given by
  `which_bucket`. Each bucket has its own queue. When a bucket contains
  `batch_size` elements, this minibatch is pushed onto a top queue. The
  tensors returned from this function are the result of dequeueing the
  next minibatch from this top queue.

  This function is implemented using several queues. A `QueueRunner` for the
  queues is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  As the returned tensors are the result of a dequeue operation, evaluating
  them will throw a `tf.errors.OutOfRangeError` when the input queue is
  exhausted. If these tensors are feeding another input queue, its queue runner
  will catch this exception, however, if they are used in your main thread
  you are responsible for catching this yourself.

  *N.B.:* If `dynamic_pad` is `False`, you must ensure that either
  (i) the `shapes` argument is passed, or (ii) all of the tensors in
  `tensors` must have fully-defined shapes. `ValueError` will be
  raised if neither of these conditions holds.

  If `dynamic_pad` is `True`, it is sufficient that the *rank* of the
  tensors is known, but individual dimensions may have shape `None`.
  In this case, for each enqueue the dimensions with value `None`
  may have a variable length; upon dequeue, the output tensors will be padded
  on the right to the maximum shape of the tensors in the current minibatch.
  For numbers, this padding takes value 0. For strings, this padding is
  the empty string. See `PaddingFIFOQueue` for more info.

  If `allow_smaller_final_batch` is `True`, a smaller batch value than
  `batch_size` is returned when the queues are closed and there are not enough
  elements to fill the batch, otherwise the pending elements are discarded.
  In addition, all output tensors' static shapes, as accessed via the
  `get_shape()` method will have a 0th `Dimension` value of `None`, and
  operations that depend on fixed batch_size would fail.

  Args:
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket. Nested lists are not supported.
    which_bucket: An `int32` scalar Tensor taking a value in `[0, num_buckets)`.
    batch_size: The new batch size pulled from the queue (all queues will have
      the same size). If a list is passed in then each bucket will have a
      different batch_size.
      (python int, int32 scalar or iterable of integers of length num_buckets).
    num_buckets: A python integer, the number of buckets.
    num_threads: An integer. The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also (by default) the maximum number of elements within each bucket.
    bucket_capacities: (Optional) None or a list of integers, the capacities of
      each bucket. If None, capacity is used (default). If specified, it must
      be a list of integers of length num_buckets: the i-th element is used
      as capacity for the i-th bucket queue.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: A `bool` scalar Tensor. If provided, this tensor controls
      whether the input is added to the queue or not. If it evaluates `True`,
      then `tensors` are added to the bucket; otherwise they are dropped. This
      tensor essentially acts as a filtering mechanism.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(bucket, outputs)` where `bucket` is
    a `int32` scalar tensor and `outputs` is a list or
    dictionary of batched outputs corresponding to elements of `tensors`.
    Every step will receive a new bucket of outputs.

  Raises:
    ValueError: If the `shapes` are not specified, and cannot be
      inferred from the elements of `tensors` or if batch_size is a sequence
      but its length != num_buckets. Also if bucket_capacities is not None but
      its length != num_buckets.
  """
  batch_size_per_bucket = False
  if isinstance(batch_size, (list, tuple)):
    batch_size_per_bucket = True
    if len(batch_size) != num_buckets:
      raise ValueError(
          "If batch_size is a list it must have num_buckets elements")
  else:
    # Scalar batch_size: replicate so each bucket dequeues the same amount.
    batch_size = [batch_size] * num_buckets
  if bucket_capacities is None:
    bucket_capacities = [capacity] * num_buckets
  if len(bucket_capacities) != num_buckets:
    raise ValueError(
        "The list bucket_capacities (%s) must have exactly num_buckets (%d) "
        "elements." % (str(bucket_capacities), num_buckets))

  tensor_list = _as_tensor_list(tensors)
  with ops.name_scope(name, "bucket", tensor_list) as name:
    tensor_list = _validate_bucket(tensor_list)
    keep_input = _validate_keep_input(keep_input, enqueue_many=False)
    (tensor_list, sparse_info) = _store_sparse_tensors(
        tensor_list, enqueue_many=False, keep_input=keep_input)

    # Round-trip batch_size to a tensor, and possibly back; keep the static
    # value when it is known so output shapes can be fully defined.
    for i, bucket_batch_size in enumerate(batch_size):
      bucket_batch_size = ops.convert_to_tensor(
          bucket_batch_size, dtype=dtypes.int32, name="batch_size")
      static_batch_size = tensor_util.constant_value(bucket_batch_size)
      batch_size[i] = (static_batch_size if static_batch_size is not None else
                       bucket_batch_size)

    types = _dtypes([tensor_list])
    shapes = _shapes([tensor_list], shapes, enqueue_many=False)

    which_bucket = ops.convert_to_tensor(
        which_bucket, dtype=dtypes.int32, name="which_bucket")

    # One queue per bucket; dynamic_pad selects a PaddingFIFOQueue.
    queue_creator = _which_queue(dynamic_pad)
    bucket_queues = []
    for i in range(num_buckets):
      shared_name_i = ("%s_%d" % (shared_name, i) if shared_name is not None
                       else None)
      bucket_queues.append(
          queue_creator(
              capacity=bucket_capacities[i],
              dtypes=types,
              shapes=shapes,
              shared_name=shared_name_i,
              name="bucket_queue_%d" % i))

    # NOTE: with a scalar batch_size every entry of `batch_size` shares the
    # same static value, so reusing the loop-final `static_batch_size` here
    # is safe; with per-bucket sizes we fall back to None anyway.
    maybe_static_batch_size = (
        None if (allow_smaller_final_batch or batch_size_per_bucket)
        else static_batch_size)

    bucket_shapes = [
        tensor_shape.vector(maybe_static_batch_size).concatenate(s)
        for s in bucket_queues[0].shapes
    ]
    # top_queue is a PaddingFIFOQueue even if the bucket queues are regular
    # FIFO queues because if we use allow_smaller_final_batch, shapes will
    # contain Nones in their first entry; as a result, a regular
    # FIFOQueue would die when being passed shapes that are not fully defined.
    top_queue = data_flow_ops.PaddingFIFOQueue(
        capacity=capacity,
        dtypes=[dtypes.int32] + types,
        shapes=[tensor_shape.scalar()] + bucket_shapes,
        shared_name=shared_name,
        name="top_queue")

    def enqueue_which():
      """Return an op that enqueues conditionally in one of the queues."""
      def enqueue_single(i):
        return bucket_queues[i].enqueue(tensor_list)
      enqueues = [
          control_flow_ops.cond(
              math_ops.equal(which_bucket, i),
              functools.partial(enqueue_single, i), control_flow_ops.no_op)
          for i in range(num_buckets)
      ]
      return control_flow_ops.group(*enqueues, name="group_enqueues")

    # keep_input gates the enqueue entirely (filtering mechanism).
    maybe_enqueue = utils.smart_cond(
        keep_input,
        enqueue_which,
        control_flow_ops.no_op)

    bucket_enqueue_ops = [maybe_enqueue] * num_threads

    if allow_smaller_final_batch:
      which_dequeue = lambda q: q.dequeue_up_to
    else:
      which_dequeue = lambda q: q.dequeue_many

    def make_list(t):
      if isinstance(t, (list, tuple)):
        return t
      else:
        return [t]

    # Each bucket feeds the top queue a (bucket_index, minibatch) record.
    enqueues_to_top = [
        top_queue.enqueue(
            [constant_op.constant(i)] + make_list(which_dequeue(q)(
                bs, name="read_bucket_%d" % i)),
            name="enqueue_from_bucket_%d" % i)
        for i, (q, bs) in enumerate(zip(bucket_queues, batch_size))
    ]

    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            bucket_queues[0], enqueues_to_top,
            close_op=top_queue.close(),
            cancel_op=top_queue.close(cancel_pending_enqueues=True),
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            top_queue,
            bucket_enqueue_ops,
            close_op=control_flow_ops.group(
                *[q.close() for q in bucket_queues]),
            cancel_op=control_flow_ops.group(
                *[q.close(cancel_pending_enqueues=True)
                  for q in bucket_queues]),
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))

    for q in bucket_queues:
      # BUG FIX: report each bucket queue's own size. The original code
      # summarized top_queue.size() under every per-bucket label
      # "bucket/<q.name>/size", so all per-bucket summaries were identical
      # and never reflected the bucket queues' fill levels.
      summary.scalar("bucket/%s/size" % q.name,
                     math_ops.cast(q.size(), dtypes.float32))
    summary.scalar("bucket/%s/fraction_of_%d_full" % (top_queue.name, capacity),
                   math_ops.cast(top_queue.size(), dtypes.float32) *
                   (1. / capacity))

    dequeued = top_queue.dequeue(name="dequeue_top")
    which_bucket_dequeued = dequeued[0]
    dequeued = dequeued[1:]
    if len(dequeued) == 1:
      dequeued = dequeued[0]
    dequeued = _restore_sparse_tensors(dequeued, sparse_info)
    return (which_bucket_dequeued, _as_original_type(tensors, dequeued))
def bucket_by_sequence_length(input_length,
                              tensors,
                              batch_size,
                              bucket_boundaries,
                              num_threads=1,
                              capacity=32,
                              bucket_capacities=None,
                              shapes=None,
                              dynamic_pad=False,
                              allow_smaller_final_batch=False,
                              keep_input=True,
                              shared_name=None,
                              name=None):
  """Lazy bucketing of inputs according to their length.

  This method calls `tf.contrib.training.bucket` under the hood, after first
  subdividing the bucket boundaries into separate buckets and identifying which
  bucket the given `input_length` belongs to. See the documentation for
  `which_bucket` for details of the other arguments.

  Args:
    input_length: `int32` scalar `Tensor`, the sequence length of tensors.
    tensors: The list or dictionary of tensors, representing a single element,
      to bucket. Nested lists are not supported.
    batch_size: The new batch size pulled from the queue (all queues will have
      the same size). If a list is passed in then each bucket will have a
      different batch_size.
      (python int, int32 scalar or iterable of integers of length num_buckets).
    bucket_boundaries: int list, increasing non-negative numbers.
      The edges of the buckets to use when bucketing tensors. Two extra buckets
      are created, one for `input_length < bucket_boundaries[0]` and
      one for `input_length >= bucket_boundaries[-1]`.
    num_threads: An integer. The number of threads enqueuing `tensors`.
    capacity: An integer. The maximum number of minibatches in the top queue,
      and also the maximum number of elements within each bucket.
    bucket_capacities: (Optional) None or a list of integers, the capacities of
      each bucket. If None, capacity is used (default). If specified, it must
      be a list of integers of length one larger than bucket_boundaries.
      Its i-th element is used as capacity for the i-th bucket queue.
    shapes: (Optional) The shapes for each example. Defaults to the
      inferred shapes for `tensors`.
    dynamic_pad: Boolean. Allow variable dimensions in input shapes.
      The given dimensions are padded upon dequeue so that tensors within a
      batch have the same shapes.
    allow_smaller_final_batch: (Optional) Boolean. If `True`, allow the final
      batches to be smaller if there are insufficient items left in the queues.
    keep_input: A `bool` scalar Tensor. If provided, this tensor controls
      whether the input is added to the queue or not. If it evaluates `True`,
      then `tensors` are added to the bucket; otherwise they are dropped. This
      tensor essentially acts as a filtering mechanism.
    shared_name: (Optional). If set, the queues will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A tuple `(sequence_length, outputs)` where `sequence_length` is
    a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
    of batched, bucketed, outputs corresponding to elements of `tensors`.

  Raises:
    TypeError: if `bucket_boundaries` is not a list of python integers.
    ValueError: if `bucket_boundaries` is empty or contains non-increasing
      values or if batch_size is a list and its length doesn't equal the number
      of buckets.
  """
  tensor_list = _as_tensor_list(tensors)
  if not isinstance(bucket_boundaries, (list, tuple)):
    raise TypeError(
        "bucket_boundaries must be a list or tuple, but received: %s" %
        bucket_boundaries)
  if not bucket_boundaries:
    raise ValueError("bucket_boundaries must not be empty")
  # Pairwise scan validates that boundaries are integer and strictly
  # increasing before any graph ops are created.
  for (s, e) in zip(bucket_boundaries[:-1], bucket_boundaries[1:]):
    if not isinstance(s, int) or not isinstance(e, int):
      raise TypeError("bucket boundaries must be integers, but saw: %s and %s" %
                      (s, e))
    if s >= e:
      raise ValueError(
          "Buckets must contain sequential increasing lengths, but saw: "
          "%d before %d" % (s, e))

  with ops.name_scope(name, "bucket_by_sequence_length",
                      [input_length] + tensor_list) as name:
    input_length = ops.convert_to_tensor(
        input_length, dtype=dtypes.int32, name="input_length")
    # Bucketing conditions are:
    #   l < b[0]
    #   b[0] <= l < b[1]
    #   b[1] <= l < b[2]
    #   ...
    #   b[N-2] <= l < b[N-1]
    #   b[N-1] <= l
    # Equivalent to:
    #   [-inf, b[0], b[1], ..., b[N-1]] <= l < [b[0], b[1], ..., b[N-1], inf]
    # int32 min/max stand in for -inf/+inf so the comparisons stay integral.
    buckets_min = [np.iinfo(np.int32).min] + list(bucket_boundaries)
    buckets_max = list(bucket_boundaries) + [np.iinfo(np.int32).max]
    conditions_c = math_ops.logical_and(
        math_ops.less_equal(buckets_min, input_length),
        math_ops.less(input_length, buckets_max))
    # Exactly one condition holds; reduce_min(where(...)) extracts its index.
    which_bucket = math_ops.reduce_min(array_ops.where(conditions_c))
    which_bucket = math_ops.to_int32(which_bucket)

    if shapes is not None:
      # input_length is prepended below, so prepend its (scalar) shape too.
      shapes = [tensor_shape.scalar()] + shapes

    _, dequeued = bucket(
        tensors=[input_length] + tensor_list,
        which_bucket=which_bucket,
        batch_size=batch_size,
        num_buckets=len(bucket_boundaries) + 1,
        num_threads=num_threads,
        capacity=capacity,
        bucket_capacities=bucket_capacities,
        shapes=shapes,
        dynamic_pad=dynamic_pad,
        allow_smaller_final_batch=allow_smaller_final_batch,
        keep_input=keep_input,
        shared_name=shared_name)
    return (dequeued[0], _as_original_type(tensors, dequeued[1:]))
__all__ = ["bucket", "bucket_by_sequence_length"]
| apache-2.0 |
kakunbsc/enigma2.4 | lib/python/Tools/NumericalTextInput.py | 5 | 3518 | # -*- coding: utf-8 -*-
from enigma import eTimer
from Components.Language import language
class NumericalTextInput:
    """Multi-tap (phone keypad) text input helper.

    Digit keys 0-9 map to per-language character cycles.  Repeated presses
    of the same key cycle through its characters; pressing a different key
    or letting the timeout expire commits the pending character.
    """

    def __init__(self, nextFunc=None, handleTimeout=True, search=False):
        self.mapping = []
        self.lang = language.getLanguage()
        self.useableChars = None
        self.nextFunction = nextFunc
        if handleTimeout:
            self.timer = eTimer()
            self.timer.callback.append(self.timeout)
        else:
            self.timer = None
        self.lastKey = -1
        self.pos = -1
        if search:
            # Simplified, language-independent mapping for search fields.
            self.mapping.append(u"%_0")  # 0
            self.mapping.append(u" 1")  # 1
            self.mapping.append(u"abc2")  # 2
            self.mapping.append(u"def3")  # 3
            self.mapping.append(u"ghi4")  # 4
            self.mapping.append(u"jkl5")  # 5
            self.mapping.append(u"mno6")  # 6
            self.mapping.append(u"pqrs7")  # 7
            self.mapping.append(u"tuv8")  # 8
            self.mapping.append(u"wxyz9")  # 9
            return
        if self.lang == 'de_DE':
            self.mapping.append(u".,?'+\"0-()@/:_$!")  # 0
            self.mapping.append(u" 1")  # 1
            self.mapping.append(u"aäbc2AÄBC")  # 2
            self.mapping.append(u"def3DEF")  # 3
            self.mapping.append(u"ghi4GHI")  # 4
            self.mapping.append(u"jkl5JKL")  # 5
            self.mapping.append(u"mnoö6MNOÖ")  # 6
            self.mapping.append(u"pqrsß7PQRSß")  # 7
            self.mapping.append(u"tuüv8TUÜV")  # 8
            self.mapping.append(u"wxyz9WXYZ")  # 9
        elif self.lang == 'es_ES':
            self.mapping.append(u".,?'+\"0-()@/:_$!")  # 0
            self.mapping.append(u" 1")  # 1
            self.mapping.append(u"abcáà2ABCÁÀ")  # 2
            self.mapping.append(u"deéèf3DEFÉÈ")  # 3
            self.mapping.append(u"ghiíì4GHIÍÌ")  # 4
            self.mapping.append(u"jkl5JKL")  # 5
            self.mapping.append(u"mnñoóò6MNÑOÓÒ")  # 6
            self.mapping.append(u"pqrs7PQRS")  # 7
            self.mapping.append(u"tuvúù8TUVÚÙ")  # 8
            self.mapping.append(u"wxyz9WXYZ")  # 9
        # BUG FIX: this branch was a separate `if`, so its `else` (the default
        # mapping) ALSO ran for de_DE/es_ES and appended a second, unused
        # 10-entry mapping.  `elif` makes the language branches exclusive.
        elif self.lang in ('sv_SE', 'fi_FI'):
            self.mapping.append(u".,?'+\"0-()@/:_$!")  # 0
            self.mapping.append(u" 1")  # 1
            self.mapping.append(u"abcåä2ABCÅÄ")  # 2
            self.mapping.append(u"defé3DEFÉ")  # 3
            self.mapping.append(u"ghi4GHI")  # 4
            self.mapping.append(u"jkl5JKL")  # 5
            self.mapping.append(u"mnoö6MNOÖ")  # 6
            self.mapping.append(u"pqrs7PQRS")  # 7
            self.mapping.append(u"tuv8TUV")  # 8
            self.mapping.append(u"wxyz9WXYZ")  # 9
        else:
            # Default (English-style) mapping.
            self.mapping.append(u".,?'+\"0-()@/:_$!")  # 0
            self.mapping.append(u" 1")  # 1
            self.mapping.append(u"abc2ABC")  # 2
            self.mapping.append(u"def3DEF")  # 3
            self.mapping.append(u"ghi4GHI")  # 4
            self.mapping.append(u"jkl5JKL")  # 5
            self.mapping.append(u"mno6MNO")  # 6
            self.mapping.append(u"pqrs7PQRS")  # 7
            self.mapping.append(u"tuv8TUV")  # 8
            self.mapping.append(u"wxyz9WXYZ")  # 9

    def setUseableChars(self, useable):
        # Restrict getKey() output to this character set (None = unrestricted).
        self.useableChars = useable

    def getKey(self, num):
        """Handle a press of digit key `num` and return the selected character.

        Repeated presses of the same key cycle through its mapping entry;
        pressing a different key first commits the pending character via
        nextChar().  Returns None when a useableChars filter excludes every
        candidate for this key.
        """
        cnt = 0
        if self.lastKey != num:
            if self.lastKey != -1:
                # Different key pressed: commit the previous character.
                self.nextChar()
            self.lastKey = num
            self.pos = -1
        if self.timer is not None:
            # Commit automatically after 1s without another press.
            self.timer.start(1000, True)
        while True:
            self.pos += 1
            if len(self.mapping[num]) <= self.pos:
                self.pos = 0  # wrap around the cycle
            if self.useableChars:
                pos = self.useableChars.find(self.mapping[num][self.pos])
                if pos == -1:
                    cnt += 1
                    if cnt < len(self.mapping[num]):
                        continue
                    else:
                        # No character of this key is allowed.
                        return None
            break
        return self.mapping[num][self.pos]

    def nextKey(self):
        # Commit the pending character: stop the timeout, reset key state.
        if self.timer is not None:
            self.timer.stop()
        self.lastKey = -1

    def nextChar(self):
        self.nextKey()
        if self.nextFunction:
            self.nextFunction()

    def timeout(self):
        # Timer callback: commit the pending character after inactivity.
        if self.lastKey != -1:
            self.nextChar()
| gpl-2.0 |
madan96/sympy | sympy/core/tests/test_eval_power.py | 32 | 12168 | from sympy.core import (
Rational, Symbol, S, Float, Integer, Number, Pow,
Basic, I, nan, pi, symbols, oo, zoo)
from sympy.core.tests.test_evalf import NS
from sympy.functions.elementary.miscellaneous import sqrt, cbrt
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.trigonometric import sin, cos
from sympy.series.order import O
from sympy.utilities.pytest import XFAIL
def test_rational():
    """Fractional powers of a Rational agree with hand-built equivalents."""
    base = Rational(1, 5)
    # sqrt(1/5) == sqrt(5)/5, also under a scalar factor.
    expected = sqrt(5)/5
    assert sqrt(base) == expected
    assert 2*sqrt(base) == 2*expected
    expected = base*base**Rational(1, 2)
    assert base**Rational(3, 2) == expected
    assert 2*base**Rational(3, 2) == 2*expected
    expected = base**5*base**Rational(2, 3)
    assert base**Rational(17, 3) == expected
    assert 2*base**Rational(17, 3) == 2*expected
def test_large_rational():
    """Cube root of a huge exact Rational still factors cleanly."""
    huge = Rational(123712**12 - 1, 7) + Rational(1, 7)
    expected = 234232585392159195136 * (Rational(1, 7)**Rational(1, 3))
    assert huge**Rational(1, 3) == expected
def test_negative_real():
    """1/Float(-0.5) evaluates numerically to -2 (within 1e-10)."""
    result = S.One / Float(-0.5)
    assert abs(result - (-Integer(2))) < 1E-10
def test_expand():
    """expand() splits the constant factor out of 2**(-1 - x)."""
    t = Symbol('x')
    assert (2**(-1 - t)).expand() == Rational(1, 2)*2**(-t)
def test_issue_3449():
    """Simplification rules for nested powers (see also issue 3995)."""
    #test if powers are simplified correctly
    #see also issue 3995
    x = Symbol('x')
    # Rational inner exponent combines; integer inner exponent does not
    # combine with a non-integer outer one.
    assert ((x**Rational(1, 3))**Rational(2)) == x**Rational(2, 3)
    assert (
        (x**Rational(3))**Rational(2, 5)) == (x**Rational(3))**Rational(2, 5)

    # Real (possibly negative) base: combining exponents is only allowed
    # where branch cuts cannot change the value.
    a = Symbol('a', real=True)
    b = Symbol('b', real=True)
    assert (a**2)**b == (abs(a)**b)**2
    assert sqrt(1/a) != 1/sqrt(a)  # e.g. for a = -1
    assert (a**3)**Rational(1, 3) != a
    assert (x**a)**b != x**(a*b)  # e.g. x = -1, a=2, b=1/2
    assert (x**.5)**b == x**(.5*b)
    assert (x**.5)**.5 == x**.25
    assert (x**2.5)**.5 != x**1.25  # e.g. for x = 5*I

    # Integer exponents always combine.
    k = Symbol('k', integer=True)
    m = Symbol('m', integer=True)
    assert (x**k)**m == x**(k*m)
    assert Number(5)**Rational(2, 3) == Number(25)**Rational(1, 3)

    assert (x**.5)**2 == x**1.0
    assert (x**2)**k == (x**k)**2 == x**(2*k)

    # Positive base: fractional exponents combine freely.
    a = Symbol('a', positive=True)
    assert (a**3)**Rational(2, 5) == a**Rational(6, 5)
    assert (a**2)**b == (a**b)**2
    assert (a**Rational(2, 3))**x == (a**(2*x/3)) != (a**x)**Rational(2, 3)
def test_issue_3866():
    """Double unary negation of a radical expression is a no-op."""
    expr = sqrt(sqrt(5) - 1)
    assert --expr == expr
def test_negative_one():
    """1/x**y rewrites as x**(-y) for complex symbols."""
    base, expo = symbols('x y', complex=True)
    assert 1/base**expo == base**(-expo)
def test_issue_4362():
    """as_numer_denom() must track signs when splitting roots of quotients."""
    neg = Symbol('neg', negative=True)
    nonneg = Symbol('nonneg', nonnegative=True)
    any = Symbol('any')
    # sqrt(1/neg) factors out I; sqrt(1/nonneg) splits plainly; an
    # unassumed symbol cannot be split at all.
    num, den = sqrt(1/neg).as_numer_denom()
    assert num == sqrt(-1)
    assert den == sqrt(-neg)
    num, den = sqrt(1/nonneg).as_numer_denom()
    assert num == 1
    assert den == sqrt(nonneg)
    num, den = sqrt(1/any).as_numer_denom()
    assert num == sqrt(1/any)
    assert den == 1

    def eqn(num, den, pow):
        return (num/den)**pow
    # Fixed-sign numerators/denominators used in the matrix of cases below.
    npos = 1
    nneg = -1
    dpos = 2 - sqrt(3)
    dneg = 1 - sqrt(3)
    assert dpos > 0 and dneg < 0 and npos > 0 and nneg < 0
    # pos or neg integer
    eq = eqn(npos, dpos, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
    eq = eqn(npos, dneg, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
    eq = eqn(nneg, dpos, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dpos**2)
    eq = eqn(nneg, dneg, 2)
    assert eq.is_Pow and eq.as_numer_denom() == (1, dneg**2)
    eq = eqn(npos, dpos, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
    eq = eqn(npos, dneg, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
    eq = eqn(nneg, dpos, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**2, 1)
    eq = eqn(nneg, dneg, -2)
    assert eq.is_Pow and eq.as_numer_denom() == (dneg**2, 1)
    # pos or neg rational
    pow = S.Half
    eq = eqn(npos, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
    eq = eqn(npos, dneg, pow)
    assert eq.is_Pow is False and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
    eq = eqn(nneg, dpos, pow)
    assert not eq.is_Pow or eq.as_numer_denom() == (nneg**pow, dpos**pow)
    eq = eqn(nneg, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
    eq = eqn(npos, dpos, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, npos**pow)
    eq = eqn(npos, dneg, -pow)
    assert eq.is_Pow is False and eq.as_numer_denom() == (-(-npos)**pow*(-dneg)**pow, npos)
    eq = eqn(nneg, dpos, -pow)
    assert not eq.is_Pow or eq.as_numer_denom() == (dpos**pow, nneg**pow)
    eq = eqn(nneg, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)
    # unknown exponent
    pow = 2*any
    eq = eqn(npos, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (npos**pow, dpos**pow)
    eq = eqn(npos, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-npos)**pow, (-dneg)**pow)
    eq = eqn(nneg, dpos, pow)
    assert eq.is_Pow and eq.as_numer_denom() == (nneg**pow, dpos**pow)
    eq = eqn(nneg, dneg, pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-nneg)**pow, (-dneg)**pow)
    eq = eqn(npos, dpos, -pow)
    assert eq.as_numer_denom() == (dpos**pow, npos**pow)
    eq = eqn(npos, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-npos)**pow)
    eq = eqn(nneg, dpos, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == (dpos**pow, nneg**pow)
    eq = eqn(nneg, dneg, -pow)
    assert eq.is_Pow and eq.as_numer_denom() == ((-dneg)**pow, (-nneg)**pow)

    # Symbolic denominators with sign assumptions.
    x = Symbol('x')
    y = Symbol('y')
    assert ((1/(1 + x/3))**(-S.One)).as_numer_denom() == (3 + x, 3)
    notp = Symbol('notp', positive=False)  # not positive does not imply real
    b = ((1 + x/notp)**-2)
    assert (b**(-y)).as_numer_denom() == (1, b**y)
    assert (b**(-S.One)).as_numer_denom() == ((notp + x)**2, notp**2)
    nonp = Symbol('nonp', nonpositive=True)
    assert (((1 + x/nonp)**-2)**(-S.One)).as_numer_denom() == ((-nonp -
        x)**2, nonp**2)

    n = Symbol('n', negative=True)
    assert (x**n).as_numer_denom() == (1, x**-n)
    assert sqrt(1/n).as_numer_denom() == (S.ImaginaryUnit, sqrt(-n))
    n = Symbol('0 or neg', nonpositive=True)
    # if x and n are split up without negating each term and n is negative
    # then the answer might be wrong; if n is 0 it won't matter since
    # 1/oo and 1/zoo are both zero as is sqrt(0)/sqrt(-x) unless x is also
    # zero (in which case the negative sign doesn't matter):
    # 1/sqrt(1/-1) = -I but sqrt(-1)/sqrt(1) = I
    assert (1/sqrt(x/n)).as_numer_denom() == (sqrt(-n), sqrt(-x))
    c = Symbol('c', complex=True)
    e = sqrt(1/c)
    assert e.as_numer_denom() == (e, 1)
    i = Symbol('i', integer=True)
    assert (((1 + x/y)**i)).as_numer_denom() == ((x + y)**i, y**i)
def test_Pow_signs():
    """Cf. issues 4595 and 5250: (a - b)**n must not collapse to (b - a)**n."""
    u, v = symbols('x y')
    even = Symbol('n', even=True)
    assert (3 - v)**2 != (v - 3)**2
    assert (3 - v)**even != (v - 3)**even
    assert (-3 + v - u)**2 != (3 - v + u)**2
    assert (v - 3)**3 != -(3 - v)**3
def test_power_with_noncommutative_mul_as_base():
    """Integer powers do not distribute over a noncommutative product."""
    a, b = symbols('x y', commutative=False)
    assert (a*b)**3 != a**3*b**3
    # Commutative scalar factors still come out of the power.
    assert (2*a*b)**3 == 8*(a*b)**3
def test_zero():
    """0**x handling: exponent rescaling and negative exponents."""
    u, v = symbols('x y')
    assert 0**u != 0
    # Positive scalings of the exponent collapse onto 0**x.
    for scale in (2, 1.0, 2.0):
        assert 0**(scale*u) == 0**u
    assert (0**(2 - u)).as_base_exp() == (0, 2 - u)
    assert 0**(u - 2) != S.Infinity**(2 - u)
    assert 0**(2*u*v) == 0**(u*v)
    # A negated exponent flips 0 to complex infinity.
    assert 0**(-2*u*v) == S.ComplexInfinity**(u*v)
def test_pow_as_base_exp():
    """as_base_exp() canonicalizes 1/q bases; Pow(1, 2) survives unevaluated."""
    x = Symbol('x')
    assert (S.Infinity**(2 - x)).as_base_exp() == (S.Infinity, 2 - x)
    assert (S.Infinity**(x - 2)).as_base_exp() == (S.Infinity, x - 2)
    p = S.Half**x
    # The raw args keep base 1/2; as_base_exp() reports the canonical base 2
    # with a negated exponent.
    assert (p.base, p.exp) == (S.Half, x)
    # BUG FIX: the original line read ``assert p.base, p.exp == ...`` -- the
    # comma made it an ``assert <expr>, <message>`` statement, so it only
    # checked the truthiness of p.base and the comparison was never tested.
    assert p.as_base_exp() == (S(2), -x)
    # issue 8344:
    assert Pow(1, 2, evaluate=False).as_base_exp() == (S(1), S(2))
def test_issue_6100():
    """x**1.0 compares equal to x but is a distinct object, never True."""
    s = Symbol('x')
    t = Symbol('y')
    assert s**1.0 == s
    assert s == s**1.0
    assert True != s**1.0
    assert s**1.0 is not True
    assert s is not True
    assert s*t == (s*t)**1.0
    assert (s**1.0)**1.0 == s
    assert (s**1.0)**2.0 == s**2
    base = Basic()
    assert Pow(base, 1.0, evaluate=False) == base
    # if the following gets distributed as a Mul (x**1.0*y**1.0 then
    # __eq__ methods could be added to Symbol and Pow to detect the
    # power-of-1.0 case.
    assert ((s*t)**1.0).func is Pow
def test_issue_6208():
    """Branch-cut bookkeeping when taking roots of complex powers."""
    from sympy import root, Rational
    I = S.ImaginaryUnit
    # sqrt of a complex power may pick up a sign from the principal branch.
    assert sqrt(33**(9*I/10)) == -33**(9*I/20)
    assert root((6*I)**(2*I), 3).as_base_exp()[1] == Rational(1, 3)  # != 2*I/3
    assert root((6*I)**(I/3), 3).as_base_exp()[1] == I/9
    assert sqrt(exp(3*I)) == exp(3*I/2)
    assert sqrt(-sqrt(3)*(1 + 2*I)) == sqrt(sqrt(3))*sqrt(-1 - 2*I)
    # exp(5*I) lies past the branch cut, so the root changes sign.
    assert sqrt(exp(5*I)) == -exp(5*I/2)
    assert root(exp(5*I), 3).exp == Rational(1, 3)
def test_issue_6990():
    """Series expansion of sqrt(a + b*x + x**2) about x=0 to second order."""
    x = Symbol('x')
    a = Symbol('a')
    b = Symbol('b')
    assert (sqrt(a + b*x + x**2)).series(x, 0, 3).removeO() == \
        b*x/(2*sqrt(a)) + x**2*(1/(2*sqrt(a)) - \
        b**2/(8*a**(S(3)/2))) + sqrt(a)
def test_issue_6068():
    """Puiseux series of sqrt(sin(...)) with half-integer exponents."""
    x = Symbol('x')
    assert sqrt(sin(x)).series(x, 0, 7) == \
        sqrt(x) - x**(S(5)/2)/12 + x**(S(9)/2)/1440 - \
        x**(S(13)/2)/24192 + O(x**7)
    assert sqrt(sin(x)).series(x, 0, 9) == \
        sqrt(x) - x**(S(5)/2)/12 + x**(S(9)/2)/1440 - \
        x**(S(13)/2)/24192 - 67*x**(S(17)/2)/29030400 + O(x**9)
    # Composition with x**3 scales the exponent grid accordingly.
    assert sqrt(sin(x**3)).series(x, 0, 19) == \
        x**(S(3)/2) - x**(S(15)/2)/12 + x**(S(27)/2)/1440 + O(x**19)
    assert sqrt(sin(x**3)).series(x, 0, 20) == \
        x**(S(3)/2) - x**(S(15)/2)/12 + x**(S(27)/2)/1440 - \
        x**(S(39)/2)/24192 + O(x**20)
def test_issue_6782():
    """sqrt(sin(x**k)) series truncated below the first correction term."""
    z = Symbol('x')
    assert sqrt(sin(z**3)).series(z, 0, 7) == z**(S(3)/2) + O(z**7)
    assert sqrt(sin(z**4)).series(z, 0, 3) == z**2 + O(z**3)
def test_issue_6653():
    """Series of 1/sqrt(1 + sin(x**2)) about x=0."""
    z = Symbol('x')
    expansion = (1 / sqrt(1 + sin(z**2))).series(z, 0, 3)
    assert expansion == 1 - z**2/2 + O(z**3)
def test_issue_6429():
    """Float-exponent powers: series() and taylor_term() of (c**2 + x)**0.5."""
    x = Symbol('x')
    c = Symbol('c')
    f = (c**2 + x)**(0.5)
    assert f.series(x, x0=0, n=1) == (c**2)**0.5 + O(x)
    assert f.taylor_term(0, x) == (c**2)**0.5
    assert f.taylor_term(1, x) == 0.5*x*(c**2)**(-0.5)
    assert f.taylor_term(2, x) == -0.125*x**2*(c**2)**(-1.5)
def test_issue_7638():
    """Roots of complex powers: when may exponents be combined safely?"""
    f = pi/log(sqrt(2))
    assert ((1 + I)**(I*f/2))**0.3 == (1 + I)**(0.15*I*f)
    # if 1/3 -> 1.0/3 this should fail since it cannot be shown that the
    # sign will be +/-1; for the previous "small arg" case, it didn't matter
    # that this could not be proved
    assert (1 + I)**(4*I*f) == ((1 + I)**(12*I*f))**(S(1)/3)
    assert (((1 + I)**(I*(1 + 7*f)))**(S(1)/3)).exp == S(1)/3
    r = symbols('r', real=True)
    # Even roots of real/complex powers keep sign information.
    assert sqrt(r**2) == abs(r)
    assert cbrt(r**3) != r
    assert sqrt(Pow(2*I, 5*S.Half)) != (2*I)**(5/S(4))
    p = symbols('p', positive=True)
    assert cbrt(p**2) == p**(2/S(3))
    assert NS(((0.2 + 0.7*I)**(0.7 + 1.0*I))**(0.5 - 0.1*I), 1) == '0.4 + 0.2*I'
    assert sqrt(1/(1 + I)) == sqrt((1 - I)/2)  # or 1/sqrt(1 + I)
    e = 1/(1 - sqrt(2))
    assert sqrt(e) == I/sqrt(-1 + sqrt(2))
    assert e**-S.Half == -I*sqrt(-1 + sqrt(2))
    assert sqrt((cos(1)**2 + sin(1)**2 - 1)**(3 + I)).exp == S.Half
    assert sqrt(r**(4/S(3))) != r**(2/S(3))
    assert sqrt((p + I)**(4/S(3))) == (p + I)**(2/S(3))
    assert sqrt((p - p**2*I)**2) == p - p**2*I
    assert sqrt((p + r*I)**2) != p + r*I
    # Small complex arguments: even powers extract, unknown signs do not.
    e = (1 + I/5)
    assert sqrt(e**5) == e**(5*S.Half)
    assert sqrt(e**6) == e**3
    assert sqrt((1 + I*r)**6) != (1 + I*r)**3
def test_issue_8582():
    """1**e is nan for every infinite exponent."""
    for expo in (oo, -oo, zoo, oo + I, 1 + I*oo, oo + I*oo):
        assert 1**expo is nan
def test_issue_8650():
    """Positivity of n**n and (5n+5)**(5(n+1)) for nonnegative integer n."""
    k = Symbol('n', integer=True, nonnegative=True)
    assert (k**k).is_positive is True
    base = 5*k + 5
    assert (base**(5*(k + 1))).is_positive is True
| bsd-3-clause |
dillonjerry/aws | AWS-ElasticBeanstalk-CLI-2.6.2/eb/linux/python3/lib/aws/requests/models.py | 21 | 21205 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.filepost import encode_multipart_formdata
from .exceptions import HTTPError, RequestException, MissingSchema, InvalidURL
from .utils import (
stream_untransfer, guess_filename, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len)
from .compat import (
cookielib, urlparse, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
# HTTP status codes that trigger redirect handling.
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
CONTENT_CHUNK_SIZE = 10 * 1024  # default chunk size (bytes) when streaming content
ITER_CHUNK_SIZE = 512  # default chunk size (bytes) for line iteration
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
    # Mixin supplying URL-path and body-encoding helpers for request objects.

    @property
    def path_url(self):
        """Build the path URL to use."""
        url = []

        p = urlsplit(self.url)

        # An empty path still needs a leading '/' on the wire.
        path = p.path
        if not path:
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """
        # Strings/bytes and file-like objects pass through untouched.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # A scalar value is treated as a one-element list.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        # Encode text to UTF-8 bytes before urlencoding.
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """
        # NOTE(review): when `data` is a str alongside files, this silently
        # returns None rather than raising -- presumably intentional here,
        # but confirm against callers before relying on it.
        if (not files) or isinstance(data, str):
            return None

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields first, each value coerced to its str form.
        for field, val in fields:
            if isinstance(val, list):
                for v in val:
                    new_fields.append((field, builtin_str(v)))
            else:
                new_fields.append((field, builtin_str(val)))

        for (k, v) in files:
            # support for explicit filename: value may be fileobj,
            # (filename, fileobj) or (filename, fileobj, content_type).
            ft = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                else:
                    fn, fp, ft = v
            else:
                fn = guess_filename(v) or k
                fp = v
            # Normalize raw str/bytes payloads to file-like objects.
            if isinstance(fp, str):
                fp = StringIO(fp)
            if isinstance(fp, bytes):
                fp = BytesIO(fp)

            if ft:
                new_v = (fn, fp.read(), ft)
            else:
                new_v = (fn, fp.read())
            new_fields.append((k, new_v))

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin providing hook (callback) registration for request objects.

    Expects ``self.hooks`` to be a dict mapping event names to lists of
    callables.
    """

    def register_hook(self, event, hook):
        """Properly register a hook.

        ``hook`` may be a single callable or an iterable of callables;
        non-callable members of an iterable are silently skipped.
        """
        # BUG FIX: the original used isinstance(hook, collections.Callable).
        # The collections ABC aliases were removed in Python 3.10 (they live
        # in collections.abc), so that raised AttributeError at call time.
        # The builtin callable() is the equivalent, version-safe check.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.

        Returns True if the hook existed, False if not.
        """
        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            # Hook was never registered for this event.
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.
    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.
    Usage::
        >>> import requests
        >>> req = requests.Request('GET', 'http://httpbin.org/get')
        >>> req.prepare()
        <PreparedRequest [GET]>
    """
    def __init__(self,
                 method=None,
                 url=None,
                 headers=None,
                 files=None,
                 data=None,
                 params=None,
                 auth=None,
                 cookies=None,
                 hooks=None):
        # BUGFIX: ``data`` and ``params`` previously defaulted to ``dict()``,
        # a mutable default evaluated once at definition time and shared
        # between all calls. ``None`` plus the normalization below is the
        # safe, equivalent form (both defaults are empty/falsy downstream).
        # Default empty dicts for dict params.
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks
        self.hooks = default_hooks()
        for (k, v) in list(hooks.items()):
            self.register_hook(event=k, hook=v)
        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.params = params
        self.auth = auth
        self.cookies = cookies
        # BUGFIX: a trailing ``self.hooks = hooks`` used to overwrite the
        # default_hooks() structure populated by the register_hook() loop
        # above, discarding every registered hook. The registered structure
        # is kept; prepare_hooks() handles lists of callables fine.
    def __repr__(self):
        return '<Request [%s]>' % (self.method)
    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        p = PreparedRequest()
        p.prepare_method(self.method)
        p.prepare_url(self.url, self.params)
        p.prepare_headers(self.headers)
        p.prepare_cookies(self.cookies)
        p.prepare_body(self.data, self.files)
        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.
        p.prepare_auth(self.auth)
        # This MUST go after prepare_auth. Authenticators could add a hook
        p.prepare_hooks(self.hooks)
        return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.
    Generated from either a :class:`Request <Request>` object or manually.
    Usage::
        >>> import requests
        >>> req = requests.Request('GET', 'http://httpbin.org/get')
        >>> r = req.prepare()
        <PreparedRequest [GET]>
        >>> s = requests.Session()
        >>> s.send(r)
        <Response [200]>
    """
    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)
    def prepare_method(self, method):
        """Prepares the given HTTP method."""
        # HTTP methods are case-sensitive per spec; normalize to upper-case.
        self.method = method
        if self.method is not None:
            self.method = self.method.upper()
    def prepare_url(self, url, params):
        """Prepares the given HTTP URL.

        Validates the scheme/netloc, IDNA-encodes unicode hostnames,
        byte-encodes components on Python 2, and merges ``params`` into
        the query string.
        """
        #: Accept objects that have string representations.
        try:
            url = unicode(url)
        except NameError:
            # We're on Python 3.
            url = str(url)
        except UnicodeDecodeError:
            pass
        # Support for unicode domain names and paths.
        scheme, netloc, path, _params, query, fragment = urlparse(url)
        if not (scheme and netloc):
            raise MissingSchema("Invalid URL %r: No schema supplied" % url)
        try:
            # IDNA-encode the hostname (punycode) for non-ASCII domains.
            netloc = netloc.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')
        # Bare domains aren't valid URLs.
        if not path:
            path = '/'
        if is_py2:
            # On Python 2, urlunparse chokes on mixed str/unicode parts;
            # force every component to UTF-8 bytes.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(_params, str):
                _params = _params.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')
        # Merge extra params into any query string already present in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params
        url = requote_uri(urlunparse([scheme, netloc, path, _params, query, fragment]))
        self.url = url
    def prepare_headers(self, headers):
        """Prepares the given HTTP headers."""
        if headers:
            self.headers = CaseInsensitiveDict(headers)
        else:
            self.headers = CaseInsensitiveDict()
    def prepare_body(self, data, files):
        """Prepares the given HTTP body data.

        Streams (generators/iterators) are sent chunked or with an explicit
        Content-Length; dicts/lists are form-encoded; ``files`` triggers a
        multipart/form-data body.
        """
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        # Nottin' on you.
        body = None
        content_type = None
        length = None
        is_stream = False
        # A "stream" is any iterable that is not a string, list or dict.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, basestring),
            not isinstance(data, list),
            not isinstance(data, dict)
        ])
        try:
            length = str(super_len(data))
        except (TypeError, AttributeError):
            length = False
        if is_stream:
            body = data
            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
            if length:
                self.headers['Content-Length'] = length
            else:
                # Unknown length: fall back to chunked transfer encoding.
                self.headers['Transfer-Encoding'] = 'chunked'
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    # Raw strings / file objects carry no implicit type;
                    # dicts and tuple-lists were form-encoded above.
                    if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'
            self.prepare_content_length(body)
            # Add content-type if it wasn't explicitly provided.
            if (content_type) and (not 'content-type' in self.headers):
                self.headers['Content-Type'] = content_type
        self.body = body
    def prepare_content_length(self, body):
        # Measure seekable bodies by seeking to the end, then rewind.
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            body.seek(0, 2)
            self.headers['Content-Length'] = str(body.tell())
            body.seek(0, 0)
        elif body is not None:
            self.headers['Content-Length'] = str(len(body))
        elif self.method not in ('GET', 'HEAD'):
            # Bodyless non-GET/HEAD requests still advertise a zero length.
            self.headers['Content-Length'] = '0'
    def prepare_auth(self, auth):
        """Prepares the given HTTP auth data."""
        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)
            # Allow auth to make its changes.
            r = auth(self)
            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)
            # Recompute Content-Length
            self.prepare_content_length(self.body)
    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data."""
        if isinstance(cookies, cookielib.CookieJar):
            cookies = cookies
        else:
            cookies = cookiejar_from_dict(cookies)
        # Never clobber an explicitly supplied Cookie header.
        if 'cookie' not in self.headers:
            cookie_header = get_cookie_header(cookies, self)
            if cookie_header is not None:
                self.headers['Cookie'] = cookie_header
    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """
    def __init__(self):
        super(Response, self).__init__()
        # ``False`` is the "not yet read" sentinel (``None`` means "no body").
        self._content = False
        self._content_consumed = False
        #: Integer Code of responded HTTP Status.
        self.status_code = None
        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()
        #: File-like object representation of response (for advanced usage).
        #: Requires that ``stream=True`` on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None
        #: Final URL location of Response.
        self.url = None
        #: Encoding to decode with when accessing r.text.
        self.encoding = None
        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []
        #: Textual reason of responded HTTP Status, e.g. "Not Found".
        self.reason = None
        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})
        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta)
        self.elapsed = datetime.timedelta(0)
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)
    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok
    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        # Python 2 spelling of __bool__.
        return self.ok
    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)
    @property
    def ok(self):
        # "OK" means raise_for_status() does not raise (status < 400).
        try:
            self.raise_for_status()
        except RequestException:
            return False
        return True
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the lovely Charade library
        (Thanks, Ian!)."""
        return chardet.detect(self.content)['encoding']
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. This avoids reading the content
        at once into memory for large responses. The chunk size is the number
        of bytes it should read into memory. This is not necessarily the
        length of each item returned as decoding can take place.
        """
        if self._content_consumed:
            # simulate reading small chunks of the content
            return iter_slices(self._content, chunk_size)
        def generate():
            while 1:
                chunk = self.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk
            self._content_consumed = True
        gen = stream_untransfer(generate(), self)
        if decode_unicode:
            gen = stream_decode_response_unicode(gen, self)
        return gen
    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
        """Iterates over the response data, one line at a time. This
        avoids reading the content at once into memory for large
        responses.
        """
        pending = None
        for chunk in self.iter_content(chunk_size=chunk_size,
                                       decode_unicode=decode_unicode):
            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()
            # If the chunk does not end on a line break, hold the last
            # partial line back until the next chunk arrives.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')
                # BUGFIX: this previously used ``self.status_code is 0`` —
                # identity comparison against an int literal is
                # implementation-dependent (and a SyntaxWarning on modern
                # Python); equality is the correct test.
                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
            except AttributeError:
                self._content = None
            self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
    @property
    def text(self):
        """Content of the response, in unicode.
        if Response.encoding is None and chardet module is available, encoding
        will be guessed.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding
        if not self.content:
            return str('')
        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding
        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')
        return content
    def json(self, **kwargs):
        """Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        """
        if not self.encoding and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                return json.loads(self.content.decode(encoding), **kwargs)
        return json.loads(self.text or self.content, **kwargs)
    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""
        header = self.headers['link']
        # l = MultiDict()
        l = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Key by the "rel" attribute when present, else by URL.
                key = link.get('rel') or link.get('url')
                l[key] = link
        return l
    def raise_for_status(self):
        """Raises stored :class:`HTTPError` or :class:`URLError`, if one occurred."""
        http_error_msg = ''
        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
        elif 500 <= self.status_code < 600:
            http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
        if http_error_msg:
            http_error = HTTPError(http_error_msg)
            http_error.response = self
            raise http_error
    def close(self):
        # Return the underlying urllib3 connection to the pool.
        return self.raw.release_conn()
| gpl-2.0 |
incaser/bank-payment | account_payment_partner/models/res_partner.py | 13 | 1816 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Account Payment Partner module for OpenERP
# Copyright (C) 2014 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class ResPartner(models.Model):
    """Extend ``res.partner`` with default payment modes per partner."""
    _inherit = 'res.partner'
    # Default payment mode used when paying this partner as a supplier.
    # company_dependent=True stores the value per company (property field);
    # the domain restricts choices to purchase-enabled modes.
    supplier_payment_mode = fields.Many2one(
        'payment.mode', string='Supplier Payment Mode', company_dependent=True,
        domain="[('purchase_ok', '=', True)]",
        help="Select the default payment mode for this supplier.")
    # Default payment mode used when collecting from this partner as a
    # customer; restricted to sale-enabled modes.
    customer_payment_mode = fields.Many2one(
        'payment.mode', string='Customer Payment Mode', company_dependent=True,
        domain="[('sale_ok', '=', True)]",
        help="Select the default payment mode for this customer.")
    @api.model
    def _commercial_fields(self):
        # Propagate both payment-mode fields from the commercial entity
        # (parent partner) down to its contacts.
        res = super(ResPartner, self)._commercial_fields()
        res += ['supplier_payment_mode', 'customer_payment_mode']
        return res
| agpl-3.0 |
Passtechsoft/TPEAlpGen | blender/release/scripts/addons_contrib/oscurart_futurism.py | 2 | 6431 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Add-on metadata dictionary read by Blender's add-on manager
# (name/version shown in the UI, minimum Blender version, doc links).
bl_info = {
    "name": "Futurism",
    "author": "Oscurart",
    "version": (1, 2),
    "blender": (2, 63, 0),
    "location": "Object > Futurism",
    "description": "Adds a new Mesh Object",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
                "Scripts/Object/Oscurart_Futurism",
    "tracker_url": "https://developer.blender.org/T31911",
    "category": "Add Mesh"}
import bpy
def object_osc_futurism(self, context, STEP, HOLD):
    """Duplicate the active object once every STEP frames of the scene range.

    Every copy is parented to a "FuturismContainer" empty and made visible
    only for HOLD frames, offset by the empty's FUTURISM_HOLDIN /
    FUTURISM_HOLDOUT custom properties, via animation drivers on ``hide``
    and ``hide_render``.
    """
    ACTOBJ = bpy.context.active_object  # object to duplicate
    FS = bpy.context.scene.frame_start
    FE = bpy.context.scene.frame_end
    OBJLIST = []                        # kept for compatibility (unused)
    FC = FS                             # current frame cursor
    OBJNUMBER = 1                       # kept for compatibility (unused)
    STEPINC = 0                         # frames elapsed since the last copy
    bpy.context.scene.frame_set(FS)
    OBACT = bpy.context.active_object
    # Create the container empty that carries the hold-in/hold-out settings.
    bpy.ops.object.add()
    bpy.context.active_object.name = "FuturismContainer"
    EMPTY = bpy.context.active_object
    # Custom properties referenced by the drivers below.
    EMPTY["FUTURISM_HOLDIN"] = 0
    EMPTY["FUTURISM_HOLDOUT"] = 0
    bpy.context.scene.objects.active = OBACT  # restore the active object
    for OBJETO in range((FE+1)-FS):
        if STEPINC == STEP:
            # Snapshot the active object's evaluated mesh at this frame.
            MESH = ACTOBJ.to_mesh(bpy.context.scene, True, 'PREVIEW')
            OBJECT = bpy.data.objects.new(ACTOBJ.name[:3]+str(FC), MESH)
            bpy.context.scene.objects.link(OBJECT)
            bpy.context.scene.frame_set(FC)
            # Viewport visibility driver: hidden unless the current frame is
            # within [FC + HOLDIN, FC + HOLD + HOLDOUT].
            OBJECT.driver_add("hide")
            OBJECT.animation_data.drivers[0].driver.variables.new()
            OBJECT.animation_data.drivers[0].driver.variables.new()
            OBJECT.animation_data.drivers[0].driver.variables.new()
            OBJECT.animation_data.drivers[0].driver.expression = "False if frame >= %s+var_001 and frame <= %s+var_002 else True" % (str(FC), str(FC+HOLD))
            OBJECT.animation_data.drivers[0].driver.variables[0].targets[0].id_type = 'SCENE'
            OBJECT.animation_data.drivers[0].driver.variables[0].targets[0].id = bpy.context.scene
            OBJECT.animation_data.drivers[0].driver.variables[0].targets[0].data_path = "current_frame"
            OBJECT.animation_data.drivers[0].driver.variables[1].targets[0].id_type = 'OBJECT'
            OBJECT.animation_data.drivers[0].driver.variables[1].targets[0].id = EMPTY
            OBJECT.animation_data.drivers[0].driver.variables[1].targets[0].data_path = '["FUTURISM_HOLDIN"]'
            OBJECT.animation_data.drivers[0].driver.variables[2].targets[0].id_type = 'OBJECT'
            OBJECT.animation_data.drivers[0].driver.variables[2].targets[0].id = EMPTY
            OBJECT.animation_data.drivers[0].driver.variables[2].targets[0].data_path = '["FUTURISM_HOLDOUT"]'
            # Render visibility driver.
            OBJECT.driver_add("hide_render")
            OBJECT.animation_data.drivers[1].driver.variables.new()
            OBJECT.animation_data.drivers[1].driver.variables.new()
            OBJECT.animation_data.drivers[1].driver.variables.new()
            # BUGFIX: this expression previously hard-coded "+5" and ignored
            # the three driver variables created above, so FUTURISM_HOLDIN /
            # FUTURISM_HOLDOUT had no effect on renders. It now mirrors the
            # viewport driver.
            OBJECT.animation_data.drivers[1].driver.expression = "False if frame >= %s+var_001 and frame <= %s+var_002 else True" % (str(FC), str(FC+HOLD))
            OBJECT.animation_data.drivers[1].driver.variables[0].targets[0].id_type = 'SCENE'
            OBJECT.animation_data.drivers[1].driver.variables[0].targets[0].id = bpy.context.scene
            OBJECT.animation_data.drivers[1].driver.variables[0].targets[0].data_path = "current_frame"
            OBJECT.animation_data.drivers[1].driver.variables[1].targets[0].id_type = 'OBJECT'
            OBJECT.animation_data.drivers[1].driver.variables[1].targets[0].id = EMPTY
            OBJECT.animation_data.drivers[1].driver.variables[1].targets[0].data_path = '["FUTURISM_HOLDIN"]'
            OBJECT.animation_data.drivers[1].driver.variables[2].targets[0].id_type = 'OBJECT'
            OBJECT.animation_data.drivers[1].driver.variables[2].targets[0].id = EMPTY
            OBJECT.animation_data.drivers[1].driver.variables[2].targets[0].data_path = '["FUTURISM_HOLDOUT"]'
            # Reset the step counter after emitting a copy.
            STEPINC = 0
            # Copy scale/rotation/translation and parent to the container.
            OBJECT.matrix_world = ACTOBJ.matrix_world
            OBJECT.parent = EMPTY
        # Advance frame cursor and step counter.
        FC += 1
        STEPINC += 1
# Operator class
class Oscurart_futurism (bpy.types.Operator):
    """Blender operator wrapping object_osc_futurism with redo-panel props."""
    bl_idname = "object.duplicate_futurism"
    bl_label = "Duplicate Futurism"
    bl_description = "Duplicate object per frame"
    bl_options = {'REGISTER', 'UNDO'}
    # Emit one copy every `scale` frames.
    scale = bpy.props.IntProperty(name='Step',default=1, min=1, max=1000)
    # Number of frames each copy stays visible.
    hold = bpy.props.IntProperty(name='Hold', default=0, min=0)
    @classmethod
    def poll(cls, context):
        # Only meaningful for mesh objects (to_mesh is called on them).
        return(bpy.context.active_object.type == "MESH" )
    def execute(self, context):
        object_osc_futurism(self, context, self.scale, self.hold)
        return {'FINISHED'}
# Registration
def add_osc_futurism_button(self, context):
    """Menu-draw callback: append the Futurism entry to the Object menu."""
    self.layout.operator(
        Oscurart_futurism.bl_idname,
        text="Futurism",
        icon="PLUGIN")
def register():
    """Register the operator and hook the menu entry (called by Blender)."""
    bpy.utils.register_class(Oscurart_futurism)
    bpy.types.VIEW3D_MT_object.append(add_osc_futurism_button)
def unregister():
    """Undo register(): remove the operator and the menu entry."""
    bpy.utils.unregister_class(Oscurart_futurism)
    bpy.types.VIEW3D_MT_object.remove(add_osc_futurism_button)
# Allow running the add-on directly from Blender's text editor.
if __name__ == '__main__':
    register()
| gpl-3.0 |
willyzha/PeekBot | cogs/tts.py | 1 | 26557 | from discord.ext import commands
from random import choice
from .utils.dataIO import dataIO
from .utils import checks
from .utils.chat_formatting import box
import logging
import collections
import discord
import time
import copy
import os
import asyncio
import uuid
import chardet
import re
from gtts import gTTS
from enum import Enum
# Cog-level logger under Red's logging namespace.
log = logging.getLogger("red.tts")
# Language codes accepted by gTTS mapped to human-readable names;
# used by the "!tts language" command for validation and listing.
available_languages = { 'af' : 'Afrikaans',
                        'sq' : 'Albanian',
                        'ar' : 'Arabic',
                        'hy' : 'Armenian',
                        'bn' : 'Bengali',
                        'ca' : 'Catalan',
                        'zh' : 'Chinese',
                        'zh-cn' : 'Chinese (Mandarin/China)',
                        'zh-tw' : 'Chinese (Mandarin/Taiwan)',
                        'zh-yue' : 'Chinese (Cantonese)',
                        'hr' : 'Croatian',
                        'cs' : 'Czech',
                        'da' : 'Danish',
                        'nl' : 'Dutch',
                        'en' : 'English',
                        'en-au' : 'English (Australia)',
                        'en-uk' : 'English (United Kingdom)',
                        'en-us' : 'English (United States)',
                        'eo' : 'Esperanto',
                        'fi' : 'Finnish',
                        'fr' : 'French',
                        'de' : 'German',
                        'el' : 'Greek',
                        'hi' : 'Hindi',
                        'hu' : 'Hungarian',
                        'is' : 'Icelandic',
                        'id' : 'Indonesian',
                        'it' : 'Italian',
                        'ja' : 'Japanese',
                        'km' : 'Khmer (Cambodian)',
                        'ko' : 'Korean',
                        'la' : 'Latin',
                        'lv' : 'Latvian',
                        'mk' : 'Macedonian',
                        'no' : 'Norwegian',
                        'pl' : 'Polish',
                        'pt' : 'Portuguese',
                        'ro' : 'Romanian',
                        'ru' : 'Russian',
                        'sr' : 'Serbian',
                        'si' : 'Sinhala',
                        'sk' : 'Slovak',
                        'es' : 'Spanish',
                        'es-es' : 'Spanish (Spain)',
                        'es-us' : 'Spanish (United States)',
                        'sw' : 'Swahili',
                        'sv' : 'Swedish',
                        'ta' : 'Tamil',
                        'th' : 'Thai',
                        'tr' : 'Turkish',
                        'uk' : 'Ukrainian',
                        'vi' : 'Vietnamese',
                        'cy' : 'Welsh' }
class QueueKey(Enum):
    """Keys of the per-server state dict built by TextToSpeech._setup_queue()."""
    REPEAT = 1                # repeat-mode flag (unused in visible code)
    PLAYLIST = 2              # playlist-mode flag (unused in visible code)
    VOICE_CHANNEL_ID = 3      # id of the voice channel to (re)connect to
    QUEUE = 4                 # deque of TTS text snippets awaiting playback
    TEMP_QUEUE = 5            # deque of soundboard file paths awaiting playback
    NOW_PLAYING = 6
    NOW_PLAYING_CHANNEL = 7
    LAST_MESSAGE_USER = 8     # author id of the last queued message
    TSS_ENABLED = 9           # text-to-speech mode on/off
    TTS_LANGUAGE = 10         # gTTS language code (see available_languages)
    SB_ENABLED = 11           # soundboard mode on/off
class TextToSpeech:
"""General commands."""
def __init__(self, bot):
self.bot = bot
self.local_playlist_path = "data/tts"
self.local_soundboard_settings = "data/soundboard/sb_settings.json"
self.connect_timers = {}
self.queue = {}
self.remove_queue = deque()
self.user_list = deque()
self.mp3_remove_all()
    async def on_message(self, message):
        """Queue every eligible chat message for TTS playback.

        Skips DMs, bot/TTS messages and command-prefixed ("!") messages;
        splits the text on punctuation and prefixes "<user> says:" whenever
        the author changes.
        """
        if message.channel.is_private:
            return
        server = message.server
        sid = server.id
        # Lazily create this server's state on first message.
        if server.id not in self.queue:
            self._setup_queue(server)
        if not self.queue[sid][QueueKey.TSS_ENABLED]:
            return
        #print(message.clean_content)
        # NOTE(review): this only captures the literal '<:' and ':digits>'
        # markers of a custom emoji, not the whole emoji token — confirm
        # whether the emoji name should be stripped too.
        regex = re.search(r'(\<:).+(:\d+>)',message.clean_content)
        message_content = message.clean_content
        if len(message_content) > 0:
            # Ignore bot commands.
            if message_content[0] == "!":
                return
        if regex is not None:
            for group in regex.groups():
                message_content = message_content.replace(group, "")
        if self.queue[sid][QueueKey.TSS_ENABLED] and not message.tts and not message.author.bot:
            # max_size=10 is currently ignored by _tokenize.
            for text in self._tokenize(message_content, 10):
                if text.strip() != "":
                    # Announce the author only when it differs from the
                    # previous queued message's author.
                    if self.queue[server.id][QueueKey.LAST_MESSAGE_USER] == message.author.id:
                        self.queue[sid][QueueKey.QUEUE].append(text.strip())
                    else:
                        username = message.author.name
                        self.queue[sid][QueueKey.QUEUE].append(username + " says: " + text.strip())
                    self.queue[server.id][QueueKey.LAST_MESSAGE_USER] = message.author.id
def _tokenize(self, text, max_size):
""" Tokenizer on basic punctuation """
punc = "¡!()[]¿?.,،;:—。、:?!\n"
punc_list = [re.escape(c) for c in punc]
pattern = '|'.join(punc_list)
parts = re.split(pattern, text)
return parts
    @commands.group(pass_context=True, no_pm=True)
    async def happybirthday(self, ctx):
        """Queue a hard-coded birthday song for TTS playback."""
        sid = ctx.message.server.id
        # NOTE(review): "brithday"/"Happybirthday" typos are inside the spoken
        # text; fixing them would change the audible output, so they are left.
        self.queue[sid][QueueKey.QUEUE].append("Happy brithday to you! Happy birthday to you! Happybirthday dear Ricky Leek. Happy Birthday to you!")
@commands.group(pass_context=True, no_pm=True)
async def sb(self, ctx, sound):
if sound is not None:
server = ctx.message.server
if sound == "on":
await self.sb_on(ctx)
elif sound == "off":
await self.sb_off(ctx)
else:
if self.queue[server.id][QueueKey.SB_ENABLED]:
msg = sound
self.local_soundboard_settings
current = dataIO.load_json(self.local_soundboard_settings)
if sound in current.keys():
self.queue[server.id][QueueKey.TEMP_QUEUE].append("data/soundboard/"+current[sound])
else:
msg = box("Soundboard command " + sound + " does not exist.")
await self.bot.say(msg)
cmd_usage = "Usage: sb <sound>\n" + \
" Available sounds:\n"
current = dataIO.load_json(self.local_soundboard_settings)
for sound in current.keys():
cmd_usage = cmd_usage + " -" + sound + "\n"
msg = box(cmd_usage)
await self.bot.say(msg)
else:
cmd_usage = "Usage: sb <sound>\n" + \
" Available sounds:\n"
current = dataIO.load_json(self.local_soundboard_settings)
for sound in current.keys():
cmd_usage = cmd_usage + " -" + sound + "\n"
msg = box(cmd_usage)
await self.bot.say(msg)
    async def sb_on(self, ctx):
        """Enable the soundboard, joining the caller's voice channel if needed."""
        server = ctx.message.server
        author = ctx.message.author
        voice_channel = author.voice_channel
        if server.id not in self.queue:
            self._setup_queue(server)
        # Interrupt active TTS playback before switching modes.
        # NOTE(review): _stop() calls _setup_queue(), which resets
        # TSS_ENABLED to False — so the branch below is skipped after a
        # stop and the connect logic runs instead; confirm intended.
        if self.is_playing(server) and self.queue[server.id][QueueKey.TSS_ENABLED]:
            #await ctx.invoke(self._queue, url=url)
            self._stop(server)
        # Already connected for TTS: just flip modes, no reconnect needed.
        if self.queue[server.id][QueueKey.TSS_ENABLED]:
            self.queue[server.id][QueueKey.TSS_ENABLED] = False
            self.queue[server.id][QueueKey.SB_ENABLED] = True
            msg = box("SoundBoard Enabled")
            await self.bot.say(msg)
            return # Default to queue
        # Checking already connected, will join if not
        try:
            self.has_connect_perm(author, server)
        except AuthorNotConnected:
            await self.bot.say("You must join a voice channel before I can"
                               " play anything.")
            return
        except UnauthorizedConnect:
            await self.bot.say("I don't have permissions to join your"
                               " voice channel.")
            return
        except UnauthorizedSpeak:
            await self.bot.say("I don't have permissions to speak in your"
                               " voice channel.")
            return
        except ChannelUserLimit:
            await self.bot.say("Your voice channel is full.")
            return
        if not self.voice_connected(server):
            await self._join_voice_channel(voice_channel)
        else: # We are connected but not to the right channel
            if self.voice_client(server).channel != voice_channel:
                await self._stop_and_disconnect(server)
                await self._join_voice_channel(voice_channel)
        msg = box("SoundBoard Enabled")
        await self.bot.say(msg)
        self.queue[server.id][QueueKey.SB_ENABLED] = True
async def sb_off(self, ctx):
"""Turn off TextToSpeech"""
server = ctx.message.server
if server.id not in self.queue:
self._setup_queue(server)
if self.queue[server.id][QueueKey.TSS_ENABLED] == False:
await self._stop_and_disconnect(server)
msg = box("SoundBoard Disabled")
self.queue[server.id][QueueKey.SB_ENABLED] = False
await self.bot.say(msg)
    @commands.group(pass_context=True, no_pm=True)
    async def tts(self, ctx):
        """Gives the current status of TextToSpeech"""
        # Only report status when no subcommand (on/off/language) was invoked.
        if ctx.invoked_subcommand is None:
            server = ctx.message.server
            # NOTE(review): assumes this server's queue entry already exists
            # (on_message creates it for any message) — confirm no KeyError path.
            if self.queue[server.id][QueueKey.TSS_ENABLED]:
                msg = box("TextToSpeech is currently enabled")
            else:
                msg = box("TextToSpeech is currently disabled")
            await self.bot.say(msg)
    @tts.command(pass_context=True)
    async def language(self, ctx):
        """Set the gTTS language for this server, or list the available codes."""
        server = ctx.message.server
        # NOTE(review): assumes the message matches '!tts language <arg>';
        # a different prefix or missing argument makes .group(1) raise
        # AttributeError on the None match — confirm acceptable.
        language = re.search(r'\!tts language (.+)',ctx.message.content).group(1)
        msg = ""
        if language is None:
            return
        elif language == 'list':
            # Build a human-readable listing of supported codes.
            msg = "Available Languages: \n"
            for key, value in available_languages.items():
                msg = msg + key + " : " + value + "\n"
        if server.id not in self.queue:
            self._setup_queue(server)
        if language in available_languages:
            self.queue[server.id][QueueKey.TTS_LANGUAGE] = language
            msg = "Language switched to " + available_languages[language] + "."
        # NOTE(review): an unknown code leaves msg == "" and sends an empty
        # message, which discord rejects — consider a fallback reply.
        await self.bot.say(msg)
@tts.command(pass_context=True)
async def off(self, ctx):
"""Turn off TextToSpeech"""
server = ctx.message.server
if server.id not in self.queue:
self._setup_queue(server)
if self.queue[server.id][QueueKey.SB_ENABLED] == False:
await self._stop_and_disconnect(server)
msg = box("TextToSpeech Disabled")
self.queue[server.id][QueueKey.TSS_ENABLED] = False
await self.bot.say(msg)
    @tts.command(pass_context=True)
    async def on(self, ctx):
        """Enable TTS, joining the caller's voice channel if needed."""
        server = ctx.message.server
        author = ctx.message.author
        voice_channel = author.voice_channel
        if server.id not in self.queue:
            self._setup_queue(server)
        # Interrupt active soundboard playback before switching modes.
        # NOTE(review): _stop() calls _setup_queue(), which resets
        # SB_ENABLED to False — so the branch below is skipped after a
        # stop and the connect logic runs instead; confirm intended.
        if self.is_playing(server) and self.queue[server.id][QueueKey.SB_ENABLED]:
            #await ctx.invoke(self._queue, url=url)
            self._stop(server)
        # Already connected for the soundboard: just flip modes.
        if self.queue[server.id][QueueKey.SB_ENABLED]:
            self.queue[server.id][QueueKey.SB_ENABLED] = False
            self.queue[server.id][QueueKey.TSS_ENABLED] = True
            msg = box("TextToSpeech Enabled")
            await self.bot.say(msg)
            return # Default to queue
        # Checking already connected, will join if not
        try:
            self.has_connect_perm(author, server)
        except AuthorNotConnected:
            await self.bot.say("You must join a voice channel before I can"
                               " play anything.")
            return
        except UnauthorizedConnect:
            await self.bot.say("I don't have permissions to join your"
                               " voice channel.")
            return
        except UnauthorizedSpeak:
            await self.bot.say("I don't have permissions to speak in your"
                               " voice channel.")
            return
        except ChannelUserLimit:
            await self.bot.say("Your voice channel is full.")
            return
        if not self.voice_connected(server):
            await self._join_voice_channel(voice_channel)
        else: # We are connected but not to the right channel
            if self.voice_client(server).channel != voice_channel:
                await self._stop_and_disconnect(server)
                await self._join_voice_channel(voice_channel)
        msg = box("TextToSpeech Enabled")
        await self.bot.say(msg)
        self.queue[server.id][QueueKey.TSS_ENABLED] = True
# @commands.command(pass_context=True, no_pm=True)
# async def connect(self, ctx, *, url_or_search_terms):
def is_playing(self, server):
if not self.voice_connected(server):
return False
if self.voice_client(server) is None:
return False
if not hasattr(self.voice_client(server), 'audio_player'):
return False
if self.voice_client(server).audio_player.is_done():
return False
return True
    def has_connect_perm(self, author, server):
        """Validate that the bot can join *author*'s voice channel.

        Returns True on success; otherwise raises AuthorNotConnected,
        UnauthorizedConnect, UnauthorizedSpeak or ChannelUserLimit.
        """
        channel = author.voice_channel
        if channel:
            is_admin = channel.permissions_for(server.me).administrator
            # user_limit == 0 means "unlimited" in discord.
            if channel.user_limit == 0:
                is_full = False
            else:
                is_full = len(channel.voice_members) >= channel.user_limit
        if channel is None:
            raise AuthorNotConnected
        elif channel.permissions_for(server.me).connect is False:
            raise UnauthorizedConnect
        elif channel.permissions_for(server.me).speak is False:
            raise UnauthorizedSpeak
        elif is_full and not is_admin:
            # Admin bots may join full channels.
            raise ChannelUserLimit
        else:
            return True
        # Unreachable: every path above either raises or returns True.
        return False
def voice_connected(self, server):
if self.bot.is_voice_connected(server):
return True
return False
def _setup_queue(self, server):
self.queue[server.id] = {QueueKey.REPEAT: False, QueueKey.PLAYLIST: False,
QueueKey.VOICE_CHANNEL_ID: None,
QueueKey.QUEUE: deque(), QueueKey.TEMP_QUEUE: deque(),
QueueKey.NOW_PLAYING: None, QueueKey.NOW_PLAYING_CHANNEL: None,
QueueKey.LAST_MESSAGE_USER: "",
QueueKey.TSS_ENABLED: False,
QueueKey.LAST_MESSAGE_USER: 0,
QueueKey.TTS_LANGUAGE: "en",
QueueKey.SB_ENABLED: False}
    async def _join_voice_channel(self, channel):
        """Join *channel*, enforcing a per-server reconnect cooldown.

        Raises ConnectTimeout while the cooldown is active or when the
        join attempt exceeds 5 seconds (which also arms a 300 s cooldown).
        """
        server = channel.server
        connect_time = self.connect_timers.get(server.id, 0)
        if time.time() < connect_time:
            diff = int(connect_time - time.time())
            raise ConnectTimeout("You are on connect cooldown for another {}"
                                 " seconds.".format(diff))
        # Remember the target channel so playback can reconnect later.
        if server.id in self.queue:
            self.queue[server.id][QueueKey.VOICE_CHANNEL_ID] = channel.id
        try:
            await asyncio.wait_for(self.bot.join_voice_channel(channel),
                                   timeout=5, loop=self.bot.loop)
        except asyncio.futures.TimeoutError as e:
            log.exception(e)
            # NOTE(review): cooldown is 300 s but the message says
            # "10 minutes" — confirm which value is intended.
            self.connect_timers[server.id] = time.time() + 300
            raise ConnectTimeout("We timed out connecting to a voice channel,"
                                 " please try again in 10 minutes.")
async def _disconnect_voice_client(self, server):
if not self.voice_connected(server):
return
voice_client = self.voice_client(server)
await voice_client.disconnect()
def _stop_player(self, server):
if not self.voice_connected(server):
return
voice_client = self.voice_client(server)
if hasattr(voice_client, 'audio_player'):
voice_client.audio_player.stop()
# def _stop_downloader(self, server):
# if server.id not in self.downloaders:
# return
#
# del self.downloaders[server.id]
def _stop(self, server):
self._setup_queue(server)
self._stop_player(server)
#self._stop_downloader(server)
#self.bot.loop.create_task(self._update_bot_status())
async def _stop_and_disconnect(self, server):
self._stop(server)
await self._disconnect_voice_client(server)
def voice_client(self, server):
return self.bot.voice_client_in(server)
    async def _create_ffmpeg_player(self, server, filename, local=False, start_time=None, end_time=None):
        """Attach a new ffmpeg audio player to *server*'s voice client.

        Guarantees a valid voice client first, reconnecting through the
        channel id recorded in the queue when necessary; raises
        VoiceNotConnected when no reconnect target exists.

        ``filename`` is used as-is when ``local`` is True, otherwise it is
        resolved inside ``self.cache_path``.  ``start_time``/``end_time``
        map to the ffmpeg ``-ss`` and ``-to -copyts`` seek options.

        Returns the voice client; the player is set on it in place as
        ``voice_client.audio_player``.
        """
        voice_channel_id = self.queue[server.id][QueueKey.VOICE_CHANNEL_ID]
        voice_client = self.voice_client(server)
        if voice_client is None:
            log.debug("not connected when we should be in sid {}".format(
                server.id))
            to_connect = self.bot.get_channel(voice_channel_id)
            if to_connect is None:
                raise VoiceNotConnected("Okay somehow we're not connected and"
                                        " we have no valid channel to"
                                        " reconnect to. In other words...LOL"
                                        " REKT.")
            log.debug("valid reconnect channel for sid"
                      " {}, reconnecting...".format(server.id))
            # Reconnect on the fly; may raise ConnectTimeout while on cooldown.
            await self._join_voice_channel(to_connect)
        elif voice_client.channel.id != voice_channel_id:
            # The live client is authoritative; fix the stale recorded id.
            self.queue[server.id][QueueKey.VOICE_CHANNEL_ID] = voice_client.channel.id
            log.debug("reconnect chan id for sid {} is wrong, fixing".format(
                server.id))
        # Okay if we reach here we definitively have a working voice_client
        if local:
            song_filename = filename
        else:
            song_filename = os.path.join(self.cache_path, filename)
        use_avconv = False  # hard-coded; was: self.settings["AVCONV"]
        options = '-b:a 64k -bufsize 64k'
        before_options = ''
        if start_time:
            before_options += '-ss {}'.format(start_time)
        if end_time:
            options += ' -to {} -copyts'.format(end_time)
        try:
            # Kill any player process left over from a previous track.
            voice_client.audio_player.process.kill()
            log.debug("killed old player")
        except AttributeError:
            # No previous player existed.
            pass
        except ProcessLookupError:
            # The previous process already exited on its own.
            pass
        log.debug("making player on sid {}".format(server.id))
        voice_client.audio_player = voice_client.create_ffmpeg_player(
            song_filename, use_avconv=use_avconv, options=options, before_options=before_options)
        # Set initial volume (fixed at 50%; the per-server setting is disabled).
        vol = 50/100  # was: self.get_server_settings(server)['VOLUME'] / 100
        voice_client.audio_player.volume = vol
        return voice_client  # Just for ease of use, it's modified in-place
    async def gTTS_queue_manager(self, sid):
        """Render the next queued TTS message for server *sid* to an mp3.

        Pops one message from the server's main queue, synthesises it with
        gTTS using the server's configured language, writes it under a
        unique name in the local playlist directory, and appends the path
        to TEMP_QUEUE where voice_queue_manager picks it up for playback.
        """
        server = self.bot.get_server(sid)
        queue = self.queue[server.id][QueueKey.QUEUE]
        # Sanity check: we must alias the live deque, not a snapshot copy.
        assert queue is self.queue[server.id][QueueKey.QUEUE]
        ttsMessage = queue.popleft()
        tts = gTTS(text=ttsMessage, lang=self.queue[server.id][QueueKey.TTS_LANGUAGE], slow=False)
        # Unique filename so concurrent messages cannot clobber each other.
        unique_filename = str(uuid.uuid4()) + ".mp3"
        ttsFileName = os.path.join(self.local_playlist_path, unique_filename)
        # NOTE(review): tts.save() does blocking network/disk I/O inside a
        # coroutine; consider loop.run_in_executor -- confirm acceptable.
        tts.save(ttsFileName)
        self.queue[server.id][QueueKey.TEMP_QUEUE].append(ttsFileName)
    async def gTTS_queue_scheduler(self):
        """Background task: fan out gTTS rendering jobs while the cog is loaded.

        Loops until this cog instance is replaced/unloaded.  Each pass
        snapshots the queues, spawns one gTTS_queue_manager task per server
        that has pending messages, then polls until all spawned tasks are
        done before sleeping briefly and starting over.
        """
        while self == self.bot.get_cog('TextToSpeech'):
            tasks = []
            # Deep-copy so the length checks below are not disturbed by the
            # manager tasks mutating the live queues while we iterate.
            queue = copy.deepcopy(self.queue)
            for sid in queue:
                if len(queue[sid][QueueKey.QUEUE]) == 0:
                    continue
                tasks.append(
                    self.bot.loop.create_task(self.gTTS_queue_manager(sid)))
            # Poll (0.1 s granularity) until every spawned task completed.
            completed = [t.done() for t in tasks]
            while not all(completed):
                completed = [t.done() for t in tasks]
                await asyncio.sleep(0.1)
            await asyncio.sleep(0.1)
    async def voice_queue_manager(self, sid):
        """Start playback of the next rendered mp3 from *sid*'s TEMP_QUEUE.

        Only acts when nothing is currently playing and TTS or the
        soundboard is still enabled for the server; otherwise it is a
        no-op and the file stays queued.
        """
        server = self.bot.get_server(sid)
        queue = self.queue[server.id][QueueKey.TEMP_QUEUE]
        # Sanity check: we must alias the live deque, not a snapshot copy.
        assert queue is self.queue[server.id][QueueKey.TEMP_QUEUE]
        if not self.is_playing(server) and (self.queue[server.id][QueueKey.TSS_ENABLED] or self.queue[server.id][QueueKey.SB_ENABLED]):
            # Without a voice client there is nothing to play through.
            if self.voice_client(server) is None:
                return
            filename = queue.popleft()
            print("pop " + filename)
            voice_client = await self._create_ffmpeg_player(server, filename, local=True, start_time=None, end_time=None)
            print("create voice client")
            voice_client.audio_player.start()
            print("start voice client")
            # Deletion is deferred: mp3_cleanup prunes this queue later so
            # the file is not removed while it is still being played.
            self.remove_queue.append(filename)
            #os.remove(os.path.join(self.local_playlist_path, filename))
    async def voice_queue_scheduler(self):
        """Background task: start playback of rendered files while loaded.

        Each pass snapshots the queues, spawns one voice_queue_manager task
        per server with pending files (running mp3_cleanup when a server's
        temp queue is empty), then polls until all spawned tasks finish.
        """
        while self == self.bot.get_cog('TextToSpeech'):
            tasks = []
            # Deep-copy so the live queues can mutate while we iterate.
            queue = copy.deepcopy(self.queue)
            for sid in queue:
                if len(queue[sid][QueueKey.TEMP_QUEUE]) == 0:
                    # Idle server: opportunistically delete played files.
                    self.mp3_cleanup()
                    continue
                tasks.append(
                    self.bot.loop.create_task(self.voice_queue_manager(sid)))
            # Poll (0.1 s granularity) until every spawned task completed.
            completed = [t.done() for t in tasks]
            while not all(completed):
                completed = [t.done() for t in tasks]
                await asyncio.sleep(0.1)
            await asyncio.sleep(0.1)
def mp3_cleanup(self):
if len(self.remove_queue) > 1:
file_to_remove = self.remove_queue.popleft()
if self.local_playlist_path in file_to_remove:
os.remove(file_to_remove)
def mp3_remove_all(self):
for file in os.listdir(self.local_playlist_path):
if file.endswith(".mp3"):
os.remove(os.path.join(self.local_playlist_path, file))
async def disconnect_timer(self):
stop_times = {}
while self == self.bot.get_cog('TextToSpeech'):
for vc in list(self.bot.voice_clients):
server = vc.server
#print(vc.channel.voice_members)
if len(vc.channel.voice_members) == 1:
self.queue[server.id][QueueKey.TSS_ENABLED] = False
self.queue[server.id][QueueKey.SB_ENABLED] = False
await self._stop_and_disconnect(server)
await asyncio.sleep(5)
class deque(collections.deque):
    """collections.deque extended with non-destructive peek operations.

    NOTE: intentionally shadows the stdlib name so the queues created in
    _setup_queue and existing callers keep working unchanged.
    """
    # The original defined an __init__ that only forwarded to
    # super().__init__(); that is the inherited behaviour anyway, so the
    # redundant override was dropped.

    def peek(self):
        """Return a deep copy of the rightmost item without removing it."""
        ret = self.pop()
        self.append(ret)
        return copy.deepcopy(ret)

    def peekleft(self):
        """Return a deep copy of the leftmost item without removing it."""
        ret = self.popleft()
        self.appendleft(ret)
        return copy.deepcopy(ret)
class NotConnected(Exception):
    """Base error: the bot has no usable voice connection."""
    pass
class AuthorNotConnected(NotConnected):
    """The command author is not connected to a voice channel."""
    pass
class UnauthorizedConnect(Exception):
    """The bot lacks permission to connect to the target channel."""
    pass
class UnauthorizedSpeak(Exception):
    """The bot lacks permission to speak in the target channel."""
    pass
class ChannelUserLimit(Exception):
    """The target voice channel is at its user limit."""
    pass
class ConnectTimeout(NotConnected):
    """Connecting timed out, or the per-server connect cooldown is active."""
    pass
def check_folders():
    """Create the data folders this cog needs, if any are missing."""
    for folder in ("data", "data/tts", "data/soundboard"):
        if os.path.exists(folder):
            continue
        print("Creating " + folder + " folder...")
        os.makedirs(folder)
def check_files():
    """Create or repair the soundboard settings file.

    Ensures data/soundboard/sb_settings.json exists (seeding it with the
    default entry), recovers from a corrupted file, and prunes entries
    whose mp3 no longer exists on disk.
    """
    default = {"sample": "sample.mp3"}
    data_path = "data/soundboard/"
    settings_path = "data/soundboard/sb_settings.json"
    if not os.path.isfile(settings_path):
        print("Creating default audio settings.json...")
        dataIO.save_json(settings_path, default)
    else:  # consistency check
        try:
            current = dataIO.load_json(settings_path)
        except JSONDecodeError:
            # settings.json keeps getting corrupted for unknown reasons. Let's
            # try to keep it from making the cog load fail.
            dataIO.save_json(settings_path, default)
            current = dataIO.load_json(settings_path)
        # NOTE(review): pruning only runs when the key set differs from the
        # one-entry default; a customised file whose keys happen to equal
        # the default's is never pruned.  Confirm this is intended.
        if current.keys() != default.keys():
            for key in list(current.keys()):
                mp3_file = data_path + current[key]
                if not os.path.isfile(mp3_file):
                    print(mp3_file + " does not exist! Removing from list.")
                    current.pop(key)
            dataIO.save_json(settings_path, current)
def setup(bot):
    """Cog entry point used by the bot loader: register cog and tasks."""
    cog = TextToSpeech(bot)
    # Make sure the on-disk layout exists before the cog starts working.
    check_folders()
    check_files()
    bot.add_cog(cog)
    # Long-running background workers for rendering, playback and cleanup.
    bot.loop.create_task(cog.gTTS_queue_scheduler())
    bot.loop.create_task(cog.disconnect_timer())
    bot.loop.create_task(cog.voice_queue_scheduler())
| gpl-3.0 |
uw-it-aca/myuw | myuw/dao/notice_categorization.py | 1 | 12294 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
# Maps a notice type key to its MyUW presentation metadata.  Each value
# carries:
#   "myuw_category"  -- display grouping label ("not a notice" entries are
#                       consumed elsewhere rather than listed as notices --
#                       presumably by the cards named in their tags),
#   "location_tags"  -- UI location/card tags where the notice is rendered,
#   "critical"       -- whether the notice receives critical emphasis.
NOTICE_CATEGORIES = {
    "studentalr_intlstucheckin": {
        "myuw_category": "Holds",
        "location_tags": ['notices_holds', 'reg_card_holds'],
        "critical": True
    },
    "studentalr_adminholds": {
        "myuw_category": "Holds",
        "location_tags": ['notices_holds', 'reg_card_holds'],
        "critical": True
    },
    "studentalr_satprogblock": {
        "myuw_category": "Holds",
        "location_tags": ['notices_holds', 'reg_card_holds'],
        "critical": True
    },
    "studentalr_preregnow": {
        "myuw_category": "Registration",
        "location_tags": ['reg_card_messages'],
        "critical": True
    },
    "studentalr_hsimmunblock": {
        "myuw_category": "Holds",
        "location_tags": ['notices_date_sort'],
        "critical": True
    },
    "studentgen_ferpa": {
        "myuw_category": "Legal",
        "location_tags": ['notices_legal'],
        "critical": False
    },
    "studentgen_riaa": {
        "myuw_category": "Legal",
        "location_tags": ['notices_legal'],
        "critical": False
    },
    "studentgen_acctbalance": {
        "myuw_category": "not a notice",
        "location_tags": ['tuition_balance', 'finance_card'],
        "critical": False
    },
    "studentgen_acctbaleonote": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['pce_tuition_dup', 'finance_card'],
        "critical": False
    },
    "studentgen_intendedmajors": {
        "myuw_category": "Academics",
        "location_tags": ['academics_card'],
        "critical": False
    },
    "studentgen_majors": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "studentgen_degreeappl": {
        "myuw_category": "Graduation",
        "location_tags": [],
        "critical": True
    },
    "studentdad_qtrbegin": {
        "myuw_category": "not a notice",
        "location_tags": ['notices_date_sort', 'quarter_begins'],
        "critical": False
    },
    "studentdad_lastdayregwochgfee": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdayregchgfee": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaychgins": {
        "myuw_category": "Insurance",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaydropnorecord": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdayauditopt": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaywoannualdrop": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaydrop": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdayadd": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdayannualdrop": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaychggradeopt": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_lastdaywithdraw": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": False
    },
    "studentdad_tuitdue": {
        "myuw_category": "Fees & Finances",
        # NOTE(review): 'notice_date_sort' below is likely a typo for
        # 'notices_date_sort' (the tag used by every other dated entry);
        # left unchanged pending confirmation against the UI templates.
        "location_tags": ['tuition_due_date', 'finance_card',
                          'notice_date_sort'],
        "critical": True
    },
    "studentdad_commencement": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "studentdad_estpd1regdate": {
        "myuw_category": "Registration",
        "location_tags": ['est_reg_date', 'notices_date_sort'],
        "critical": True
    },
    "studentdad_intlsturegcutoffdate": {
        "myuw_category": "Visa",
        "location_tags": ['notices_date_sort'],
        "critical": True
    },
    "studentdad_intlstuftregcutoffdate": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": True
    },
    "studentdad_hsimmunreqdatea": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": True
    },
    "studentdad_hsimmunreqdateb": {
        "myuw_category": "Registration",
        "location_tags": ['notices_date_sort'],
        "critical": True
    },
    "newstudentgen_thankyouremark": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_thankyou'],
        "critical": False
    },
    "newstudentgen_statussummary": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_residence'],
        "critical": False
    },
    "newstudentgen_feespaid": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_feespaid'],
        "critical": False
    },
    "newstudentclist_intlstucheckina": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_iss_before'],
        "critical": False
    },
    "newstudentclist_intlstucheckinb": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_iss_after'],
        "critical": False
    },
    "newstudentclist_advorientregdatea": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_orient_before'],
        "critical": False
    },
    "newstudentclist_advorientregdateb": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_no_orient'],
        "critical": False
    },
    "newstudentclist_advorientregdatec": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_orient_after'],
        "critical": False
    },
    "newstudentclist_measlesa": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_measles_before'],
        "critical": False
    },
    "newstudentclist_measlesb": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_measles_after'],
        "critical": False
    },
    "newstudentclist_intendedmajor": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "newstudentclist_finaid": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "newstudentclist_emailservices": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_email'],
        "critical": False
    },
    "newstudentfoot_fiuts": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_fiuts'],
        "critical": False
    },
    "newstudentfoot_summerreginfo": {
        "myuw_category": "Admission",
        "location_tags": ['checklist_summerreg'],
        "critical": False
    },
    "newstudentfoot_nextstep": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "ugapplgen_thankyouforapplying": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "ugapplgen_applinfolinks": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "ugapplgen_admwebsites": {
        "myuw_category": "not a notice",
        "location_tags": [],
        "critical": False
    },
    "studentfinaid_directdeposit": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_direct_deposit'],
        "critical": True
    },
    "studentfinaid_directdepositshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_direct_deposit_title'],
        "critical": False
    },
    "studentfinaid_aidprioritydate": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aid_prioritydate', 'notices_date_sort'],
        "critical": True
    },
    "studentfinaid_aidprioritydateshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aid_prioritydate_title'],
        "critical": False
    },
    "studentfinaid_aidreminder": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aid_reminder'],
        "critical": False
    },
    "studentfinaid_aidremindershort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aid_reminder_title'],
        "critical": False
    },
    "studentfinaid_summeraiddate": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_summeraid_date', 'notices_date_sort'],
        "critical": False
    },
    "studentfinaid_summeraiddateshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_summeraid_date_title'],
        "critical": False
    },
    "studentfinaid_summeraidavail": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_summeraid_avail', 'reg_summeraid_avail'],
        "critical": False
    },
    "studentfinaid_summeraidavailshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_summeraid_avail_title',
                          'reg_summeraid_avail_title'],
        "critical": False
    },
    "studentfinaid_acceptrejectaward": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_acceptreject'],
        "critical": True
    },
    "studentfinaid_acceptrejectawardshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_acceptreject_title'],
        "critical": False
    },
    "studentfinaid_disbursedatea": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_disbursedateA', 'notices_date_sort'],
        "critical": True
    },
    "studentfinaid_disbursedateashort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_disbursedateA_title'],
        "critical": False
    },
    "studentfinaid_disbursedateb": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_disbursedateB', 'notices_date_sort'],
        "critical": True
    },
    "studentfinaid_disbursedatebshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_disbursedateB_title'],
        "critical": False
    },
    "studentfinaid_loancounseling": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_loancounseling'],
        "critical": True
    },
    "studentfinaid_loancounselingshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_loancounseling_title'],
        "critical": False
    },
    "studentfinaid_loanpromissory": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_loanpromissory'],
        "critical": True
    },
    "studentfinaid_loanpromissoryshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_loanpromissory_title'],
        "critical": False
    },
    "studentfinaid_missingdocs": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_missingdocs'],
        "critical": True
    },
    "studentfinaid_missingdocsshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_missingdocs_title'],
        "critical": False
    },
    "studentfinaid_aidhold": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aidhold'],
        "critical": True
    },
    "studentfinaid_aidholdshort": {
        "myuw_category": "Fees & Finances",
        "location_tags": ['tuition_aidhold_title'],
        "critical": False
    },
    "myuwnotice_banner": {
        "myuw_category": "MyUW Banner Notice",
        "location_tags": ['notice_banner'],
        "critical": False
    }
}
| apache-2.0 |
cosmiclattes/TPBviz | torrent/lib/python2.7/site-packages/pip/_vendor/requests/exceptions.py | 341 | 1877 | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request."""

    def __init__(self, *args, **kwargs):
        """
        Initialize RequestException with `request` and `response` objects.
        """
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # When the caller supplied no request, fall back to the one
        # recorded on the response (if the response carries one).
        if self.response is not None and not self.request:
            if hasattr(self.response, 'request'):
                self.request = self.response.request
        super(RequestException, self).__init__(*args, **kwargs)
# Concrete exception hierarchy.  Everything derives from RequestException,
# so ``except RequestException`` catches any error raised by this library.
class HTTPError(RequestException):
    """An HTTP error occurred."""
class ConnectionError(RequestException):
    """A Connection error occurred."""
class ProxyError(ConnectionError):
    """A proxy error occurred."""
class SSLError(ConnectionError):
    """An SSL error occurred."""
class Timeout(RequestException):
    """The request timed out."""
class URLRequired(RequestException):
    """A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
    """Too many redirects."""
# The ValueError mixins below also behave as plain ValueErrors for callers
# that validate URLs directly.
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
    """ The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""
| gpl-3.0 |
bgreenlee/sublime-github | lib/requests/packages/charade/universaldetector.py | 91 | 7030 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20  # probers below this confidence are inconclusive

# Input-state machine values for UniversalDetector._mInputState.
ePureAscii = 0  # nothing but 7-bit ASCII seen so far
eEscAscii = 1   # ASCII plus escape sequences (candidate ISO-2022 / HZ)
eHighbyte = 2   # at least one byte with the high bit set was seen


class UniversalDetector:
    """Feeds document bytes to the charset probers and picks a winner.

    Typical use: call feed() with successive byte chunks, then close();
    the outcome is in ``self.result`` as {'encoding': ..., 'confidence': ...}.
    """

    def __init__(self):
        # Matches any byte with the high bit set.
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # Matches ESC (ISO-2022 family) or "~{" (HZ-GB-2312) introducers.
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        self.reset()

    def reset(self):
        """Reset all state so the detector can be reused for a new document."""
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True
        self._mGotData = False
        self._mInputState = ePureAscii
        self._mLastChar = b''
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()

    def feed(self, aBuf):
        """Consume the next chunk of the document's bytes."""
        if self.done:
            return

        aLen = len(aBuf)
        if not aLen:
            return

        if not self._mGotData:
            # If the data starts with BOM, we know it is UTF.
            #
            # BUG FIX: the UTF-8 branch used to compare against codecs.BOM,
            # which is the 2-byte native-order UTF-16 BOM; a 3-byte slice
            # can never equal it, so UTF-8 BOMs were silently missed.
            # Compare against codecs.BOM_UTF8 instead.
            if aBuf[:3] == codecs.BOM_UTF8:
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_LE:
                # FF FE 00 00  UTF-32, little-endian BOM
                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
            elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-3412",
                    'confidence': 1.0
                }
            elif aBuf[:4] == b'\x00\x00\xFF\xFE':
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-2143",
                    'confidence': 1.0
                }
            elif aBuf[:2] == codecs.BOM_LE:
                # FF FE  UTF-16, little endian BOM
                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
            elif aBuf[:2] == codecs.BOM_BE:
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}

        self._mGotData = True
        if self.result['encoding'] and (self.result['confidence'] > 0.0):
            self.done = True
            return

        # Track whether the stream has left the pure-ASCII state.
        if self._mInputState == ePureAscii:
            if self._highBitDetector.search(aBuf):
                self._mInputState = eHighbyte
            elif ((self._mInputState == ePureAscii) and
                    self._escDetector.search(self._mLastChar + aBuf)):
                self._mInputState = eEscAscii

        # Keep the last byte: an ESC may straddle a chunk boundary.
        self._mLastChar = aBuf[-1:]

        if self._mInputState == eEscAscii:
            # Escape sequences present: try the ISO-2022/HZ prober.
            if not self._mEscCharSetProber:
                self._mEscCharSetProber = EscCharSetProber()
            if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
                self.result = {
                    'encoding': self._mEscCharSetProber.get_charset_name(),
                    'confidence': self._mEscCharSetProber.get_confidence()
                }
                self.done = True
        elif self._mInputState == eHighbyte:
            # High-bit bytes present: run the multi-/single-byte and
            # Latin-1 prober groups (created lazily on first need).
            if not self._mCharSetProbers:
                self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
                                         Latin1Prober()]
            for prober in self._mCharSetProbers:
                if prober.feed(aBuf) == constants.eFoundIt:
                    self.result = {'encoding': prober.get_charset_name(),
                                   'confidence': prober.get_confidence()}
                    self.done = True
                    break

    def close(self):
        """Finish detection; ``self.result`` holds the outcome.

        NOTE(review): mirrors the upstream quirk that some paths fall off
        the end and return None implicitly; callers should consult
        ``self.result`` rather than rely on the return value.
        """
        if self.done:
            return
        if not self._mGotData:
            if constants._debug:
                sys.stderr.write('no data received!\n')
            return
        self.done = True

        if self._mInputState == ePureAscii:
            # Only 7-bit bytes ever seen: plain ASCII.
            self.result = {'encoding': 'ascii', 'confidence': 1.0}
            return self.result

        if self._mInputState == eHighbyte:
            # Pick the most confident prober, if it clears the threshold.
            proberConfidence = None
            maxProberConfidence = 0.0
            maxProber = None
            for prober in self._mCharSetProbers:
                if not prober:
                    continue
                proberConfidence = prober.get_confidence()
                if proberConfidence > maxProberConfidence:
                    maxProberConfidence = proberConfidence
                    maxProber = prober
            if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
                self.result = {'encoding': maxProber.get_charset_name(),
                               'confidence': maxProber.get_confidence()}
                return self.result

        if constants._debug:
            sys.stderr.write('no probers hit minimum threshhold\n')
            for prober in self._mCharSetProbers[0].mProbers:
                if not prober:
                    continue
                sys.stderr.write('%s confidence = %s\n' %
                                 (prober.get_charset_name(),
                                  prober.get_confidence()))
| mit |
adminneyk/codificacionproyectando | application/views/Generacion/Generacion/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/doctest.py | 7 | 100028 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
    """Return the bit flag for option `name`, registering it on first use.

    Flags are consecutive powers of two assigned in registration order,
    so they can be OR-ed together into a single options word.
    """
    # Create a new flag unless `name` is already known.
    return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
# Comparison flags: control how expected and actual output are compared.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)
# Reporting flags: control how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.
    """
    if inspect.ismodule(module):
        return module
    elif isinstance(module, (str, unicode)):
        # NOTE: the `unicode` builtin makes this branch Python-2-only;
        # on Python 3 it would raise NameError at runtime.
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        # sys._getframe(depth) is call-stack sensitive: do not wrap this
        # code in extra helper frames without adjusting `depth`.
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    else:
        raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
    """Return (file_contents, absolute_filename) for a doctest text file.

    When `module_relative` is true, `filename` is resolved relative to
    `package`, honouring packages served by a loader (e.g. zipimport)
    through the loader's get_data() hook.
    """
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        if hasattr(package, '__loader__'):
            if hasattr(package.__loader__, 'get_data'):
                file_contents = package.__loader__.get_data(filename)
                # get_data() opens files as 'rb', so one must do the equivalent
                # conversion as universal newlines would do.
                return file_contents.replace(os.linesep, '\n'), filename
    return open(filename).read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """StringIO stand-in used to capture stdout while examples run."""
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        # (softspace is a Python-2 file-object artefact.)
        if hasattr(self, "softspace"):
            del self.softspace
        return result
    def truncate(self, size=None):
        # Also drop softspace on truncation, for the same reason as above.
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Return True iff `got` matches `want`, treating each occurrence of
    ELLIPSIS_MARKER ("...") in `want` as a wildcard that may match any
    substring (including the empty string) of `got`.
    Worst-case linear time.  Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got
    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2
    # Deal with exact matches possibly needed at one or both ends.
    # startpos/endpos delimit the still-unmatched region of `got`.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False
    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False
    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.  (Greedy leftmost placement is
    # safe because each piece is separated from the next by a wildcard.)
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)
    return True
def _comment_line(line):
    "Return a commented form of the given line"
    # Strip trailing whitespace first so '# ' is never followed by junk.
    stripped = line.rstrip()
    return '# ' + stripped if stripped else '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's own interactive output
        # (typically the real stdout saved by the doctest runner).
        self.__out = out
        # Becomes True the first time set_trace() runs; lets
        # set_continue() avoid disabling tracing that this debugger
        # never enabled (which would break coverage tools).
        self.__debugger_used = False
        pdb.Pdb.__init__(self, stdout=out)
    def set_trace(self, frame=None):
        # Begin debugging at `frame`; default to the caller's frame so
        # that pdb.set_trace()-style usage works inside examples.
        self.__debugger_used = True
        if frame is None:
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)
    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)
    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream only for the duration of
        # one dispatch, so traced user code still sees the fake stdout.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """
    Convert `path`, a '/'-separated path relative to `module`'s
    directory, into an OS-specific filesystem path.

    Raises TypeError if `module` is not a module, and ValueError if
    `path` is absolute or the module has no resolvable location.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')
    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv) > 0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins).  BUG FIX: the
        # original concatenated the module object itself into the message
        # ("..." + module + "..."), which raised TypeError instead of the
        # intended ValueError; format with the module's name instead.
        raise ValueError("Can't resolve paths relative to the module %r "
                         "(it has no __file__)" % module.__name__)
    # Combine the base directory and the path.
    return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code plus its expected
    output.  `Example` defines the following attributes:
    - source: A single Python statement, always ending with a newline.
      The constructor adds a newline if needed.
    - want: The expected output from running the source code (either
      from stdout, or a traceback in case of exception).  `want` ends
      with a newline unless it's empty, in which case it's an empty
      string.  The constructor adds a newline if needed.
    - exc_msg: The exception message generated by the example, if
      the example is expected to generate an exception; or `None` if
      it is not expected to generate an exception.  This exception
      message is compared against the return value of
      `traceback.format_exception_only()`.  `exc_msg` ends with a
      newline unless it's `None`.  The constructor adds a newline
      if needed.
    - lineno: The line number within the DocTest string containing
      this Example where the Example begins.  This line number is
      zero-based, with respect to the beginning of the DocTest.
    - indent: The example's indentation in the DocTest string, i.e.
      the number of space characters that precede the first prompt.
    - options: A dictionary mapping option flags to True/False,
      overriding default options for this example only.  Flags absent
      from the dictionary keep their default value (as specified by
      the DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source always ends with a newline; want/exc_msg end
        # with a newline when non-empty / not None.
        self.source = source if source.endswith('\n') else source + '\n'
        if want and not want.endswith('\n'):
            want += '\n'
        self.want = want
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg += '\n'
        self.exc_msg = exc_msg
        # Positional/bookkeeping attributes.
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:
    - examples: the list of examples.
    - globs: The namespace (aka globals) that the examples should
      be run in.
    - name: A name identifying the DocTest (typically, the name of
      the object whose docstring this DocTest was extracted from).
    - filename: The name of the file that this DocTest was extracted
      from, or `None` if the filename is unknown.
    - lineno: The line number within filename where this DocTest
      begins, or `None` if the line number is unavailable.  This
      line number is zero-based, with respect to the beginning of
      the file.
    - docstring: The string that the examples were extracted from,
      or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        # Guard against the pre-2.4 API where a docstring was passed
        # directly; callers must go through DocTestParser now.
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.name = name
        self.filename = filename
        self.lineno = lineno
        self.docstring = docstring
        self.examples = examples
        # Copy so the examples cannot mutate the caller's namespace.
        self.globs = globs.copy()
    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            example_desc = 'no examples'
        elif count == 1:
            example_desc = '1 example'
        else:
            example_desc = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, example_desc))
    # This lets us sort tests by name:
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp((self.name, self.filename, self.lineno, id(self)),
                   (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .*$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)
    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.
        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.
        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))
        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-char '>>> '/'... ' prompt.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])
        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])
        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None
        # Extract options from the source.
        options = self._find_options(source, name, lineno)
        return source, options, want, exc_msg
    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)
    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.
        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options
    # This regular expression finds the indentation of every non-blank
    # line in a string.  NOTE: made a raw string -- the original
    # '^([ ]*)(?=\S)' relied on the invalid escape '\S' passing through
    # unchanged, which is deprecated in modern Pythons; the compiled
    # pattern is byte-identical.
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0
    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            # line[indent:indent+3] is the 3-char prompt ('>>>' or '...');
            # the character right after it must be a blank.
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))
    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.
        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.
        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.
        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE: the default `parser` is one DocTestParser instance built at
        # class-definition time and shared by every finder; DocTestParser
        # keeps no per-call state, so the sharing is harmless.
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.
        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:
            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.
        Contained objects whose module does not match `module` are ignored.
        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.
        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                        "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))
        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            # `module is False` is a deliberate sentinel meaning "don't
            # even try to find the module" (see docstring above).
            module = None
        elif module is None:
            module = inspect.getmodule(obj)
        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        # NOTE(review): `file` shadows the py2 builtin of the same name;
        # harmless here since the builtin is never used in this method.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            source_lines = None
        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.  (Ordering comes from DocTest.__cmp__.)
        tests.sort()
        return tests
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            # No module restriction -- accept everything.
            return True
        elif inspect.isfunction(object):
            # py2-only attribute: func_globals (py3 renamed it __globals__).
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way to be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            # py2 print statement -- this module is Python 2 source.
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod:
                # unwrap to the underlying function (im_func is py2-only).
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                      self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Point at the .py source rather than a compiled file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.
        # (im_func/func_code are py2-only; py3 uses __func__/__code__.)
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # NOTE(review): if co_firstlineno were ever missing, the
            # getattr default None would make `None-1` raise TypeError;
            # code objects always have it, so this is latent only.
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then supress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
    # Recognizes the synthetic filenames that __run() hands to compile()
    # (e.g. '<doctest some.test.name[3]>'), capturing the test name and
    # the example index.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # If `filename` names an example of the test currently running,
        # serve the example's source directly so the debugger can show
        # it; otherwise defer to the real linecache.getlines saved by
        # run().
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        # Bucket every recorded test into one of three groups: ran no
        # examples, passed all examples, or had at least one failure.
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        # Verbose mode also lists the tests that had nothing to run and
        # the ones that fully passed.
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print " ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        # Failures are always reported, regardless of verbosity.
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return TestResults(totalf, totalt)
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        # Fold `other`'s per-test (failures, tries) tallies into this
        # runner's mapping; tests recorded by both runners have their
        # counts summed.  Note: the runner-wide self.failures/self.tries
        # totals are deliberately left untouched here.
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a DocTest example's output does
    not match what was expected.

    The exception instance carries the failure context:

    - test: the DocTest object being run
    - example: the Example object that failed
    - got: the actual output
    """

    def __init__(self, test, example, got):
        # Stash the full context so post-mortem tooling can inspect it.
        self.test = test
        self.example = example
        self.got = got

    def __str__(self):
        # Render as the owning test's string form.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised when a DocTest example triggers an exception that the
    test did not declare.

    The exception instance carries the failure context:

    - test: the DocTest object being run
    - example: the Example object that failed
    - exc_info: the exception info (as from sys.exc_info())
    """

    def __init__(self, test, example, exc_info):
        # Stash the full context so post-mortem tooling can inspect it.
        self.test = test
        self.example = example
        self.exc_info = exc_info

    def __str__(self):
        # Render as the owning test's string form.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
      >>> runner = DebugRunner(verbose=False)
      >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
      ...                                    {}, 'foo', 'foo.py', 0)
      >>> try:
      ...     runner.run(test)
      ... except UnexpectedException, failure:
      ...     pass
      >>> failure.test is test
      True
      >>> failure.example.want
      '42\n'
      >>> exc_info = failure.exc_info
      >>> raise exc_info[0], exc_info[1], exc_info[2]
      Traceback (most recent call last):
      ...
      KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
      >>> test = DocTestParser().get_doctest('''
      ...      >>> x = 1
      ...      >>> x
      ...      2
      ...      ''', {}, 'foo', 'foo.py', 0)
      >>> try:
      ...    runner.run(test)
      ... except DocTestFailure, failure:
      ...    pass
    DocTestFailure objects provide access to the test:
      >>> failure.test is test
      True
    As well as to the example:
      >>> failure.example.want
      '2\n'
    and the actual output:
      >>> failure.got
      '1\n'
    If a failure or error occurs, the globals are left intact:
      >>> del test.globs['__builtins__']
      >>> test.globs
      {'x': 1}
      >>> test = DocTestParser().get_doctest('''
      ...      >>> x = 2
      ...      >>> raise KeyError
      ...      ''', {}, 'foo', 'foo.py', 0)
      >>> runner.run(test)
      Traceback (most recent call last):
      ...
      UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
      >>> del test.globs['__builtins__']
      >>> test.globs
      {'x': 2}
    But the globals are cleared if there is no error:
      >>> test = DocTestParser().get_doctest('''
      ...      >>> x = 2
      ...      ''', {}, 'foo', 'foo.py', 0)
      >>> runner.run(test)
      TestResults(failed=0, attempted=1)
      >>> test.globs
      {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Delegate to the base class with clear_globs=False so the
        # globals survive a raised DocTestFailure/UnexpectedException
        # for post-mortem inspection; clear them only on a clean return.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Fail fast: surface the offending example instead of recording it.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Fail fast: surface the mismatching example instead of recording it.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See doctest.__doc__ for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # Default to the __main__ module when none was supplied.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')

    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # Fall back to the module's own name.
    if name is None:
        name = m.__name__

    # Find, parse, and run all tests in the given module; a DebugRunner
    # turns the first problem into an exception.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Keep the legacy module-level `master` runner up to date.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path and load the file's text.
    text, filename = _load_testfile(filename, package, module_relative)

    # Default the test name to the file's basename.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals, never mutating the caller's dict.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    # A DebugRunner turns the first problem into an exception.
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Convert the text to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Keep the legacy module-level `master` runner up to date.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals.  Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.

    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples.  If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.

    Optional keyword arg `optionflags` specifies options for the
    testing and output.  See the documentation for `testmod` for more
    information.
    """
    # Collect the doctests attached to `f` itself (no recursion into
    # contained objects) and run each one.
    test_finder = DocTestFinder(verbose=verbose, recurse=False)
    test_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for a_test in test_finder.find(f, name, globs=globs):
        test_runner.run(a_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility.  It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 interface; use DocTestRunner/DocTestFinder
    instead.  Kept only so old callers keep working.
    """
    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
        # Warn every caller; this class merely wraps the modern API.
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)
    def runstring(self, s, name):
        # Parse `s` as a doctest (using self.globs) and run it.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return TestResults(f,t)
    def rundoc(self, object, name=None, module=None):
        # Run every doctest found on `object`, summing the tallies.
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return TestResults(f,t)
    def rundict(self, d, name, module=None):
        # Wrap the dict in a throw-away module so rundoc can search it.
        import types
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)
    def run__test__(self, d, name):
        # Wrap `d` as a module-level __test__ mapping and run it.
        import types
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)
    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)
    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
# Default reporting flags applied to DocTestCase runs that set none of
# their own; see set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
      >>> import doctest
      >>> old = doctest._unittest_reportflags
      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True
      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    Only reporting flags can be set:
      >>> doctest.set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)
      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Reject anything outside the reporting-flag mask.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)

    # Swap in the new value and hand back the previous one.
    old, _unittest_reportflags = _unittest_reportflags, flags
    return old
class DocTestCase(unittest.TestCase):
    """A unittest.TestCase that runs a single DocTest.

    Attribute names are prefixed with _dt_ to avoid colliding with
    unittest.TestCase's own attributes.
    """
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
    def setUp(self):
        # Invoke the user-supplied set-up hook (if any) with the DocTest.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Free the test's globals to help garbage collection.
        test.globs.clear()
    def runTest(self):
        # Run the wrapped DocTest, capturing its report into `new`, and
        # raise a unittest failure if any example failed.
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        # Build the unittest-style failure message for this doctest.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                '  File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging.  The test code
        is run in such a way that errors are not caught.  This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
          >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
          ...                {}, 'foo', 'foo.py', 0)
          >>> case = DocTestCase(test)
          >>> try:
          ...     case.debug()
          ... except UnexpectedException, failure:
          ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
          >>> failure.test is test
          True
          >>> failure.example.want
          '42\n'
          >>> exc_info = failure.exc_info
          >>> raise exc_info[0], exc_info[1], exc_info[2]
          Traceback (most recent call last):
          ...
          KeyError
        If the output doesn't match, then a DocTestFailure is raised:
          >>> test = DocTestParser().get_doctest('''
          ...      >>> x = 1
          ...      >>> x
          ...      2
          ...      ''', {}, 'foo', 'foo.py', 0)
          >>> case = DocTestCase(test)
          >>> try:
          ...    case.debug()
          ... except DocTestFailure, failure:
          ...    pass
        DocTestFailure objects provide access to the test:
          >>> failure.test is test
          True
        As well as to the example:
          >>> failure.example.want
          '2\n'
        and the actual output:
          >>> failure.got
          '1\n'
        """
        # Use a DebugRunner so the first failure/unexpected exception is
        # raised rather than recorded; keep globals for post-mortem use.
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    This converts each documentation string in a module that
    contains doctest tests to a unittest test case.  If any of the
    tests in a doc string fail, then the test case fails.  An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.

    The `module` argument provides the module to be tested.  The argument
    can be either a module or a module name.

    If no argument is given, the calling module is used.

    A number of options may be provided as keyword arguments:

    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.

    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.

    globs
      A dictionary containing initial global variables for the tests.

    optionflags
      A set of doctest option flags expressed as an integer.
    """
    finder = test_finder if test_finder is not None else DocTestFinder()

    module = _normalize_module(module)
    tests = finder.find(module, globs=globs, extraglobs=extraglobs)
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        # Skip docstrings that parsed but contain no runnable examples.
        if not test.examples:
            continue
        if not test.filename:
            # Map compiled-module filenames back to their .py source.
            src_filename = module.__file__
            if src_filename.endswith((".pyc", ".pyo")):
                src_filename = src_filename[:-1]
            test.filename = src_filename
        suite.addTest(DocTestCase(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """DocTestCase variant for doctests loaded from standalone text files."""
    def id(self):
        """Underscore-joined version of the dotted test name."""
        return '_'.join(self._dt_test.name.split('.'))
    def __repr__(self):
        """The path of the file the test was loaded from."""
        return self._dt_test.filename
    __str__ = __repr__
    def format_failure(self, err):
        """Format a failure report that names the originating file."""
        test = self._dt_test
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (test.name, test.filename, err))
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Build a DocFileCase from the doctest file at `path`."""
    # Work on a private copy so the caller's globals dict is never mutated.
    globs = {} if globs is None else globs.copy()
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path and read the file's contents.
    doc, path = _load_testfile(path, package, module_relative)
    # Expose the file's own path to the tests, unless already provided.
    globs.setdefault("__file__", path)
    name = os.path.basename(path)
    if encoding is not None:
        # Convert the raw bytes to unicode using the requested encoding.
        doc = doc.decode(encoding)
    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """Create a unittest suite from one or more doctest files.

    Each path is given as a string; its interpretation depends on the
    `module_relative` keyword.

    Keyword options:
      module_relative -- when True (the default), paths are treated as
        os-independent module-relative paths: they use "/" separators,
        may not be absolute, and are resolved against the calling
        module's directory (or against `package` when given).  When
        False, paths are ordinary os-specific paths, absolute or
        relative to the current working directory.
      package -- a Python package, or package name, whose directory
        anchors module-relative paths.  It is an error to specify
        `package` when `module_relative` is False.
      setUp -- called before the tests in each file, with the DocTest
        object; test globals are available as its `globs` attribute.
      tearDown -- called after the tests in each file, with the DocTest.
      globs -- dict of initial global variables for the tests.
      optionflags -- doctest option flags OR'ed together as an integer.
      parser -- a DocTestParser (or subclass) used to extract the tests.
      encoding -- encoding used to convert the files to unicode.
    """
    # Resolve the package here so that _normalize_module sees *our*
    # caller as the calling module; resolving inside DocFileTest would
    # make this function the caller and could guess the wrong package.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))
    suite = unittest.TestSuite()
    suite.addTests(DocFileTest(path, **kw) for path in paths)
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ... blah
    ... ... blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print script_from_examples(text)
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
    blah
    blah
    #
    # Ho hum
    <BLANKLINE>
    """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                output.append('# Expected:')
                # "[:-1]" drops the empty string after the final newline.
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            # Non-Example pieces are plain strings; comment out each line.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]
    # Trim junk on both ends.
    # (Leading/trailing runs of bare "#" lines carry no information.)
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.

    `module` is the module (or dotted module name) containing the test;
    `name` is the (module-local) name of the object whose docstring
    holds the tests to be debugged.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug the doctest docstring given directly as the string `src`."""
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
# Note that tempfile.NameTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    `module` is the module (or dotted module name) containing the test;
    `name` is the (module-local) name of the object whose docstring
    holds the tests to be debugged.
    """
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
# The docstrings below are executable doctests exercised by _test().
class _TestClass:
    """
    A pointless class, for sanity-checking of docstring testing.
    Methods:
        square()
        get()
    >>> _TestClass(13).get() + _TestClass(-12).get()
    1
    >>> hex(_TestClass(13).square().get())
    '0xa9'
    """
    def __init__(self, val):
        """val -> _TestClass object with associated value val.
        >>> t = _TestClass(123)
        >>> print t.get()
        123
        """
        self.val = val
    def square(self):
        """square() -> square TestClass's associated value
        >>> _TestClass(13).square().get()
        169
        """
        self.val = self.val ** 2
        # return self so calls can be chained, as in the class doctest
        return self
    def get(self):
        """get() -> return TestClass's associated value.
        >>> x = _TestClass(-42)
        >>> print x.get()
        -42
        """
        return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Command-line entry point.

    With file arguments, run the doctests in each named module (".py"
    files are imported; other files are treated as doctest text files)
    and return 1 as soon as one of them has failures, else 0.  With no
    arguments, run this module's own test suite.
    """
    testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
    if testfiles:
        for filename in testfiles:
            if filename.endswith(".py"):
                # It is a module -- insert its dir into sys.path and try to
                # import it. If it is part of a package, that possibly won't work
                # because of package imports.
                dirname, filename = os.path.split(filename)
                sys.path.insert(0, dirname)
                try:
                    m = __import__(filename[:-3])
                finally:
                    # Always undo the sys.path change, even if the import
                    # raises, so later imports are not affected.
                    del sys.path[0]
                failures, _ = testmod(m)
            else:
                failures, _ = testfile(filename, module_relative=False)
            if failures:
                return 1
    else:
        r = unittest.TextTestRunner()
        r.run(DocTestSuite())
    return 0
if __name__ == "__main__":
    sys.exit(_test())
| mit |
snowdream1314/scrapy | tests/test_utils_request.py | 13 | 3587 | from __future__ import print_function
import unittest
from scrapy.http import Request
from scrapy.utils.request import request_fingerprint, _fingerprint_cache, \
request_authenticate, request_httprepr
class UtilsRequestTest(unittest.TestCase):
    """Tests for scrapy.utils.request helpers."""
    def test_request_fingerprint(self):
        # Fingerprints are stable and query-argument-order independent.
        r1 = Request("http://www.example.com/query?id=111&cat=222")
        r2 = Request("http://www.example.com/query?cat=222&id=111")
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r1))
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        r1 = Request('http://www.example.com/hnnoticiaj1.aspx?78132,199')
        r2 = Request('http://www.example.com/hnnoticiaj1.aspx?78160,199')
        self.assertNotEqual(request_fingerprint(r1), request_fingerprint(r2))
        # make sure caching is working
        self.assertEqual(request_fingerprint(r1), _fingerprint_cache[r1][None])
        r1 = Request("http://www.example.com/members/offers.html")
        r2 = Request("http://www.example.com/members/offers.html")
        r2.headers['SESSIONID'] = b"somehash"
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        r1 = Request("http://www.example.com/")
        r2 = Request("http://www.example.com/")
        r2.headers['Accept-Language'] = b'en'
        r3 = Request("http://www.example.com/")
        r3.headers['Accept-Language'] = b'en'
        r3.headers['SESSIONID'] = b"somehash"
        # BUG FIX: the original single call
        #   assertEqual(fp(r1), fp(r2), fp(r3))
        # passed the third fingerprint as assertEqual's `msg` parameter,
        # so r3 was never actually compared.  Headers are ignored by the
        # default fingerprint, so all three must be equal:
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r2))
        self.assertEqual(request_fingerprint(r1), request_fingerprint(r3))
        self.assertEqual(request_fingerprint(r1),
                         request_fingerprint(r1, include_headers=['Accept-Language']))
        self.assertNotEqual(request_fingerprint(r1),
                            request_fingerprint(r2, include_headers=['Accept-Language']))
        # header names are case-insensitive and order-independent
        self.assertEqual(request_fingerprint(r3, include_headers=['accept-language', 'sessionid']),
                         request_fingerprint(r3, include_headers=['SESSIONID', 'Accept-Language']))
        r1 = Request("http://www.example.com")
        r2 = Request("http://www.example.com", method='POST')
        r3 = Request("http://www.example.com", method='POST', body=b'request body')
        self.assertNotEqual(request_fingerprint(r1), request_fingerprint(r2))
        self.assertNotEqual(request_fingerprint(r2), request_fingerprint(r3))
        # cached fingerprint must be cleared on request copy
        r1 = Request("http://www.example.com")
        fp1 = request_fingerprint(r1)
        r2 = r1.replace(url="http://www.example.com/other")
        fp2 = request_fingerprint(r2)
        self.assertNotEqual(fp1, fp2)
    def test_request_authenticate(self):
        r = Request("http://www.example.com")
        request_authenticate(r, 'someuser', 'somepass')
        self.assertEqual(r.headers['Authorization'], b'Basic c29tZXVzZXI6c29tZXBhc3M=')
    def test_request_httprepr(self):
        r1 = Request("http://www.example.com")
        self.assertEqual(request_httprepr(r1), b'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n')
        r1 = Request("http://www.example.com/some/page.html?arg=1")
        self.assertEqual(request_httprepr(r1), b'GET /some/page.html?arg=1 HTTP/1.1\r\nHost: www.example.com\r\n\r\n')
        r1 = Request("http://www.example.com", method='POST', headers={"Content-type": b"text/html"}, body=b"Some body")
        self.assertEqual(request_httprepr(r1), b'POST / HTTP/1.1\r\nHost: www.example.com\r\nContent-Type: text/html\r\n\r\nSome body')
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
willhardy/django | django/core/serializers/xml_serializer.py | 50 | 15601 | """
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """
    def indent(self, level):
        # Only pretty-print when the caller supplied an "indent" option.
        if self.options.get('indent') is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})
    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()
    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
        self.indent(1)
        attrs = OrderedDict([("model", smart_text(obj._meta))])
        # The pk attribute is omitted when serializing by natural key only.
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            obj_pk = obj._get_pk_val()
            if obj_pk is not None:
                attrs['pk'] = smart_text(obj_pk)
        self.xml.startElement("object", attrs)
    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")
    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("type", field.get_internal_type()),
        ]))
        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            try:
                self.xml.characters(field.value_to_string(obj))
            except UnserializableContentError:
                raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                    obj.__class__.__name__, field.name, obj._get_pk_val()))
        else:
            # NULL values are represented by an empty <None/> element.
            self.xml.addQuickElement("None")
        self.xml.endElement("field")
    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related_att = getattr(obj, field.get_attname())
        if related_att is not None:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                related = getattr(obj, field.name)
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_text(key_value))
                    self.xml.endElement("natural")
            else:
                self.xml.characters(smart_text(related_att))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")
    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Explicit (non-auto-created) through models are serialized as
        # their own objects, so nothing is emitted for the m2m here.
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_text(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk': smart_text(value._get_pk_val())
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)
            self.xml.endElement("field")
    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("rel", field.remote_field.__class__.__name__),
            ("to", smart_text(field.remote_field.model._meta)),
        ]))
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """
    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Stream events lazily with pulldom so large documents are not
        # fully loaded into memory.
        self.event_stream = pulldom.parse(self.stream, self._make_parser())
        self.db = options.pop('using', DEFAULT_DB_ALIAS)
        self.ignore = options.pop('ignorenonexistent', False)
    def _make_parser(self):
        """Create a hardened XML parser (no custom/external entities)."""
        return DefusedExpatParser()
    def __next__(self):
        # Advance the event stream to the next <object> element; only
        # that subtree is expanded into a DOM node.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration
    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")
        # Start building a data dictionary from the object.
        data = {}
        if node.hasAttribute('pk'):
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(
                node.getAttribute('pk'))
        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}
        field_names = {f.name for f in Model._meta.get_fields()}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")
            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly unless ignorenonexistent=True is used.
            if self.ignore and field_name not in field_names:
                continue
            field = Model._meta.get_field(field_name)
            # As is usually the case, relation fields get the special treatment.
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child marks a NULL value (see the serializer).
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value
        obj = base.build_instance(Model, data, self.db)
        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(obj, m2m_data)
    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            model = field.remote_field.model
            if hasattr(model._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.remote_field.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.remote_field.model._meta.pk.remote_field:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        model = field.remote_field.model
        default_manager = model._default_manager
        if hasattr(default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            def m2m_convert(n):
                return model._meta.pk.to_python(n.getAttribute('pk'))
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]
    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr))
        try:
            return apps.get_model(model_identifier)
        except (LookupError, TypeError):
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier))
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).

    Concatenates the data of every TEXT/CDATA descendant in document
    order; other node types (comments, processing instructions) are
    skipped.
    """
    # inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    inner_text = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
            inner_text.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            # Append the child's joined text in one piece; extend() here
            # would add it character by character, since the recursive
            # call returns a string.
            inner_text.append(getInnerText(child))
    return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
    """
    An expat parser hardened against XML bomb attacks.
    Forbids DTDs, external entity references
    """
    def __init__(self, *args, **kwargs):
        _ExpatParser.__init__(self, *args, **kwargs)
        # Disable resolution of external general and parameter entities.
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)
    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        # Any DOCTYPE declaration is rejected outright (prevents billion
        # laughs / quadratic blowup attacks).
        raise DTDForbidden(name, sysid, pubid)
    def entity_decl(self, name, is_parameter_entity, value, base,
                    sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)
    def reset(self):
        # reset() re-creates the underlying expat parser, so the
        # forbidding handlers must be re-installed each time.
        _ExpatParser.reset(self)
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
    """Base exception for the hardened ("defused") XML parser."""
    def __repr__(self):
        # Mirror __str__ so log output shows the descriptive message.
        return self.__str__()
class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""
    def __init__(self, name, sysid, pubid):
        super(DTDForbidden, self).__init__()
        self.name = name
        self.sysid = sysid
        self.pubid = pubid
    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""
    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super(EntitiesForbidden, self).__init__()
        self.name = name
        self.value = value
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
        self.notation_name = notation_name
    def __str__(self):
        return "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""
    def __init__(self, context, base, sysid, pubid):
        super(ExternalReferenceForbidden, self).__init__()
        self.context = context
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
    def __str__(self):
        return "ExternalReferenceForbidden(system_id='{}', public_id={})".format(
            self.sysid, self.pubid)
| bsd-3-clause |
h4ck3rm1k3/github3.py | tests/test_github.py | 9 | 9398 | import github3
from tests.utils import (BaseCase, load, mock)
def merge(first, second=None, **kwargs):
    """Return a new dict: `first` overlaid with `second`, then `kwargs`.

    None of the input mappings is mutated.
    """
    merged = first.copy()
    merged.update(second or {}, **kwargs)
    return merged
class TestGitHub(BaseCase):
    """Mocked-HTTP tests for the top-level github3.GitHub client."""
    def test_init(self):
        g = github3.GitHub('foo', 'bar')
        assert repr(g).endswith('[foo]>')
        g = github3.GitHub(token='foo')
        # token-based clients fall back to the default object repr (id)
        assert repr(g).endswith('{0:x}>'.format(id(g)))
    def test_key(self):
        self.response('key')
        self.get('https://api.github.com/user/keys/10')
        # unauthenticated access must raise without hitting the network
        self.assertRaises(github3.GitHubError, self.g.key, 10)
        assert self.request.called is False
        self.login()
        # invalid ids short-circuit to a null object, no request made
        assert self.g.key(-1).is_null()
        assert self.request.called is False
        assert isinstance(self.g.key(10), github3.users.Key)
        self.mock_assertions()
    def test_login(self):
        self.g.login('user', 'password')
        assert self.g.session.auth == ('user', 'password')
        self.g.login(token='FakeOAuthToken')
        auth = self.g.session.headers.get('Authorization')
        assert auth == 'token FakeOAuthToken'
    # Unwritten test, not entirely sure how to mock this
    def test_markdown(self):
        self.response('archive')
        self.post('https://api.github.com/markdown')
        self.conf = dict(
            data={
                'text': 'Foo', 'mode': 'gfm', 'context': 'sigmavirus24/cfg'
            }
        )
        assert self.g.markdown(
            'Foo', 'gfm', 'sigmavirus24/cfg'
        ).startswith('archive_data')
        self.mock_assertions()
        # raw mode posts the bare text to the /markdown/raw endpoint
        self.post('https://api.github.com/markdown/raw')
        self.conf['data'] = 'Foo'
        self.g.markdown('Foo', raw=True)
        self.mock_assertions()
        assert self.g.markdown(None) == ''
        self.not_called()
    def test_meta(self):
        self.response('meta')
        self.get('https://api.github.com/meta')
        meta = self.g.meta()
        assert isinstance(meta, dict)
        self.mock_assertions()
    def test_octocat(self):
        self.response('archive')
        self.get('https://api.github.com/octocat')
        assert self.g.octocat().startswith('archive_data')
        self.mock_assertions()
    def test_organization(self):
        self.response('org')
        self.get('https://api.github.com/orgs/github3py')
        org = self.g.organization('github3py')
        assert isinstance(org, github3.orgs.Organization)
        self.mock_assertions()
    def test_pubsubhubbub(self):
        self.response('', 204)
        self.post('https://api.github.com/hub')
        body = [('hub.mode', 'subscribe'),
                ('hub.topic', 'https://github.com/foo/bar/events/push'),
                ('hub.callback', 'https://localhost/post')]
        self.conf = {}
        pubsubhubbub = self.g.pubsubhubbub
        self.assertRaises(github3.GitHubError, pubsubhubbub, '', '', '')
        self.login()
        # empty / invalid arguments are rejected client-side
        assert pubsubhubbub('', '', '') is False
        self.not_called()
        assert pubsubhubbub('foo', 'https://example.com', 'foo') is False
        self.not_called()
        # strip the "hub." prefix to build the keyword arguments
        d = dict([(k[4:], v) for k, v in body])
        assert pubsubhubbub(**d) is True
        _, kwargs = self.request.call_args
        assert 'data' in kwargs
        assert body == kwargs['data']
        self.mock_assertions()
        # an optional secret is forwarded as hub.secret
        d['secret'] = 'secret'
        body.append(('hub.secret', 'secret'))
        assert pubsubhubbub(**d)
        _, kwargs = self.request.call_args
        assert 'data' in kwargs
        assert body == kwargs['data']
        self.mock_assertions()
    def test_pull_request(self):
        self.response('pull')
        self.get('https://api.github.com/repos/sigmavirus24/'
                 'github3.py/pulls/18')
        pr = None
        # patch repository() so no extra HTTP round-trip is simulated
        with mock.patch.object(github3.github.GitHub, 'repository') as repo:
            repo.return_value = github3.repos.Repository(load('repo'))
            pr = self.g.pull_request('sigmavirus24', 'github3.py', 18)
        assert isinstance(pr, github3.pulls.PullRequest)
        self.mock_assertions()
    def test_set_client_id(self):
        auth = ('idXXXXXXXXXXXX', 'secretXXXXXXXXXXXXXXXX')
        self.g.set_client_id(*auth)
        assert self.g.session.params['client_id'] == auth[0]
        assert self.g.session.params['client_secret'] == auth[1]
    def test_set_user_agent(self):
        ua = 'Fake User Agents'
        self.g.set_user_agent(ua)
        assert self.g.session.headers['User-Agent'] == ua
        # setting None must not clobber the existing user agent
        self.g.set_user_agent(None)
        assert self.g.session.headers['User-Agent'] == ua
    def test_star(self):
        self.response('', 204)
        self.put('https://api.github.com/user/starred/sigmavirus24/github3.py')
        self.conf = {'data': None}
        self.assertRaises(github3.GitHubError, self.g.star, 'foo', 'bar')
        self.login()
        assert self.g.star(None, None) is False
        assert self.g.star('sigmavirus24', 'github3.py')
        self.mock_assertions()
    def test_unfollow(self):
        self.response('', 204)
        self.delete('https://api.github.com/user/following/'
                    'sigmavirus24')
        self.conf = {}
        self.assertRaises(github3.GitHubError, self.g.unfollow, 'foo')
        self.login()
        assert self.g.unfollow(None) is False
        assert self.g.unfollow('sigmavirus24')
        self.mock_assertions()
    def test_unstar(self):
        self.response('', 204)
        self.delete('https://api.github.com/user/starred/'
                    'sigmavirus24/github3.py')
        self.conf = {}
        self.assertRaises(github3.GitHubError, self.g.unstar, 'foo', 'bar')
        self.login()
        assert self.g.unstar(None, None) is False
        assert self.g.unstar('sigmavirus24', 'github3.py')
        self.mock_assertions()
    def test_utf8_user(self):
        self.response('utf8_user')
        self.get('https://api.github.com/users/alejandrogomez')
        u = self.g.user('alejandrogomez')
        # regression test: repr of users with non-ASCII names must not blow up
        try:
            repr(u)
        except UnicodeEncodeError:
            self.fail('Regression caught. See PR #52. Names must be utf-8'
                      ' encoded')
    def test_zen(self):
        self.response('archive')
        self.get('https://api.github.com/zen')
        assert self.g.zen().startswith('archive_data')
        self.mock_assertions()
class TestGitHubEnterprise(BaseCase):
    """Tests for the GitHubEnterprise client (custom API root under /api/v3)."""
    def setUp(self):
        super(TestGitHubEnterprise, self).setUp()
        self.g = github3.GitHubEnterprise('https://github.example.com:8080/')
    def test_admin_stats(self):
        self.response('user')
        self.get('https://github.example.com:8080/api/v3/enterprise/stats/all')
        # admin stats require authentication
        self.assertRaises(github3.GitHubError, self.g.admin_stats, None)
        self.not_called()
        self.login()
        assert isinstance(self.g.admin_stats('all'), dict)
        self.mock_assertions()
    def test_repr(self):
        assert repr(self.g).startswith('<GitHub Enterprise')
    def test_pubsubhubbub(self):
        self.response('', 204)
        # the hub endpoint lives under the enterprise API root
        self.post('https://github.example.com:8080/api/v3/hub')
        body = [('hub.mode', 'subscribe'),
                ('hub.topic',
                 'https://github.example.com:8080/foo/bar/events/push'),
                ('hub.callback', 'https://localhost/post')]
        self.conf = {}
        self.login()
        # strip the "hub." prefix to build the keyword arguments
        d = dict([(k[4:], v) for k, v in body])
        assert self.g.pubsubhubbub(**d)
        _, kwargs = self.request.call_args
        assert 'data' in kwargs
        assert body == kwargs['data']
        self.mock_assertions()
        # an optional secret is forwarded as hub.secret
        d['secret'] = 'secret'
        body.append(('hub.secret', 'secret'))
        assert self.g.pubsubhubbub(**d)
        _, kwargs = self.request.call_args
        assert 'data' in kwargs
        assert body == kwargs['data']
        self.mock_assertions()
class TestUnsecureGitHubEnterprise(BaseCase):
    """Tests for GitHubEnterprise with SSL certificate verification disabled."""
    def setUp(self):
        super(TestUnsecureGitHubEnterprise, self).setUp()
        self.g = github3.GitHubEnterprise('https://github.example.com:8080/',
                                          verify=False)
    def test_skip_ssl_validation(self):
        self.response('pull_enterprise')
        self.g.pull_request('sigmavirus24', 'github3.py', 19)
        # Compare with `is False` instead of `False == x` (flake8 E712):
        # the original ordering reads like a constant-False assertion.
        assert self.g.session.verify is False
        assert self.request.called
class TestGitHubStatus(BaseCase):
    """Tests for the GitHubStatus client (status.github.com API)."""

    def setUp(self):
        super(TestGitHubStatus, self).setUp()
        self.g = github3.GitHubStatus()
        self.api = 'https://status.github.com/'

    def test_repr(self):
        assert repr(self.g) == '<GitHub Status>'

    def test_api(self):
        """api() GETs api.json and returns the decoded dict."""
        self.response('user')
        self.get(self.api + 'api.json')
        assert isinstance(self.g.api(), dict)
        self.mock_assertions()

    def test_status(self):
        """status() GETs api/status.json."""
        self.response('user')
        self.get(self.api + 'api/status.json')
        assert isinstance(self.g.status(), dict)
        self.mock_assertions()

    def test_last_message(self):
        """last_message() GETs api/last-message.json."""
        self.response('user')
        self.get(self.api + 'api/last-message.json')
        assert isinstance(self.g.last_message(), dict)
        self.mock_assertions()

    def test_messages(self):
        """messages() GETs api/messages.json."""
        self.response('user')
        self.get(self.api + 'api/messages.json')
        assert isinstance(self.g.messages(), dict)
        self.mock_assertions()
| bsd-3-clause |
wfxiang08/Nuitka | nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/hpcc.py | 7 | 1810 | """SCons.Tool.hpcc
Tool-specific initialization for HP aCC and cc.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpcc.py 2014/07/05 09:42:21 garyo"
import SCons.Util
import cc
def generate(env):
    """Add Builders and construction variables for aCC & cc to an Environment."""
    # Reuse the generic cc tool setup, then override the HP-specific bits.
    cc.generate(env)

    env['CXX'] = 'aCC'
    # +Z makes aCC emit position-independent code for shared objects.
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS +Z')
def exists(env):
    # The tool is only usable when the aCC compiler can be located.
    return env.Detect('aCC')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
csrg-utfsm/acscb | LGPL/acsBUILD/test/createAcsRootTar.py | 4 | 7071 | #! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# Copyright (c) European Southern Observatory, 2013
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# who when what
# -------- -------- ----------------------------------------------
# acaproni 2013-08-21 created
#
import os
import sys
from datetime import datetime
from subprocess import call
# The packages to include in the tar
acsRootPackages = (
"ACSSW",
"acsdata")
def getAcsVersionFromACSROOT():
    '''
    Get the ACS version out of $ACSROOT, e.g. "ACS-12_0" for
    ACSROOT=/alma/ACS-12.0/ACSSW.

    @return the version string with dots replaced by underscores
    @raise Exception if ACSROOT is not defined or malformed
    '''
    # 'in' works on both Python 2 and 3; dict.has_key() was removed in Python 3.
    if 'ACSROOT' in os.environ:
        acsRoot = os.environ['ACSROOT']
    else:
        raise Exception("ACSROOT not defined!")
    # Assumes ACSROOT to be something like /alma/ACS-12.0/ACSSW
    strs = acsRoot.split("/")
    if len(strs) != 4:
        raise Exception("Malformed ACSROOT: " + acsRoot)
    # Replace dots so the version can be embedded in a file/tar name.
    return strs[2].replace(".", "_")
def getTagFromFile(tagFileName):
    '''
    Return the tag contained in the file with the passed name
    (i.e. ACS_TAG or ARCHIVE_TAG), looked up in $ACSROOT.

    getAcsTag() and getArchiveTag() both need to read the tag
    in the same way but from different files in $ACSROOT.

    @param tagFileName: the name of the file with the TAG to open
    @return the TAG contained in the file (stripped of whitespace)
    @throws Exception in case of failure
    '''
    if tagFileName is None:
        raise Exception("Invalid empty file name")
    # 'in' replaces dict.has_key(), which was removed in Python 3.
    if 'ACSROOT' in os.environ:
        acsRoot = os.environ['ACSROOT']
    else:
        raise Exception("ACSROOT not defined!")
    # Build the full path with exactly one '/' separator.  The original
    # code compared characters with 'is not', which relies on CPython
    # string interning; use a real equality test instead.
    if not acsRoot.endswith('/'):
        fileToOpen = acsRoot + "/" + tagFileName
    else:
        fileToOpen = acsRoot + tagFileName
    # Check that the file exists and is readable before opening it.
    if not os.access(fileToOpen, os.R_OK):
        raise Exception(fileToOpen + " unreadable")
    with open(fileToOpen) as f:
        content = f.readlines()
        # The tag file must contain exactly one line ('!=', not 'is not':
        # identity comparison of ints is implementation defined).
        if len(content) != 1:
            raise Exception(fileToOpen + " is malformed")
        else:
            return str(content[0]).strip()
def getACSVersionFromFile():
    '''
    @return the ACS version from $ACSROOT/ACS_VERSION
    @throws exception if the file is missing or malformed (see getTagFromFile)
    '''
    return getTagFromFile("ACS_VERSION")
def getAcsTag():
    '''
    Get ACS tag from $ACSROOT/ACS_TAG
    @return: The tag or exception in case of error
    '''
    return getTagFromFile("ACS_TAG")
def getArchiveTag():
    '''
    Get ACS tag from $ACSROOT/ARCHIVE_TAG
    @return: The tag or exception in case of error
    '''
    return getTagFromFile("ARCHIVE_TAG")
def checkInstalledVersion(acsRoot, acsVersion):
    '''
    Check if the installed version of ACS in $ACSROOT matches with
    the content of ACS_VERSION.

    @param acsRoot: the installation folder name, e.g. "ACS-12_0"
    @param acsVersion: the version string from ACS_VERSION, e.g. "12.0"
    @return True if the version matches
    '''
    # The folder name is "ACS-" plus the version with dots turned into
    # underscores, e.g. version 12.0 <-> folder ACS-12_0.
    expected = "ACS-" + acsVersion.replace(".", "_")
    return acsRoot == expected
def getAcsDistributionFolder():
    """
    Get the folder where all the packages reside.
    It is relative to the root i.e. something like alma/ACS-12.0/

    @return the distribution folder, without a leading '/'
    @raise Exception if ACSROOT is not defined
    """
    # 'in' works on both Python 2 and 3; dict.has_key() was removed in Python 3.
    if 'ACSROOT' in os.environ:
        acsRoot = os.environ['ACSROOT']
    else:
        raise Exception("ACSROOT not defined!")
    # /alma/ACS-12.0/ACSSW -> alma/ACS-12.0/ (drop trailing ACSSW and
    # the first '/' so the path is relative to the filesystem root).
    temp = acsRoot.replace("ACSSW", "")
    temp = temp.replace("/", "", 1)
    return temp
def getDate():
    '''
    Return the date to be set in the name of the tar.

    @return the current date formatted as YYYYMMDD (month/day zero padded)
    '''
    today = datetime.today()
    return "%04d%02d%02d" % (today.year, today.month, today.day)
def getArchitecture():
    '''
    Return the architecture 32/64 bit.

    @return the machine hardware name from uname, e.g. x86_64 or i686
    '''
    # os.uname() -> (sysname, nodename, release, version, machine);
    # index 4 is the machine hardware name.
    return os.uname()[4]
def getDistribution():
    '''
    Get the distribution (RHEL, SL and so on)
    by parsing /etc/redhat-release
    @return: a string describing the distribution like RH6.5
    or UNKNOWN if /etc/redhat-release does not exist or is unreadable.
    '''
    ret = "UNKNOWN"
    fname = "/etc/redhat-release"
    # The docstring promises UNKNOWN when the file is missing or
    # unreadable, but the unguarded open() used to raise in that case.
    if not os.access(fname, os.R_OK):
        return ret
    with open(fname) as f:
        content = f.readlines()
    distr = str(content)
    if distr.find("Red Hat Enterprise Linux") >= 0:
        temp = "RH"
    elif distr.find("Scientific Linux") >= 0:
        temp = "SL"
    else:
        return ret
    # The version is the second to last blank-separated token,
    # e.g. "... release 6.5 (Santiago)" -> "6.5".
    parts = distr.split(" ")
    version = parts[len(parts) - 2]
    return temp + version
def buildTarName(acs, archive):
    '''
    Build the name of the tar.

    The layout is:
      <ACS tag>[-<ARCHIVE tag without -B>]-<date>-<arch>-<distro>.tar.gz

    @param acs: ACS tag
    @param archive: ARC tag or None if ARCHIVE not installed
    @return the file name of the tar
    '''
    pieces = [acs]
    if archive is not None:
        # Drop the "-B" branch marker from the ARCHIVE tag.
        pieces.append(archive.replace("-B", ""))
    pieces.append(getDate())
    pieces.append(getArchitecture())
    pieces.append(getDistribution())
    return "-".join(pieces) + ".tar.gz"
if __name__=="__main__":
    # Read the mandatory ACS tag; abort if it cannot be determined.
    try:
        acsTag=getAcsTag()
    except Exception as e:
        # No ACS TAG means something strange: abort
        print "Error reading ACS TAG",e
        exit(-1)
    # The ARCHIVE tag is optional (ARCHIVE may not be installed).
    try:
        arcTag=getArchiveTag()
    except Exception as e:
        # This is not an error but means that ARCHIVE is not installed in ACSROOT
        arcTag=None
    print "ACS_TAG is",acsTag
    if arcTag is not None:
        print "ARCHIVE_TAG is",arcTag
    else:
        print "No ARCHIVE TAG found (ARCHIVE not installed in ACSROOT)"
    # Sanity check: the version recorded in ACS_VERSION must agree with
    # the folder name encoded in $ACSROOT.
    acsVersionFromFile=getACSVersionFromFile()
    acsVersionFromACSROOT=getAcsVersionFromACSROOT()
    if not checkInstalledVersion(acsVersionFromACSROOT,acsVersionFromFile):
        print "ACS version seems inconsistent:",acsVersionFromACSROOT,"does not match with",acsVersionFromFile
        exit(1)
    print "ACS version is",acsVersionFromACSROOT
    currentFolder=os.getcwd()
    srcFolder=getAcsDistributionFolder()
    print "Creating ACSROOT tar for", acsVersionFromACSROOT,"in",currentFolder
    tarName=buildTarName(acsVersionFromACSROOT,arcTag)
    print "Tar name",tarName
    tarNameFullPath=currentFolder+"/"+tarName
    # Go to root / so the archived paths are relative (alma/...).
    os.chdir("/")
    print "Moved into",os.getcwd()
    # build the command to pass to call
    cmd=[]
    cmd.append("tar")
    cmd.append("cpzf")
    cmd.append(tarNameFullPath)
    # The Sources folder is deliberately excluded from the tar.
    cmd.append("--exclude")
    cmd.append("Sources")
    for pkg in acsRootPackages:
        cmd.append(srcFolder+pkg)
    print "Running tar....",
    call(cmd)
    # Back to original folder
    os.chdir(currentFolder)
    print "Done"
| mit |
lorenzgerber/sql_python_exercise | examples_from_coursepage/example7_odbc.py | 1 | 2503 | # A simple program which connects to an ODBC database
# and then executes a simple insert query.
# Note that it is possible to recapture that the insert "failed" if it violates
# integrity constraints.
# It also illustrates that updates must be committed explicitly.
# Stephen J. Hegner 15 June 2011.
# Modified for Python 3 compatibility 21 February 2014.
# Modified to show how to insert NULL, 24 April 2016.
### This script has been tested with both Python2 and Python3
### under PostgreSQL and MySQL running on Debian 7 Linux.
### IMPORTANT: For PostgreSQL:
### the ANSI driver psqlodbca.so must be used for Python2;
### the Unicode driver psqlodbcw.so must be used for Python3.
### For other Linux distributions which use Unicode in the OS (e.g., Ubuntu)
### it may be necessary to use psqlodbcw.so for Python2 as well.
### For MySQL, the driver libmyodbc.so works for both Python2 and Python3.
import sys
import pyodbc
if sys.version_info<(3,0,0):
input = raw_input
import my_odbc_connect
import my_odbc_cursor
query = "Insert into Works_On values (?,?,?)"
def read_input():
    """Prompt interactively for the three fields of a Works_On tuple and
    return them as the list [essn, pno, hours], with 'NULL' mapped to None."""
    print("Type the fields of a tuple to insert into the works_on relation:")
    essn = input("Enter the employee SSN: ")
    pno = input("Enter the project number: ")
    hours = input("Enter the number of hours: ")
    # Pyodbc uses None to represent SQL NULL:
    if hours.upper()=='NULL':
        hours=None
    return [essn,pno,hours]
def process_query():
    # Outcome flag read by the module-level code to decide whether to commit:
    # 1 on success, 0 when the insert violates an integrity constraint.
    global success
    print("Executing the update.")
    cursor1 = connection1.cursor()
    arglist = read_input()
    ## Some gymnastics to accommodate both Python2 and Python3:
    try:
        # Parameterized insert: the three '?' placeholders are bound to
        # the values read from the user (no SQL string building).
        cursor1.execute(query,arglist[0],arglist[1],arglist[2])
        print("The insertion executed successfully.")
        success=1
    ## Python version incompatibilty:
    ## Comment out the second line for Python2 and the first for Python3:
    # except pyodbc.IntegrityError, why:
    except pyodbc.IntegrityError as why:
        print("The update would violate an integrity constraint.")
        print("The reason is:", why)
        success=0
def commit_updates(c):
    # Make the pending updates on connection c permanent; as noted in the
    # file header, updates must be committed explicitly.
    print("Committing the updates.")
    c.commit()
print("")
# Top-level driver: connect, run one insert, commit it only if it
# succeeded, then tear everything down.
connection1 = my_odbc_connect.establish_connection("pwd")
print("")
cursor1 = my_odbc_cursor.establish_cursor(connection1)
print("")
process_query()
print("")
# 'success' is set by process_query(); commit only successful inserts.
if success==1:
    commit_updates(connection1)
print("")
my_odbc_cursor.close_cursor(cursor1)
print("")
my_odbc_connect.close_connection(connection1)
thisispuneet/potato-blog | django/db/models/sql/constants.py | 394 | 1043 | import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict([(x, None) for x in (
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
    )])

# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100

# Separator used to split filter strings apart.
LOOKUP_SEP = '__'

# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6

# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'

# Accepts '?' (random ordering) or an optionally signed column/path name.
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
# Maps a requested direction to (direction, inverse direction).
ORDER_DIR = {
    'ASC': ('ASC', 'DESC'),
    'DESC': ('DESC', 'ASC')}
| bsd-3-clause |
de-wolff/android_kernel_motorola_xt320 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed

# options (filled in by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME=   0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU=    2
EINFO_IDX_TIME=   3
EINFO_IDX_PID=    4
EINFO_IDX_COMM=   5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    # 10^6 nanoseconds per millisecond; float division on purpose.
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
    # Honor the dev= option: skip packets whose device does not match.
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    # Columns: device, length, queue time, Qdisc latency, device latency.
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))

# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # All offsets below are printed relative to the first irq entry time.
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    # First print every hard irq (and any netif_rx seen inside it).
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    # Then the NET_RX softirq entry and everything handled inside it.
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' is set when the skb was copied to user space,
            # 'handle' when it was freed (kfree_skb/consume_skb).
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']),
                        event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t,
                        event['comm_t'])
            print PF_JOINT
def trace_begin():
    """Parse this script's own options (tx, rx, dev=, debug) from
    sys.argv into the module-level flags before events are processed."""
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            # argv[0] is the script name itself.
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # Default: show both directions when neither tx nor rx was given.
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
    """Sort all collected events chronologically, dispatch each one to
    its handler, then print the rx/tx reports (and buffer stats in
    debug mode)."""
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print " dev len Qdisc " \
            " netdevice free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
# Each callback below simply timestamps the event (nsecs) and queues an
# event_info tuple on all_event_list; real processing happens in trace_end.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # Only NET_RX softirqs are of interest to this script.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
    # Push a new irq record on this cpu's stack of in-flight hard irqs.
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

def handle_irq_handler_exit(event_info):
    # Pop the matching irq record; keep it only if it raised NET_RX
    # (i.e. acquired an 'event_list' while it was on the stack).
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    # Record that the currently running hard irq raised NET_RX.
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    # Start collecting events for the NET_RX softirq on this cpu.
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

def handle_irq_softirq_exit(event_info):
    # Close the NET_RX softirq: combine the irqs that raised it and the
    # events handled inside it into one "receive hunk" for reporting.
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information realted to a NET_RX softirq
    receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
    # Attach a napi_poll event to the NET_RX softirq open on this cpu.
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

def handle_netif_rx(event_info):
    # netif_rx runs in hard irq context: attach it to the irq record
    # currently on top of this cpu's stack.
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    # Record the received skb both in the open softirq hunk and in
    # rx_skb_list (bounded by buffer_budget) for later matching with
    # skb_copy_datagram_iovec / kfree_skb.
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    # A packet entered the Qdisc: start a tx record (bounded list).
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
    # On successful xmit, move the matching record from the Qdisc list
    # to the device list and stamp the xmit time.
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    # An skb was freed: complete the matching tx record, or mark the
    # matching rx record as dropped via kfree_skb.
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    # Freed while still queued in the Qdisc: the packet never hit the
    # wire, just drop the record.
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # Freed after the device xmit: the tx is complete.
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # Otherwise it was a received skb that got dropped.
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return

def handle_consume_skb(event_info):
    # Normal tx completion path: the driver consumed the skb.
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

def handle_skb_copy_datagram_iovec(event_info):
    # The received skb was copied to user space: record which process
    # (pid/comm) consumed it and retire the rx record.
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
espadrine/opera | chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py | 127 | 2229 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.executive import ScriptError
class CleanWorkingDirectory(AbstractStep):
    """Step that discards uncommitted SCM changes before other steps run.

    Does nothing unless --clean was passed; refuses to destroy local
    changes unless --force-clean was also given.
    """

    @classmethod
    def options(cls):
        extra_options = [
            Options.force_clean,
            Options.clean,
        ]
        return AbstractStep.options() + extra_options

    def run(self, state):
        if not self._options.clean:
            # Cleaning was not requested; leave the tree untouched.
            return
        dirty = self._tool.scm().has_working_directory_changes()
        if dirty and not self._options.force_clean:
            raise ScriptError("Working directory has changes, pass --force-clean to continue.")
        self._tool.scm().discard_working_directory_changes()
| bsd-3-clause |
HenriqueLR/watchful | app/core/tests/test_uses.py | 1 | 1770 | #encoding: utf-8
from django.test import LiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from django.contrib.auth.models import User
class TestLoginAdmin(LiveServerTestCase):
    '''
    Simple functional test use case interface admin.
    '''
    # Fixture loaded into the test database; presumably provides the
    # 'root' user used below -- verify against the fixture file.
    fixtures = ['user-data.json']

    @classmethod
    def setUpClass(cls):
        # One shared Firefox WebDriver instance for the whole test case.
        cls.selenium = WebDriver()
        super(TestLoginAdmin, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(TestLoginAdmin, cls).tearDownClass()

    def test_login(self):
        # Drive the browser through the /acesso/ login form as root/root.
        self.selenium.get('%s%s' % (self.live_server_url, '/acesso/'))
        username_input = self.selenium.find_element_by_name("username")
        username_input.send_keys('root')
        password_input = self.selenium.find_element_by_name("password")
        password_input.send_keys('root')
        self.selenium.find_element_by_xpath('//input[@value="Acessar"]').click()
class TestLoginPoint(LiveServerTestCase):
    '''
    Simple functional test use case interface watchful.
    '''
    # Fixture loaded into the test database; presumably provides the
    # 'root' user used below -- verify against the fixture file.
    fixtures = ['user-data.json']

    @classmethod
    def setUpClass(cls):
        # One shared Firefox WebDriver instance for the whole test case.
        cls.selenium = WebDriver()
        super(TestLoginPoint, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(TestLoginPoint, cls).tearDownClass()

    def test_login(self):
        # Drive the browser through the site root login form as root/root.
        self.selenium.get('%s%s' % (self.live_server_url, '/'))
        username_input = self.selenium.find_element_by_name("username")
        username_input.send_keys('root')
        password_input = self.selenium.find_element_by_name("password")
        password_input.send_keys('root')
        self.selenium.find_element_by_xpath('//input[@value="Entrar"]').click()
k0ste/ansible | test/lib/ansible_test/_internal/env.py | 25 | 7265 | """Show information about the test environment."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import functools
import os
import platform
import signal
import sys
import time
from .config import (
CommonConfig,
TestConfig,
)
from .io import (
write_json_file,
read_json_file,
)
from .util import (
display,
find_executable,
SubprocessError,
ApplicationError,
get_ansible_version,
get_available_python_versions,
)
from .util_common import (
data_context,
write_json_test_results,
ResultType,
)
from .docker_util import (
docker_info,
docker_version
)
from .thread import (
WrappedThread,
)
from .constants import (
TIMEOUT_PATH,
)
from .test import (
TestTimeout,
)
from .executor import (
SUPPORTED_PYTHON_VERSIONS,
)
from .ci import (
get_ci_provider,
)
class EnvConfig(CommonConfig):
    """Configuration for the tools command."""
    def __init__(self, args):
        """
        :type args: any
        """
        super(EnvConfig, self).__init__(args, 'env')

        self.show = args.show  # display environment details on stdout
        self.dump = args.dump  # write environment details to a results file
        self.timeout = args.timeout  # test timeout in minutes (0 clears it)
        self.list_files = args.list_files  # list content files on stdout

        if not self.show and not self.dump and self.timeout is None and not self.list_files:
            # default to --show if no options were given
            self.show = True
def command_env(args):
    """Entry point for the `env` command: show and/or dump the environment,
    optionally list content files, and set or clear the test timeout.

    :type args: EnvConfig
    """
    show_dump_env(args)
    list_files_env(args)
    set_timeout(args)
def show_dump_env(args):
    """Display and/or dump a snapshot of the test environment.

    :type args: EnvConfig
    """
    if not args.show and not args.dump:
        return
    # Collect everything of interest about the current environment.
    data = dict(
        ansible=dict(
            version=get_ansible_version(),
        ),
        docker=get_docker_details(args),
        environ=os.environ.copy(),
        location=dict(
            pwd=os.environ.get('PWD', None),
            cwd=os.getcwd(),
        ),
        git=get_ci_provider().get_git_details(args),
        platform=dict(
            datetime=datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
            platform=platform.platform(),
            uname=platform.uname(),
        ),
        python=dict(
            executable=sys.executable,
            version=platform.python_version(),
        ),
        interpreters=get_available_python_versions(SUPPORTED_PYTHON_VERSIONS),
    )
    if args.show:
        # Per-key verbosity overrides (dotted paths); higher values only
        # appear when the user raises verbosity with -v.
        verbose = {
            'docker': 3,
            'docker.executable': 0,
            'environ': 2,
            'platform.uname': 1,
        }
        show_dict(data, verbose)
    if args.dump and not args.explain:
        write_json_test_results(ResultType.BOT, 'data-environment.json', data)
def list_files_env(args):  # type: (EnvConfig) -> None
    """Print every content file path to stdout, one per line."""
    if args.list_files:
        for file_path in data_context().content.all_files():
            display.info(file_path)
def set_timeout(args):
    """Set or clear the persistent test timeout.

    A positive --timeout writes a deadline file; a zero value removes it.
    In --explain mode nothing is written.

    :type args: EnvConfig
    """
    if args.timeout is None:
        return
    if args.timeout:
        # Deadline is stored as UTC in ISO-like form, matching get_timeout().
        deadline = (datetime.datetime.utcnow() + datetime.timedelta(minutes=args.timeout)).strftime('%Y-%m-%dT%H:%M:%SZ')
        display.info('Setting a %d minute test timeout which will end at: %s' % (args.timeout, deadline), verbosity=1)
    else:
        deadline = None
        display.info('Clearing existing test timeout.', verbosity=1)
    if args.explain:
        return
    if deadline:
        data = dict(
            duration=args.timeout,
            deadline=deadline,
        )
        write_json_file(TIMEOUT_PATH, data)
    elif os.path.exists(TIMEOUT_PATH):
        os.remove(TIMEOUT_PATH)
def get_timeout():
    """Return the active test-timeout record with a parsed deadline, or
    None when no timeout file exists.

    :rtype: dict[str, any] | None
    """
    if not os.path.exists(TIMEOUT_PATH):
        return None
    timeout = read_json_file(TIMEOUT_PATH)
    timeout['deadline'] = datetime.datetime.strptime(timeout['deadline'], '%Y-%m-%dT%H:%M:%SZ')
    return timeout
def configure_timeout(args):
    """Arm the persistent timeout, but only for test commands.

    :type args: CommonConfig
    """
    if isinstance(args, TestConfig):
        configure_test_timeout(args)  # only tests are subject to the timeout
def configure_test_timeout(args):
    """Abort immediately if the persistent timeout has already expired,
    otherwise arm a SIGUSR1-based alarm that aborts the run at the deadline.

    :type args: TestConfig
    """
    timeout = get_timeout()
    if not timeout:
        return
    timeout_start = datetime.datetime.utcnow()
    timeout_duration = timeout['duration']
    timeout_deadline = timeout['deadline']
    timeout_remaining = timeout_deadline - timeout_start
    test_timeout = TestTimeout(timeout_duration)
    if timeout_remaining <= datetime.timedelta():
        # Deadline already passed: record the timeout result and abort now.
        test_timeout.write(args)
        raise ApplicationError('The %d minute test timeout expired %s ago at %s.' % (
            timeout_duration, timeout_remaining * -1, timeout_deadline))
    display.info('The %d minute test timeout expires in %s at %s.' % (
        timeout_duration, timeout_remaining, timeout_deadline), verbosity=1)
    def timeout_handler(_dummy1, _dummy2):
        """Runs when SIGUSR1 is received."""
        test_timeout.write(args)
        raise ApplicationError('Tests aborted after exceeding the %d minute time limit.' % timeout_duration)
    def timeout_waiter(timeout_seconds):
        """Sleep until the deadline, then raise SIGUSR1 in this process.

        :type timeout_seconds: int
        """
        time.sleep(timeout_seconds)
        os.kill(os.getpid(), signal.SIGUSR1)
    signal.signal(signal.SIGUSR1, timeout_handler)
    # NOTE(review): timedelta.seconds drops the days component, so a deadline
    # more than 24 hours away would fire early; total_seconds() would be
    # safer — confirm CI timeouts are always under a day.
    instance = WrappedThread(functools.partial(timeout_waiter, timeout_remaining.seconds))
    instance.daemon = True
    instance.start()
def show_dict(data, verbose, root_verbosity=0, path=None):
    """Recursively display a nested dict, honoring per-key verbosity levels.

    :type data: dict[str, any]
    :type verbose: dict[str, int]  verbosity overrides keyed by dotted path
    :type root_verbosity: int  verbosity inherited from the parent key
    :type path: list[str] | None  dotted-path components accumulated so far
    """
    path = path if path else []
    for key, value in sorted(data.items()):
        indent = '  ' * len(path)
        key_path = path + [key]
        key_name = '.'.join(key_path)
        verbosity = verbose.get(key_name, root_verbosity)
        if isinstance(value, (tuple, list)):
            display.info(indent + '%s:' % key, verbosity=verbosity)
            for item in value:
                display.info(indent + ' - %s' % item, verbosity=verbosity)
        elif isinstance(value, dict):
            # Show the parent key at the lowest verbosity of any child
            # override so overridden children are never hidden by the parent.
            # NOTE(review): the startswith check uses `key`, not `key_name`,
            # so overrides nested more than one level deep would not match
            # here — fine for the current flat override keys; confirm.
            min_verbosity = min([verbosity] + [v for k, v in verbose.items() if k.startswith('%s.' % key)])
            display.info(indent + '%s:' % key, verbosity=min_verbosity)
            show_dict(value, verbose, verbosity, key_path)
        else:
            display.info(indent + '%s: %s' % (key, value), verbosity=verbosity)
def get_docker_details(args):
    """Collect the docker executable path, `docker info` and `docker version`
    output for the environment dump.

    Failures to query docker are reported as warnings and leave the
    corresponding field as None rather than aborting.

    :type args: CommonConfig
    :rtype: dict[str, any]
    """
    docker = find_executable('docker', required=False)
    info = None
    version = None
    if docker:
        try:
            info = docker_info(args)
        except SubprocessError as ex:
            display.warning('Failed to collect docker info:\n%s' % ex)
        try:
            version = docker_version(args)
        except SubprocessError as ex:
            display.warning('Failed to collect docker version:\n%s' % ex)
    docker_details = dict(
        executable=docker,
        info=info,
        version=version,
    )
    return docker_details
| gpl-3.0 |
jorik041/MITMf | core/responder/fingerprinter/RAPLANMANPackets.py | 7 | 6231 | import struct
from core.responder.odict import OrderedDict
from core.responder.packet import Packet
def longueur(payload):
    """Return the combined length of the pieces in *payload*, packed as a
    big-endian 32-bit signed integer (used as an SMB/NetBIOS length field)."""
    total = sum(len(piece) for piece in payload)
    return struct.pack(">i", total)
class SMBHeader(Packet):
    """Standard 32-byte SMBv1 packet header.

    Defaults describe a request; cmd 0x72 is SMB_COM_NEGOTIATE.
    """
    fields = OrderedDict([
        ("proto", "\xff\x53\x4d\x42"),  # protocol magic: 0xFF 'S' 'M' 'B'
        ("cmd", "\x72"),  # command code (0x72 = negotiate)
        ("error-code", "\x00\x00\x00\x00" ),
        ("flag1", "\x08"),
        ("flag2", "\x01\x00"),
        ("pidhigh", "\x00\x00"),
        ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("reserved", "\x00\x00"),
        ("tid", "\x00\x00"),  # tree id
        ("pid", "\x3c\x1b"),  # process id (arbitrary fixed value)
        ("uid", "\x00\x00"),  # user id
        ("mid", "\x00\x00"),  # multiplex id
    ])
class SMBNegoData(Packet):
    """Negotiate-request body advertising two legacy LANMAN dialects."""
    fields = OrderedDict([
        ("wordcount", "\x00"),
        ("bcc", "\x54\x00"),  # byte count; recomputed by calculate()
        ("separator1","\x02" ),  # 0x02 = dialect buffer-format marker
        ("dialect1", "\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"),  # "PC NETWORK PROGRAM 1.0"
        ("separator2","\x02"),
        ("dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),  # "LANMAN1.0"
    ])
    def calculate(self):
        """Recompute the byte count (bcc) over the dialect list."""
        CalculateBCC = str(self.fields["separator1"])+str(self.fields["dialect1"])+str(self.fields["separator2"])+str(self.fields["dialect2"])
        self.fields["bcc"] = struct.pack("<h",len(CalculateBCC))
class SMBSessionData(Packet):
    """Session Setup AndX request body (pre-NTLM style fields)."""
    fields = OrderedDict([
        ("wordcount", "\x0a"),
        ("AndXCommand", "\xff"),  # 0xff = no further AndX command follows
        ("reserved","\x00"),
        ("andxoffset", "\x00\x00"),
        ("maxbuff","\xff\xff"),
        ("maxmpx", "\x02\x00"),
        ("vcnum","\x01\x00"),
        ("sessionkey", "\x00\x00\x00\x00"),
        ("PasswordLen","\x18\x00"),  # recomputed by calculate()
        ("reserved2","\x00\x00\x00\x00"),
        ("bcc","\x3b\x00"),  # byte count; recomputed by calculate()
        ("AccountPassword",""),
        ("AccountName",""),
        ("AccountNameTerminator","\x00"),
        ("PrimaryDomain","WORKGROUP"),
        ("PrimaryDomainTerminator","\x00"),
        ("NativeOs","Unix"),
        ("NativeOsTerminator","\x00"),
        ("NativeLanman","Samba"),
        ("NativeLanmanTerminator","\x00"),
    ])
    def calculate(self):
        """Recompute bcc over the variable tail and the password length."""
        CompleteBCC = str(self.fields["AccountPassword"])+str(self.fields["AccountName"])+str(self.fields["AccountNameTerminator"])+str(self.fields["PrimaryDomain"])+str(self.fields["PrimaryDomainTerminator"])+str(self.fields["NativeOs"])+str(self.fields["NativeOsTerminator"])+str(self.fields["NativeLanman"])+str(self.fields["NativeLanmanTerminator"])
        self.fields["bcc"] = struct.pack("<h", len(CompleteBCC))
        self.fields["PasswordLen"] = struct.pack("<h", len(str(self.fields["AccountPassword"])))
class SMBTreeConnectData(Packet):
    """Tree Connect AndX request body (connects to a share path)."""
    fields = OrderedDict([
        ("Wordcount", "\x04"),
        ("AndXCommand", "\xff"),  # no further AndX command
        ("Reserved","\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("Flags","\x08\x00"),
        ("PasswdLen", "\x01\x00"),  # recomputed by calculate()
        ("Bcc","\x1b\x00"),  # byte count; recomputed by calculate()
        ("Passwd", "\x00"),
        ("Path",""),
        ("PathTerminator","\x00"),
        ("Service","?????"),  # "?????" = any service type
        ("Terminator", "\x00"),
    ])
    def calculate(self):
        """Recompute PasswdLen and the byte count over the variable tail."""
        self.fields["PasswdLen"] = struct.pack("<h", len(str(self.fields["Passwd"])))[:2]
        BccComplete = str(self.fields["Passwd"])+str(self.fields["Path"])+str(self.fields["PathTerminator"])+str(self.fields["Service"])+str(self.fields["Terminator"])
        self.fields["Bcc"] = struct.pack("<h", len(BccComplete))
class RAPNetServerEnum3Data(Packet):
    """RAP NetServerEnum3 request (command 0x00d7) for enumerating servers."""
    fields = OrderedDict([
        ("Command", "\xd7\x00"),  # 0x00d7 little-endian = NetServerEnum3
        ("ParamDescriptor", "WrLehDzz"),
        ("ParamDescriptorTerminator", "\x00"),
        ("ReturnDescriptor","B16BBDz"),
        ("ReturnDescriptorTerminator", "\x00"),
        ("DetailLevel", "\x01\x00"),
        ("RecvBuff","\xff\xff"),
        # Little-endian 0x80000000 — presumably the SV_TYPE_DOMAIN_ENUM
        # server-type mask; confirm against MS-RAP before changing.
        ("ServerType", "\x00\x00\x00\x80"),
        ("TargetDomain","SMB"),
        ("RapTerminator","\x00"),
        ("TargetName","ABCD"),
        ("RapTerminator2","\x00"),
    ])
class SMBTransRAPData(Packet):
    """SMB Transaction request carrying a RAP call over the LANMAN named pipe.

    calculate() must be called after setting "Data": it pads the pipe name,
    converts it to UTF-16LE, and recomputes the parameter counts, parameter
    offset, and byte count.
    """
    fields = OrderedDict([
        ("Wordcount", "\x0e"),
        ("TotalParamCount", "\x24\x00"),  # recomputed by calculate()
        ("TotalDataCount","\x00\x00" ),
        ("MaxParamCount", "\x08\x00"),
        ("MaxDataCount","\xff\xff"),
        ("MaxSetupCount", "\x00"),
        ("Reserved","\x00\x00"),
        ("Flags", "\x00"),
        ("Timeout","\x00\x00\x00\x00"),
        ("Reserved1","\x00\x00"),
        ("ParamCount","\x24\x00"),  # recomputed by calculate()
        ("ParamOffset", "\x5a\x00"),  # recomputed by calculate()
        ("DataCount", "\x00\x00"),
        ("DataOffset", "\x7e\x00"),
        ("SetupCount", "\x00"),
        ("Reserved2", "\x00"),
        ("Bcc", "\x3f\x00"),  # recomputed by calculate()
        ("Terminator", "\x00"),
        ("PipeName", "\\PIPE\\LANMAN"),
        ("PipeTerminator","\x00\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        # Padding: keep the RAP data aligned on an even boundary.
        if len(str(self.fields["Data"]))%2==0:
            self.fields["PipeTerminator"] = "\x00\x00\x00\x00"
        else:
            self.fields["PipeTerminator"] = "\x00\x00\x00"
        ##Convert Path to Unicode first before any Len calc.
        self.fields["PipeName"] = self.fields["PipeName"].encode('utf-16le')
        ##Data Len
        self.fields["TotalParamCount"] = struct.pack("<i", len(str(self.fields["Data"])))[:2]
        self.fields["ParamCount"] = struct.pack("<i", len(str(self.fields["Data"])))[:2]
        ##Packet len: everything before the RAP parameters, plus the 32-byte SMB header.
        FindRAPOffset = str(self.fields["Wordcount"])+str(self.fields["TotalParamCount"])+str(self.fields["TotalDataCount"])+str(self.fields["MaxParamCount"])+str(self.fields["MaxDataCount"])+str(self.fields["MaxSetupCount"])+str(self.fields["Reserved"])+str(self.fields["Flags"])+str(self.fields["Timeout"])+str(self.fields["Reserved1"])+str(self.fields["ParamCount"])+str(self.fields["ParamOffset"])+str(self.fields["DataCount"])+str(self.fields["DataOffset"])+str(self.fields["SetupCount"])+str(self.fields["Reserved2"])+str(self.fields["Bcc"])+str(self.fields["Terminator"])+str(self.fields["PipeName"])+str(self.fields["PipeTerminator"])
        self.fields["ParamOffset"] = struct.pack("<i", len(FindRAPOffset)+32)[:2]
        ##Bcc Buff Len
        BccComplete = str(self.fields["Terminator"])+str(self.fields["PipeName"])+str(self.fields["PipeTerminator"])+str(self.fields["Data"])
        self.fields["Bcc"] = struct.pack("<i", len(BccComplete))[:2]
| gpl-3.0 |
jac2130/BettingIsBelieving | Betting/putsDAO.py | 1 | 3977 | __author__ = 'aje'
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sys
import re
import datetime
import time
from bson.objectid import ObjectId
import pymongo
# The Blog Post Data Access Object handles interactions with the Posts collection
class PutsDAO:
def __init__(self, database):
self.db = database
self.puts = database.puts
def insert_entry(self, userid, treatment, valToBetOn, price, shares, period, opend):
put = { "userid": userid,
"treatment": treatment,
"valToBetOn": valToBetOn,
"price": int(price),
"shares":int(shares),
"date": datetime.datetime.utcnow(),
"period": period,
"open": opend}
try:
print "Inserting the put", put
newId = self.puts.insert(put)
return str(newId)
except:
print "Error inserting post"
print "Unexpected error:", sys.exc_info()[0]
raise
# returns an array of num_posts posts, reverse ordered
def get_puts(self, treatment, period, demo_mode):
l = []
if demo_mode:
cursor = self.puts.find({'period': period, 'open': 1}).sort('price', pymongo.ASCENDING)
else:
cursor = self.puts.find({'treatment': treatment, 'period':period, 'open': True}).sort('price', pymongo.ASCENDING)
for put in cursor:
put['date'] = str(time.time()) # fix up date
put['id'] = str(put['_id']);
put['price'] = str(put['price'])
put['shares'] = str(put['shares'])
put["_id"] = str(put['_id']);
if 'period' not in put:
put['period'] = 0
else:
put['period'] = str(put['period']);
l.append(put)
return l
def get_put_by_id(self, item_id):
#put = None
#Work here to retrieve the specified post
put=self.puts.find_one({'_id':ObjectId(item_id)})
#if put is not None:
# fix up date
#put['date'] = put['date'].strftime("%A, %B %d %Y at %I:%M%p")
#new= put.next()
return put
def accept_put(self, id, accepter_id):
put = self.get_put_by_id(id)
putId = put['_id']
putMods = {}
putMods["open"] = False
putMods['accepted'] = accepter_id
self.puts.update({'_id': putId}, {"$set": putMods}, upsert=False)
def computer_accept(self, id):
put = self.get_put_by_id(id)
putId = put['_id']
putMods = {}
putMods["open"] = False
putMods['accepted'] = -1
self.puts.update({'_id': putId}, {"$set": putMods}, upsert=False)
# add a comment to a particular blog post
def add_comment(self, id, name, email, body, commentId):
comment = {'author': name, 'body': body, 'id':commentId}
if (email != ""):
comment['email'] = email
try:
last_error = {'n':-1} # this is here so the code runs before you fix the next line
# XXX HW 3.3 Work here to add the comment to the designated post
put=self.puts.find_one({'_id':ObjectId(id)})
put['comments'].append(comment);
self.puts.save(put)
return last_error['n'] # return the number of documents updated
except:
return 0
| mit |
sermmor/Blogodt | src/VentanaTwitt.py | 1 | 6891 | # -*- coding: utf-8 -*-
# generated by wxGlade 0.6.3 on Thu Dec 8 15:07:30 2011
import os
import wx
import ModeloTwitt
# begin wxGlade: dependencies
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class VentanaTwitt(wx.Frame):
    """Window for composing a message and sending it to a social network
    (currently only Twitter), with URL shortening and a live character
    counter that turns red when the message exceeds the limit."""
    def __init__(self, *args, **kwds):
        # begin wxGlade: VentanaTwitt.__init__
        kwds["style"] = wx.DEFAULT_FRAME_STYLE
        wx.Frame.__init__(self, *args, **kwds)
        # Constants.
        self.TWITTER = "Twitter"
        self.ROJO = (255,0,0)  # red: message over the limit
        self.AZUL = (0,0,255)  # blue: message fits
        # View widgets.
        self.lbKeyFile = wx.StaticText(self, -1, " File with keys: ")
        self.tcKeyFile = wx.TextCtrl(self, -1, "")
        self.btKeyFile = wx.Button(self, -1, "Browse...")
        self.lbSendMessageTo = wx.StaticText(self, -1, " Send message: ")
        self.cBSendMessageTo = wx.ComboBox(self, -1, choices=[self.TWITTER], style=wx.CB_DROPDOWN|wx.CB_READONLY)
        self.tCMessage = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER|wx.TE_MULTILINE|wx.TE_AUTO_URL)
        self.lbUrl = wx.StaticText(self, -1, " URL: ")
        self.tcBigURL = wx.TextCtrl(self, -1, "http://")
        self.btAcortarURL = wx.Button(self, -1, "Shorten")
        self.lbNumChars = wx.StaticText(self, -1, "0000" + (" "*153))
        self.lbNumChars.SetForegroundColour(self.AZUL)
        self.btSend = wx.Button(self, -1, "Send")
        self.__set_properties()
        self.__do_layout()
        # State.
        self.twitter = ModeloTwitt.Twitt()
        self.sugerenciaMsg = None  # optional suggested message text
        self.allEnable(False)  # controls stay disabled until a key file is loaded
        self.Bind(wx.EVT_BUTTON, self.examinarXMLKeys, self.btKeyFile)
        self.Bind(wx.EVT_TEXT_ENTER, self.enviarMensaje, self.tCMessage)
        self.Bind(wx.EVT_BUTTON, self.acortarURL, self.btAcortarURL)
        self.Bind(wx.EVT_BUTTON, self.enviarMensaje, self.btSend)
        # end wxGlade
    def __set_properties(self):
        # begin wxGlade: VentanaTwitt.__set_properties
        self.SetTitle("Send message to social networks")
        self.tcKeyFile.SetMinSize((355, 27))
        self.cBSendMessageTo.SetMinSize((355, 29))
        self.cBSendMessageTo.SetSelection(0)
        self.tCMessage.SetMinSize((572, 92))
        self.tcBigURL.SetMinSize((355, 27))
        # end wxGlade
    def __do_layout(self):
        # begin wxGlade: VentanaTwitt.__do_layout
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        grid_sizer_1 = wx.FlexGridSizer(5, 1, 0, 0)
        grid_sizer_5 = wx.FlexGridSizer(1, 2, 0, 0)
        grid_sizer_4 = wx.FlexGridSizer(1, 3, 0, 0)
        grid_sizer_3 = wx.FlexGridSizer(1, 2, 0, 0)
        grid_sizer_2 = wx.FlexGridSizer(1, 3, 0, 0)
        grid_sizer_2.Add(self.lbKeyFile, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_2.Add(self.tcKeyFile, 0, 0, 0)
        grid_sizer_2.Add(self.btKeyFile, 0, 0, 0)
        grid_sizer_1.Add(grid_sizer_2, 1, wx.EXPAND, 0)
        grid_sizer_3.Add(self.lbSendMessageTo, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_3.Add(self.cBSendMessageTo, 0, 0, 0)
        grid_sizer_1.Add(grid_sizer_3, 1, wx.EXPAND, 0)
        grid_sizer_1.Add(self.tCMessage, 0, 0, 0)
        grid_sizer_4.Add(self.lbUrl, 0, wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_4.Add(self.tcBigURL, 0, 0, 0)
        grid_sizer_4.Add(self.btAcortarURL, 0, 0, 0)
        grid_sizer_1.Add(grid_sizer_4, 1, wx.EXPAND, 0)
        grid_sizer_5.Add(self.lbNumChars, 0, 0, 0)
        grid_sizer_5.Add(self.btSend, 0, 0, 0)
        grid_sizer_1.Add(grid_sizer_5, 1, wx.EXPAND, 0)
        sizer_1.Add(grid_sizer_1, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.Layout()
        # end wxGlade
    # Event handlers.
    def examinarXMLKeys(self, event):
        """Let the user pick the XML key file, then initialize the Twitter
        client and enable the rest of the UI."""
        self.tcKeyFile.SetValue("")
        dialogoCargar = wx.FileDialog(self, "CHoose file xml", "", "", "*.xml", wx.FD_OPEN)
        i = dialogoCargar.ShowModal()
        if i == wx.ID_OK:
            pathXML = dialogoCargar.GetPath()
            self.tcKeyFile.SetValue(pathXML)
            if self.cBSendMessageTo.GetValue() == self.TWITTER:
                self.twitter.initTwitt(pathXML)
                if self.sugerenciaMsg != None:
                    self.twitter.setSugerenciaMsg(self.sugerenciaMsg)
                self.tCMessage.SetValue(self.twitter.getTextoPredefinido())
                self.refleshLbNumChars()
            self.allEnable(True)
    def enviarMensaje(self, event):
        """Send the composed message if it is within the character limit,
        then clear the editor and close the window."""
        self.refleshLbNumChars()
        if (self.cBSendMessageTo.GetValue() == self.TWITTER) and (self.getNumCaracteresQuedan() >= 0):
            self.twitter.enviarTuit(self.tCMessage.GetValue(), self.tcKeyFile.GetValue())
            self.tCMessage.SetValue("")
            self.Close()
    def acortarURL(self, event):
        """Shorten the URL in the URL box and append it to the message."""
        self.tCMessage.AppendText(" " + self.twitter.acortarLink(self.tcBigURL.GetValue(), self.tcKeyFile.GetValue()))
        self.tcBigURL.SetValue("")
        self.refleshLbNumChars()
    # Utilities.
    def setSugerenciaMsg(self, sTitle):
        """Store a suggested message to pre-fill once the client is ready."""
        self.sugerenciaMsg = sTitle
    def refleshLbNumChars(self):
        """Refresh the remaining-characters label: blue while the message
        fits, red once it exceeds the limit."""
        self.lbNumChars.SetLabel(self.cadenaNumeroCaracteres())
        if self.getNumCaracteresQuedan() >= 0:
            self.lbNumChars.SetForegroundColour(self.AZUL)
        else:
            self.lbNumChars.SetForegroundColour(self.ROJO)
    def getNumCaracteresQuedan(self):
        """Return how many characters may still be typed; negative when the
        message already exceeds the network's limit."""
        res = 0
        if self.cBSendMessageTo.GetValue() == self.TWITTER:
            res = self.twitter.getLimitChars() - len(self.tCMessage.GetValue())
        return res
    def cadenaNumeroCaracteres(self):
        """Format the remaining-character count as a fixed-width label.

        Non-negative counts render as four zero-padded digits ("0042"),
        negative counts as a minus sign plus four zero-padded digits
        ("-0042"); trailing spaces pad the label to a constant width.

        Bug fix: the previous range-check chain used `> 100` / `> 10`
        instead of `>=`, producing five-character strings for counts of
        exactly 10 or 100, and silently dropped the minus sign for counts
        below -999.
        """
        numero = self.getNumCaracteresQuedan()
        if numero >= 0:
            return str(numero).zfill(4) + (" " * 153)
        return "-" + str(abs(numero)).zfill(4) + (" " * 152)
    def allEnable(self, b=True):
        """Enable or disable every control except the key-file picker."""
        self.cBSendMessageTo.Enable(b)
        self.tCMessage.Enable(b)
        self.tcBigURL.Enable(b)
        self.btAcortarURL.Enable(b)
        self.btSend.Enable(b)
# end of class VentanaTwitt
| gpl-3.0 |
LCAV/pyroomacoustics | pyroomacoustics/transform/tests/test_dft_timing.py | 1 | 2826 | from __future__ import division, print_function
import numpy as np
import pyroomacoustics as pra
import time
# Optional FFT backends: fall back gracefully when a backend is not
# installed so the benchmark can still run with numpy alone.
try:
    import pyfftw
    pyfftw_available = True
except ImportError:
    pyfftw_available = False
try:
    import mkl_fft
    mkl_available = True
except ImportError:
    mkl_available = False
# Benchmark parameters: repetitions per measurement, FFT length, channels.
n_trials = 1000
nfft = 128
D = 7
# Random multi-channel input frame, single precision.
x = np.random.randn(nfft, D).astype('float32')
def timing(transform, n_trials):
    """Benchmark one DFT backend through the pra DFT class.

    Prints the average analysis/synthesis time per call in microseconds;
    returns None.
    """
    dft = pra.transform.DFT(nfft, D, transform=transform)
    tic = time.time()
    for _ in range(n_trials):
        dft.analysis(x)
    analysis_time = (time.time() - tic) / n_trials * 1e6
    tic = time.time()
    for _ in range(n_trials):
        dft.synthesis()
    synthesis_time = (time.time() - tic) / n_trials * 1e6
    print("avg %s : %f [1e-6 sec], (analysis, synthesis)=(%f, %f) [1e-6 sec]" %
          (transform, analysis_time + synthesis_time, analysis_time, synthesis_time))
# Benchmark each available backend through the DFT class wrapper.
res = timing('numpy', n_trials)
if pyfftw_available:
    res = timing('fftw', n_trials)
if mkl_available:
    res = timing('mkl', n_trials)
"""
test against without using class
"""
print()
# Baseline: raw numpy rfft/irfft without the DFT wrapper.
start_time = time.time()
for k in range(n_trials):
    X = np.fft.rfft(x)
analysis_time = (time.time()-start_time)/n_trials * 1e6
start_time = time.time()
for k in range(n_trials):
    x_r = np.fft.irfft(X)
synthesis_time = (time.time()-start_time)/n_trials * 1e6
print("avg numpy w/o class : %f [1e-6 sec], (analysis, synthesis)=(%f, %f) [1e-6 sec]" %
      (analysis_time+synthesis_time, analysis_time, synthesis_time))
if pyfftw_available:
    # prepare: aligned buffers and pre-planned FFTW transforms, so the loop
    # below measures execution only, not planning.
    a = pyfftw.empty_aligned([nfft, D], dtype='float32')
    b = pyfftw.empty_aligned([nfft//2+1, D], dtype='complex64')
    c = pyfftw.empty_aligned([nfft, D], dtype='float32')
    forward = pyfftw.FFTW(a, b, axes=(0, ))
    backward = pyfftw.FFTW(b, c, axes=(0, ), direction='FFTW_BACKWARD')
    start_time = time.time()
    for k in range(n_trials):
        forward()
    analysis_time = (time.time()-start_time)/n_trials * 1e6
    start_time = time.time()
    for k in range(n_trials):
        backward()
    synthesis_time = (time.time()-start_time)/n_trials * 1e6
    print("avg fftw w/o class : %f [1e-6 sec], (analysis, synthesis)=(%f, %f) [1e-6 sec]" %
          (analysis_time+synthesis_time, analysis_time, synthesis_time))
if mkl_available:
    # MKL's numpy-compatible real FFT entry points.
    start_time = time.time()
    for k in range(n_trials):
        X = mkl_fft.rfft_numpy(x)
    analysis_time = (time.time()-start_time)/n_trials * 1e6
    start_time = time.time()
    for k in range(n_trials):
        x_r = mkl_fft.irfft_numpy(X)
    synthesis_time = (time.time()-start_time)/n_trials * 1e6
    print("avg mkl w/o class : %f [1e-6 sec], (analysis, synthesis)=(%f, %f) [1e-6 sec]" %
          (analysis_time+synthesis_time, analysis_time, synthesis_time))
| mit |
felixonmars/mongo-python-driver | test/test_topology.py | 16 | 22520 | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the topology module."""
import sys
sys.path[0:0] = [""]
import threading
from bson.py3compat import imap
from pymongo import common
from pymongo.read_preferences import ReadPreference, Secondary
from pymongo.server_type import SERVER_TYPE
from pymongo.topology import Topology
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure)
from pymongo.ismaster import IsMaster
from pymongo.monitor import Monitor
from pymongo.pool import PoolOptions
from pymongo.server_description import ServerDescription
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.settings import TopologySettings
from test import client_knobs, unittest
from test.utils import wait_until
class MockSocketInfo(object):
    """Stand-in for a pooled socket: a no-op resource supporting close()
    and the context-manager protocol."""
    def close(self):
        """No resources to release."""
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Never suppresses exceptions (implicitly returns None)."""
class MockPool(object):
    """Minimal connection-pool double: hands out mock sockets and tracks
    how many times it has been reset via a monotonically increasing id."""
    def __init__(self, *args, **kwargs):
        # The real Pool's constructor arguments are accepted and ignored.
        self.pool_id = 0
        self._lock = threading.Lock()
    def get_socket(self, all_credentials):
        return MockSocketInfo()
    def return_socket(self, _):
        """Mock sockets are throwaway; nothing to hand back."""
    def reset(self):
        # Bumping pool_id invalidates sockets from earlier generations.
        with self._lock:
            self.pool_id += 1
class MockMonitor(object):
    """Inert server-monitor double: remembers its server description and
    topology, and ignores the open/request_check/close lifecycle calls."""
    def __init__(self, server_description, topology, pool, topology_settings):
        # pool and topology_settings are accepted for signature compatibility.
        self._server_description = server_description
        self._topology = topology
    def open(self):
        """Monitoring is disabled; nothing to start."""
    def request_check(self):
        """No background check thread to wake up."""
    def close(self):
        """Nothing to shut down."""
class SetNameDiscoverySettings(TopologySettings):
    """Settings whose initial topology type is ReplicaSetNoPrimary, so
    replica-set-name discovery can be tested without a user-supplied name."""
    def get_topology_type(self):
        return TOPOLOGY_TYPE.ReplicaSetNoPrimary
# Address of the default mock server used throughout these tests.
address = ('a', 27017)
def create_mock_topology(
        seeds=None,
        replica_set_name=None,
        monitor_class=MockMonitor):
    """Build and open a Topology backed by mock pools and monitors.

    :type seeds: list[str] | None  seed host names, defaulting to ['a']
    """
    partitioned_seeds = list(imap(common.partition_node, seeds or ['a']))
    topology_settings = TopologySettings(
        partitioned_seeds,
        replica_set_name=replica_set_name,
        pool_class=MockPool,
        monitor_class=monitor_class)
    t = Topology(topology_settings)
    t.open()
    return t
def got_ismaster(topology, server_address, ismaster_response):
    """Simulate a monitor delivering an ismaster response for one server."""
    server_description = ServerDescription(
        server_address, IsMaster(ismaster_response), 0)
    topology.on_change(server_description)
def disconnected(topology, server_address):
    """Simulate losing contact with a server: mark it Unknown."""
    # Create new description of server type Unknown.
    topology.on_change(ServerDescription(server_address))
def get_type(topology, hostname):
    """Return the SERVER_TYPE of the server at hostname (default port 27017)."""
    description = topology.get_server_by_address((hostname, 27017)).description
    return description.server_type
class TopologyTest(unittest.TestCase):
    """Disables periodic monitoring, to make tests deterministic."""
    def setUp(self):
        super(TopologyTest, self).setUp()
        # A huge heartbeat_frequency means monitors never fire on their own;
        # checks only happen when a test requests them explicitly.
        self.client_knobs = client_knobs(heartbeat_frequency=999999)
        self.client_knobs.enable()
        self.addCleanup(self.client_knobs.disable)
# Use assertRaisesRegex if available, otherwise use Python 2.7's
# deprecated assertRaisesRegexp, with a 'p'.
# (Compatibility alias installed on the base test class above.)
if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
    TopologyTest.assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
class TestTopologyConfiguration(TopologyTest):
    """Propagation of pool options into application and monitoring pools."""
    def test_timeout_configuration(self):
        pool_options = PoolOptions(connect_timeout=1, socket_timeout=2)
        topology_settings = TopologySettings(pool_options=pool_options)
        t = Topology(topology_settings=topology_settings)
        t.open()
        # Get the default server.
        server = t.get_server_by_address(('localhost', 27017))
        # The pool for application operations obeys our settings.
        self.assertEqual(1, server._pool.opts.connect_timeout)
        self.assertEqual(2, server._pool.opts.socket_timeout)
        # The pool for monitoring operations uses our connect_timeout as both
        # its connect_timeout and its socket_timeout.
        monitor = server._monitor
        self.assertEqual(1, monitor._pool.opts.connect_timeout)
        self.assertEqual(1, monitor._pool.opts.socket_timeout)
        # The monitor, not its pool, is responsible for calling ismaster.
        self.assertFalse(monitor._pool.handshake)
class TestSingleServerTopology(TopologyTest):
    """Behavior of a topology seeded with exactly one server (type Single)."""
    def test_direct_connection(self):
        # Whatever kind of server the ismaster response reveals, a
        # single-seed topology stays Single and returns that server.
        for server_type, ismaster_response in [
            (SERVER_TYPE.RSPrimary, {
                'ok': 1,
                'ismaster': True,
                'hosts': ['a'],
                'setName': 'rs'}),
            (SERVER_TYPE.RSSecondary, {
                'ok': 1,
                'ismaster': False,
                'secondary': True,
                'hosts': ['a'],
                'setName': 'rs'}),
            (SERVER_TYPE.Mongos, {
                'ok': 1,
                'ismaster': True,
                'msg': 'isdbgrid'}),
            (SERVER_TYPE.RSArbiter, {
                'ok': 1,
                'ismaster': False,
                'arbiterOnly': True,
                'hosts': ['a'],
                'setName': 'rs'}),
            (SERVER_TYPE.Standalone, {
                'ok': 1,
                'ismaster': True}),
            # Slave.
            (SERVER_TYPE.Standalone, {
                'ok': 1,
                'ismaster': False}),
        ]:
            t = create_mock_topology()
            # Can't select a server while the only server is of type Unknown.
            with self.assertRaisesRegex(ConnectionFailure,
                                        'No servers found yet'):
                t.select_servers(any_server_selector,
                                 server_selection_timeout=0)
            got_ismaster(t, address, ismaster_response)
            # Topology type never changes.
            self.assertEqual(TOPOLOGY_TYPE.Single, t.description.topology_type)
            # No matter whether the server is writable,
            # select_servers() returns it.
            s = t.select_server(writable_server_selector)
            self.assertEqual(server_type, s.description.server_type)
    def test_reopen(self):
        t = create_mock_topology()
        # Additional calls are permitted.
        t.open()
        t.open()
    def test_unavailable_seed(self):
        # A seed that never responds stays in the topology as type Unknown.
        t = create_mock_topology()
        disconnected(t, address)
        self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a'))
    def test_round_trip_time(self):
        round_trip_time = 125
        available = True
        class TestMonitor(Monitor):
            """Monitor reporting a closed-over RTT, or failing when down."""
            def _check_with_socket(self, sock_info):
                if available:
                    return IsMaster({'ok': 1}), round_trip_time
                else:
                    raise AutoReconnect('mock monitor error')
        t = create_mock_topology(monitor_class=TestMonitor)
        s = t.select_server(writable_server_selector)
        self.assertEqual(125, s.description.round_trip_time)
        round_trip_time = 25
        t.request_check_all()
        # Exponential weighted average: .8 * 125 + .2 * 25 = 105.
        self.assertAlmostEqual(105, s.description.round_trip_time)
        # The server is temporarily down.
        available = False
        t.request_check_all()
        def raises_err():
            """True once server selection fails with ConnectionFailure."""
            try:
                t.select_server(writable_server_selector,
                                server_selection_timeout=0.1)
            except ConnectionFailure:
                return True
            else:
                return False
        wait_until(raises_err, 'discover server is down')
        self.assertIsNone(s.description.round_trip_time)
        # Bring it back, RTT is now 20 milliseconds.
        available = True
        round_trip_time = 20
        def new_average():
            # We reset the average to the most recent measurement.
            description = s.description
            return (description.round_trip_time is not None
                    and round(abs(20 - description.round_trip_time), 7) == 0)
        tries = 0
        while not new_average():
            t.request_check_all()
            tries += 1
            if tries > 10:
                self.fail("Didn't ever calculate correct new average")
class TestMultiServerTopology(TopologyTest):
def test_close(self):
t = create_mock_topology(replica_set_name='rs')
got_ismaster(t, ('a', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b']})
got_ismaster(t, ('b', 27017), {
'ok': 1,
'ismaster': False,
'secondary': True,
'setName': 'rs',
'hosts': ['a', 'b']})
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b'))
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
t.description.topology_type)
t.close()
self.assertEqual(2, len(t.description.server_descriptions()))
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a'))
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b'))
self.assertEqual('rs', t.description.replica_set_name)
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary,
t.description.topology_type)
got_ismaster(t, ('a', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b']})
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b'))
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
t.description.topology_type)
def test_reset_server(self):
t = create_mock_topology(replica_set_name='rs')
got_ismaster(t, ('a', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b']})
got_ismaster(t, ('b', 27017), {
'ok': 1,
'ismaster': False,
'secondary': True,
'setName': 'rs',
'hosts': ['a', 'b']})
t.reset_server(('a', 27017))
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'a'))
self.assertEqual(SERVER_TYPE.RSSecondary, get_type(t, 'b'))
self.assertEqual('rs', t.description.replica_set_name)
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetNoPrimary,
t.description.topology_type)
got_ismaster(t, ('a', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b']})
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
t.description.topology_type)
t.reset_server(('b', 27017))
self.assertEqual(SERVER_TYPE.RSPrimary, get_type(t, 'a'))
self.assertEqual(SERVER_TYPE.Unknown, get_type(t, 'b'))
self.assertEqual('rs', t.description.replica_set_name)
self.assertEqual(TOPOLOGY_TYPE.ReplicaSetWithPrimary,
t.description.topology_type)
def test_reset_removed_server(self):
t = create_mock_topology(replica_set_name='rs')
# No error resetting a server not in the TopologyDescription.
t.reset_server(('b', 27017))
# Server was *not* added as type Unknown.
self.assertFalse(t.has_server(('b', 27017)))
def test_discover_set_name_from_primary(self):
# Discovering a replica set without the setName supplied by the user
# is not yet supported by MongoClient, but Topology can do it.
topology_settings = SetNameDiscoverySettings(
seeds=[address],
pool_class=MockPool,
monitor_class=MockMonitor)
t = Topology(topology_settings)
self.assertEqual(t.description.replica_set_name, None)
self.assertEqual(t.description.topology_type,
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
got_ismaster(t, address, {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a']})
self.assertEqual(t.description.replica_set_name, 'rs')
self.assertEqual(t.description.topology_type,
TOPOLOGY_TYPE.ReplicaSetWithPrimary)
# Another response from the primary. Tests the code that processes
# primary response when topology type is already ReplicaSetWithPrimary.
got_ismaster(t, address, {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a']})
# No change.
self.assertEqual(t.description.replica_set_name, 'rs')
self.assertEqual(t.description.topology_type,
TOPOLOGY_TYPE.ReplicaSetWithPrimary)
def test_discover_set_name_from_secondary(self):
# Discovering a replica set without the setName supplied by the user
# is not yet supported by MongoClient, but Topology can do it.
topology_settings = SetNameDiscoverySettings(
seeds=[address],
pool_class=MockPool,
monitor_class=MockMonitor)
t = Topology(topology_settings)
self.assertEqual(t.description.replica_set_name, None)
self.assertEqual(t.description.topology_type,
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
got_ismaster(t, address, {
'ok': 1,
'ismaster': False,
'secondary': True,
'setName': 'rs',
'hosts': ['a']})
self.assertEqual(t.description.replica_set_name, 'rs')
self.assertEqual(t.description.topology_type,
TOPOLOGY_TYPE.ReplicaSetNoPrimary)
def test_wire_version(self):
t = create_mock_topology(replica_set_name='rs')
t.description.check_compatible() # No error.
got_ismaster(t, address, {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a']})
# Use defaults.
server = t.get_server_by_address(address)
self.assertEqual(server.description.min_wire_version, 0)
self.assertEqual(server.description.max_wire_version, 0)
got_ismaster(t, address, {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a'],
'minWireVersion': 1,
'maxWireVersion': 5})
self.assertEqual(server.description.min_wire_version, 1)
self.assertEqual(server.description.max_wire_version, 5)
# Incompatible.
got_ismaster(t, address, {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a'],
'minWireVersion': 11,
'maxWireVersion': 12})
try:
t.select_servers(any_server_selector)
except ConfigurationError as e:
# Error message should say which server failed and why.
self.assertTrue('a:27017' in str(e))
self.assertTrue('wire protocol versions 11 through 12' in str(e))
else:
self.fail('No error with incompatible wire version')
def test_max_write_batch_size(self):
t = create_mock_topology(seeds=['a', 'b'], replica_set_name='rs')
def write_batch_size():
s = t.select_server(writable_server_selector)
return s.description.max_write_batch_size
got_ismaster(t, ('a', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b'],
'maxWriteBatchSize': 1})
got_ismaster(t, ('b', 27017), {
'ok': 1,
'ismaster': False,
'secondary': True,
'setName': 'rs',
'hosts': ['a', 'b'],
'maxWriteBatchSize': 2})
# Uses primary's max batch size.
self.assertEqual(1, write_batch_size())
# b becomes primary.
got_ismaster(t, ('b', 27017), {
'ok': 1,
'ismaster': True,
'setName': 'rs',
'hosts': ['a', 'b'],
'maxWriteBatchSize': 2})
self.assertEqual(2, write_batch_size())
def wait_for_master(topology):
    """Wait for a Topology to discover a writable server.

    If the monitor is currently calling ismaster, a blocking call to
    select_server from this thread can trigger a spurious wake of the monitor
    thread. In applications this is harmless but it would break some tests,
    so we pass server_selection_timeout=0 and poll instead.
    """
    def poll_once():
        # One non-blocking selection attempt; None means "not found yet".
        try:
            return topology.select_server(writable_server_selector, 0)
        except ConnectionFailure:
            return None

    return wait_until(poll_once, 'find master')
class TestTopologyErrors(TopologyTest):
    # Behavior when the monitor's ismaster call raises.

    def test_pool_reset(self):
        # First ismaster succeeds, every later one raises a socket error;
        # the failure must reset the server's connection pool.
        call_count = [0]

        class TestMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                call_count[0] += 1
                if call_count[0] == 1:
                    return IsMaster({'ok': 1}), 0
                raise AutoReconnect('mock monitor error')

        topology = create_mock_topology(monitor_class=TestMonitor)
        server = wait_for_master(topology)
        self.assertEqual(1, call_count[0])
        pool_id = server.pool.pool_id
        # Pool is reset by ismaster failure.
        topology.request_check_all()
        self.assertNotEqual(pool_id, server.pool.pool_id)

    def test_ismaster_retry(self):
        # ismaster succeeds (call 1), raises (call 2), succeeds again
        # (call 3): a requested check retries immediately after a failure.
        call_count = [0]

        class TestMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                call_count[0] += 1
                if call_count[0] in (1, 3):
                    return IsMaster({'ok': 1}), 0
                raise AutoReconnect('mock monitor error')

        topology = create_mock_topology(monitor_class=TestMonitor)
        server = wait_for_master(topology)
        self.assertEqual(1, call_count[0])
        self.assertEqual(SERVER_TYPE.Standalone,
                         server.description.server_type)
        # Second ismaster call, then immediately the third.
        topology.request_check_all()
        self.assertEqual(3, call_count[0])
        self.assertEqual(SERVER_TYPE.Standalone, get_type(topology, 'a'))

    def test_internal_monitor_error(self):
        # An unexpected exception inside the monitor surfaces to callers of
        # select_server as a ConnectionFailure carrying the original message.
        exception = AssertionError('internal error')

        class TestMonitor(Monitor):
            def _check_with_socket(self, sock_info):
                raise exception

        topology = create_mock_topology(monitor_class=TestMonitor)
        with self.assertRaisesRegex(ConnectionFailure, 'internal error'):
            topology.select_server(any_server_selector,
                                   server_selection_timeout=0.5)
class TestServerSelectionErrors(TopologyTest):
    def assertMessage(self, message, topology, selector=any_server_selector):
        # Selecting with `selector` must fail with exactly `message`.
        with self.assertRaises(ConnectionFailure) as context:
            topology.select_server(selector, server_selection_timeout=0)
        self.assertEqual(message, str(context.exception))

    def test_no_primary(self):
        topology = create_mock_topology(replica_set_name='rs')
        got_ismaster(topology, address, {
            'ok': 1,
            'ismaster': False,
            'secondary': True,
            'setName': 'rs',
            'hosts': ['a']})
        self.assertMessage('No replica set members match selector "Primary()"',
                           topology, ReadPreference.PRIMARY)
        self.assertMessage('No primary available for writes',
                           topology, writable_server_selector)

    def test_no_secondary(self):
        topology = create_mock_topology(replica_set_name='rs')
        got_ismaster(topology, address, {
            'ok': 1,
            'ismaster': True,
            'setName': 'rs',
            'hosts': ['a']})
        self.assertMessage(
            'No replica set members match selector'
            ' "Secondary(tag_sets=None)"',
            topology, ReadPreference.SECONDARY)
        self.assertMessage(
            "No replica set members match selector"
            " \"Secondary(tag_sets=[{'dc': 'ny'}])\"",
            topology, Secondary(tag_sets=[{'dc': 'ny'}]))

    def test_bad_replica_set_name(self):
        topology = create_mock_topology(replica_set_name='rs')
        got_ismaster(topology, address, {
            'ok': 1,
            'ismaster': False,
            'secondary': True,
            'setName': 'wrong',
            'hosts': ['a']})
        self.assertMessage(
            'No replica set members available for replica set name "rs"',
            topology)

    def test_multiple_standalones(self):
        # Standalones are removed from a topology with multiple seeds.
        topology = create_mock_topology(seeds=['a', 'b'])
        got_ismaster(topology, ('a', 27017), {'ok': 1})
        got_ismaster(topology, ('b', 27017), {'ok': 1})
        self.assertMessage('No servers available', topology)

    def test_no_mongoses(self):
        topology = create_mock_topology(seeds=['a', 'b'])
        # Discover a mongos and change topology type to Sharded.
        got_ismaster(topology, ('a', 27017), {'ok': 1, 'msg': 'isdbgrid'})
        # Oops, both servers are standalone now. Remove them.
        got_ismaster(topology, ('a', 27017), {'ok': 1})
        got_ismaster(topology, ('b', 27017), {'ok': 1})
        self.assertMessage('No mongoses available', topology)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
OpenDeployment/chef-repo | cookbooks/python/src/11/adding_ssl_to_network_servers/echoserv.py | 5 | 1081 | from socket import socket, AF_INET, SOCK_STREAM
from socket import SOL_SOCKET, SO_REUSEADDR
import ssl
KEYFILE = 'server_key.pem' # Private key of the server
CERTFILE = 'server_cert.pem' # Server certificate (given to client)
def echo_client(s):
    """Echo everything received on socket-like object `s` back to the peer.

    Loops until recv() returns an empty bytes object (peer closed), then
    closes the socket.
    """
    data = s.recv(8192)
    while data:
        s.send(data)
        data = s.recv(8192)
    s.close()
    print('Connection closed')
def echo_server(address):
    """Run a TLS-wrapped echo server bound to `address`.

    Serves one client at a time, forever; per-connection errors are printed
    and the accept loop continues.
    """
    s = socket(AF_INET, SOCK_STREAM)
    s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    s.bind(address)
    s.listen(1)
    # Wrap the listening socket in TLS.  ssl.wrap_socket() was deprecated in
    # Python 3.7 and removed in 3.12; SSLContext is the supported API.
    # (Note: as before, client certificates are NOT required -- nothing here
    # requests or verifies a client cert.)
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain(certfile=CERTFILE, keyfile=KEYFILE)
    s_ssl = context.wrap_socket(s, server_side=True)
    # Wait for connections
    while True:
        try:
            c, a = s_ssl.accept()
            print('Got connection', c, a)
            echo_client(c)
        except Exception as e:
            print('{}: {}'.format(e.__class__.__name__, e))
echo_server(('', 20000))
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/lib2to3/fixes/fix_raise.py | 203 | 2926 | """Fixer for 'raise E, V, T'
raise -> raise
raise E -> raise E
raise E, V -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)
raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T -> warns about string exceptions
CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
instance. The correct Python 3 idiom is
raise E from V
but since we can't detect instance-hood by syntax alone and since
any client code would have to be changed as well, we don't automate
this.
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
class FixRaise(fixer_base.BaseFix):
    # BM_compatible: this fixer supports lib2to3's bottom-matcher fast path.
    BM_compatible = True
    PATTERN = """
    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
    """
    def transform(self, node, results):
        """Rewrite a Python 2 ``raise E[, V[, T]]`` statement for Python 3.

        Returns a replacement pytree node, or None when the statement cannot
        be converted (string exceptions).
        """
        syms = self.syms
        exc = results["exc"].clone()
        if exc.type == token.STRING:
            # Python 2 string exceptions have no Python 3 equivalent.
            msg = "Python 3 does not support string exceptions"
            self.cannot_convert(node, msg)
            return
        # Python 2 supports
        #  raise ((((E1, E2), E3), E4), E5), V
        # as a synonym for
        #  raise E1, V
        # Since Python 3 will not support this, we recurse down any tuple
        # literals, always taking the first element.
        if is_tuple(exc):
            while is_tuple(exc):
                # exc.children[1:-1] is the unparenthesized tuple
                # exc.children[1].children[0] is the first element of the tuple
                exc = exc.children[1].children[0].clone()
            exc.prefix = " "
        if "val" not in results:
            # One-argument raise
            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
            new.prefix = node.prefix
            return new
        val = results["val"].clone()
        if is_tuple(val):
            # raise E, (a, b) -> raise E(a, b): spread the tuple as arguments.
            args = [c.clone() for c in val.children[1:-1]]
        else:
            val.prefix = ""
            args = [val]
        if "tb" in results:
            tb = results["tb"].clone()
            tb.prefix = ""
            e = exc
            # If there's a traceback and None is passed as the value, then don't
            # add a call, since the user probably just wants to add a
            # traceback. See issue #9661.
            if val.type != token.NAME or val.value != "None":
                e = Call(exc, args)
            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
            new.prefix = node.prefix
            return new
        else:
            # raise E, V -> raise E(V)
            return pytree.Node(syms.raise_stmt,
                               [Name("raise"), Call(exc, args)],
                               prefix=node.prefix)
| gpl-2.0 |
domain51/d51.django.apps.sharing | d51/django/apps/sharing/providers/__init__.py | 1 | 1052 | from d51.django.apps.sharing.utils import load_target_from_setting_with_key as load_target
from d51.django.apps.sharing.exceptions import ProviderNotImplemented
from django.conf import settings as django_settings
SHARING_PREFERRED_PROVIDER_KEY = 'D51_DJANGO_APPS_SHARING_PREFERRED_PROVIDER'
SHARING_PROVIDERS_SETTINGS_KEY = 'D51_DJANGO_APPS_SHARING_PROVIDERS'
class SharingProviderException(Exception):
    """Raised for provider-related failures in the sharing app."""
    pass
class Provider(object):
    """Base URL-sharing provider.

    Subclasses are expected to override :meth:`fulfill` to produce a real
    shortened/alternate URL; the base implementation just echoes the
    original URL.
    """
    def __init__(self, name):
        # Provider name as configured in settings; stored on each alternate.
        self.name = name
    def create_alternate(self, from_url):
        # Create a new (unfulfilled) alternate record for `from_url`,
        # tagged with this provider's name.
        return from_url.alternates.create(
            provider=self.name
        )
    def fulfill(self, alternate):
        # Default fulfillment: reuse the original URL unchanged, mark the
        # alternate fulfilled, persist it, and return it.
        alternate.url = alternate.original_url.url
        alternate.is_fulfilled = True
        alternate.save()
        return alternate
def load_provider(provider_name=None, settings=django_settings):
    """Instantiate and return the sharing provider named `provider_name`.

    When `provider_name` is None, the preferred provider configured in
    `settings` is used instead.
    """
    name = provider_name
    if name is None:
        name = getattr(settings, SHARING_PREFERRED_PROVIDER_KEY)
    provider_class = load_target(SHARING_PROVIDERS_SETTINGS_KEY, name)
    return provider_class(name)
| gpl-3.0 |
darina/omim | tools/python/taxi_csv_to_json.py | 8 | 2463 | #!/usr/bin/env python
# coding: utf8
from argparse import ArgumentParser
from collections import defaultdict
import json
import logging
def deserialize_places(src):
    """Parse the tab-separated taxi dump into (countries, mwms).

    Rows with fewer than 3 cells enable a whole country, rows with 3-4 cells
    add one city to a country, and rows with 5+ cells name an individual mwm
    (column 5).  The first line is a header and is skipped.
    """
    rows = src.splitlines()[1:]  # drop header
    countries = defaultdict(list)
    mwms = []
    try:
        for row in rows:
            cells = row.split('\t')
            if len(cells) < 5 and not cells[0]:
                logging.error("Country cell is empty. Incorrect line: {}".format(cells))
                exit()
            if len(cells) < 3:
                # Add full country.
                countries[cells[0]] = []
            elif len(cells) < 5:
                # Add city of the country.
                countries[cells[0]].append(cells[2])
            else:
                # Add mwm.
                mwms.append(cells[4])
    except IndexError as e:
        logging.error("The structure of src file is incorrect. Exception: {}".format(e))
        exit()
    return countries, mwms
def convert(src_path, dst_path):
    """Read the tab-separated taxi file at `src_path` and write the JSON
    representation to `dst_path`.

    I/O errors are logged and swallowed (the function simply returns).
    """
    try:
        with open(src_path, "r") as f:
            src = f.read()
    except (OSError, IOError):
        logging.error("Cannot read src file {}".format(src_path))
        return
    countries, mwms = deserialize_places(src)
    # Carcass of the result.
    result = {
        "enabled": {"countries": [], "mwms": []},
        "disabled": {"countries": [], "mwms": []}
    }
    # BUGFIX: dict.iteritems() exists only on Python 2 and crashes under
    # Python 3; .items() behaves identically on both.
    for country, cities in countries.items():
        result["enabled"]["countries"].append({
            "id": country,
            "cities": cities
        })
    result["enabled"]["mwms"] = mwms
    try:
        with open(dst_path, "w") as f:
            json.dump(result, f, indent=2, sort_keys=True)
    except (OSError, IOError):
        logging.error("Cannot write result into dst file {}".format(dst_path))
        return
def process_options():
    """Parse command-line options; return the namespace, or None on failure."""
    parser = ArgumentParser(description='Load taxi file in csv format and convert it into json')
    parser.add_argument("--src", type=str, dest="src", help="Path to csv file", required=True)
    parser.add_argument("--dst", type=str, dest="dst", help="Path to json file", required=True)
    opts = parser.parse_args()
    # argparse already enforces required=True; this is a defensive re-check.
    if not (opts.src and opts.dst):
        parser.print_help()
        return None
    return opts
def main():
    """Entry point: convert the --src csv file into the --dst json file."""
    opts = process_options()
    if opts is not None:
        convert(opts.src, opts.dst)


if __name__ == "__main__":
    main()
| apache-2.0 |
Kha/flask-admin | flask_admin/contrib/pymongo/filters.py | 39 | 2662 | import re
from flask_admin.babel import lazy_gettext
from flask_admin.model import filters
from .tools import parse_like_term
class BasePyMongoFilter(filters.BaseFilter):
    """
        Base pymongo filter.

        Subclasses implement ``apply(query, value)``, where ``query`` is a
        list of MongoDB clause dicts to which the filter appends its own
        clause, and ``operation()``, the human-readable operator label.
    """
    def __init__(self, column, name, options=None, data_type=None):
        """
            Constructor.

            :param column:
                Document field name
            :param name:
                Display name
            :param options:
                Fixed set of options
            :param data_type:
                Client data type
        """
        super(BasePyMongoFilter, self).__init__(name, options, data_type)
        self.column = column
# Common filters
class FilterEqual(BasePyMongoFilter):
    """Exact-match filter: appends ``{column: value}``."""
    def apply(self, query, value):
        query.append({self.column: value})
        return query
    def operation(self):
        return lazy_gettext('equals')
class FilterNotEqual(BasePyMongoFilter):
    """Inequality filter: appends ``{column: {'$ne': value}}``."""
    def apply(self, query, value):
        query.append({self.column: {'$ne': value}})
        return query
    def operation(self):
        return lazy_gettext('not equal')
class FilterLike(BasePyMongoFilter):
    """Substring filter: converts the value to a regex via parse_like_term
    and appends ``{column: {'$regex': regex}}``."""
    def apply(self, query, value):
        regex = parse_like_term(value)
        query.append({self.column: {'$regex': regex}})
        return query
    def operation(self):
        return lazy_gettext('contains')
class FilterNotLike(BasePyMongoFilter):
    """Negated substring filter: ``{column: {'$not': <compiled regex>}}``.
    NOTE(review): unlike FilterLike this compiles the pattern — presumably
    because MongoDB's $not does not accept a plain $regex operand; confirm
    against the MongoDB query-operator docs before changing."""
    def apply(self, query, value):
        regex = parse_like_term(value)
        query.append({self.column: {'$not': re.compile(regex)}})
        return query
    def operation(self):
        return lazy_gettext('not contains')
class FilterGreater(BasePyMongoFilter):
    """Numeric greater-than filter: ``{column: {'$gt': value}}``."""
    def apply(self, query, value):
        try:
            threshold = float(value)
        except ValueError:
            # Non-numeric input falls back to 0, mirroring FilterSmaller.
            threshold = 0
        query.append({self.column: {'$gt': threshold}})
        return query

    def operation(self):
        return lazy_gettext('greater than')
class FilterSmaller(BasePyMongoFilter):
    """Numeric less-than filter: ``{column: {'$lt': value}}``."""
    def apply(self, query, value):
        try:
            threshold = float(value)
        except ValueError:
            # Non-numeric input falls back to 0, mirroring FilterGreater.
            threshold = 0
        query.append({self.column: {'$lt': threshold}})
        return query

    def operation(self):
        return lazy_gettext('smaller than')
# Customized type filters
class BooleanEqualFilter(FilterEqual, filters.BaseBooleanFilter):
    """Boolean equality: the form sends '1'/'0', stored as True/False."""
    def apply(self, query, value):
        query.append({self.column: value == '1'})
        return query
class BooleanNotEqualFilter(FilterNotEqual, filters.BaseBooleanFilter):
    """Boolean inequality: matches documents where column != (value == '1')."""
    def apply(self, query, value):
        query.append({self.column: value != '1'})
        return query
| bsd-3-clause |
sam-tsai/django-old | django/db/models/sql/constants.py | 394 | 1043 | import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict.fromkeys((
    'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
    'month', 'day', 'week_day', 'isnull', 'search', 'regex', 'iregex',
))
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
# Constants to make looking up tuple values clearer.
# Join lists (indexes into the tuples that are values in the alias_map
# dictionary in the Query class).
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
# Valid "order by" term: '?' (random ordering) or an optionally-signed
# dotted column/field name, anchored at the end of the string.
ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')
# Maps a requested direction to (direction, inverse direction).
ORDER_DIR = {
    'ASC': ('ASC', 'DESC'),
    'DESC': ('DESC', 'ASC')}
| bsd-3-clause |
DR08/mxnet | example/recommenders/matrix_fact.py | 45 | 1993 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import mxnet as mx
import numpy as np
import mxnet.notebook.callback
import logging
logging.basicConfig(level=logging.DEBUG)
def RMSE(label, pred):
    """Root-mean-square error between `label` and the flattened `pred`.

    `pred` must expose .flatten() (e.g. a numpy array); `label` is indexed,
    so it sets the number of terms.
    """
    pred = pred.flatten()
    squared_error = sum((label[i] - pred[i]) ** 2 for i in range(len(label)))
    return math.sqrt(squared_error / float(len(label)))
def train(network, data_pair, num_epoch, learning_rate, optimizer='sgd', opt_args=None, ctx=[mx.gpu(0)]):
    """Fit `network` on data_pair=(train_iter, eval_iter) and return the
    LiveLearningCurve callback used during training."""
    # Fix random seeds for consistent demos.
    np.random.seed(123)
    mx.random.seed(123)
    # Default optimizer arguments: SGD gets momentum when none were given.
    if not opt_args:
        opt_args = {'momentum': 0.9} if optimizer == 'sgd' else {}
    model = mx.model.FeedForward(
        ctx=ctx,
        symbol=network,
        num_epoch=num_epoch,
        optimizer=optimizer,
        learning_rate=learning_rate,
        wd=1e-4,
        **opt_args
    )
    train_iter, eval_iter = data_pair
    curve = mxnet.notebook.callback.LiveLearningCurve('RMSE', 1)
    model.fit(X=train_iter,
              eval_data=eval_iter,
              eval_metric=RMSE,
              **mxnet.notebook.callback.args_wrapper(curve)
              )
    return curve
| apache-2.0 |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/core/alias.py | 6 | 9984 | # encoding: utf-8
"""
System command aliases.
Authors:
* Fernando Perez
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import re
import sys
from traitlets.config.configurable import Configurable
from IPython.core.error import UsageError
from IPython.utils.py3compat import string_types
from traitlets import List, Instance
from IPython.utils.warn import error
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# This is used as the pattern for calls to split_user_input.
# Groups: (leading whitespace, empty placeholder, first word, remainder).
# NOTE(review): the empty second group presumably keeps the result shape
# aligned with other input splitters -- confirm against split_user_input.
shell_line_split = re.compile(r'^(\s*)()(\S+)(.*$)')
def default_aliases():
    """Return list of shell aliases to auto-define.

    Each entry is a ``(alias_name, system_command)`` pair; the ``%l``
    placeholder in a command is later expanded to the rest of the user's
    input line (see Alias.__call__).
    """
    # Note: the aliases defined here should be safe to use on a kernel
    # regardless of what frontend it is attached to. Frontends that use a
    # kernel in-process can define additional aliases that will only work in
    # their case. For example, things like 'less' or 'clear' that manipulate
    # the terminal should NOT be declared here, as they will only work if the
    # kernel is running inside a true terminal, and not over the network.
    if os.name == 'posix':
        default_aliases = [('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
                           ('mv', 'mv'), ('rm', 'rm'), ('cp', 'cp'),
                           ('cat', 'cat'),
                           ]
        # Useful set of ls aliases. The GNU and BSD options are a little
        # different, so we make aliases that provide as similar as possible
        # behavior in ipython, by passing the right flags for each platform
        if sys.platform.startswith('linux'):
            ls_aliases = [('ls', 'ls -F --color'),
                          # long ls
                          ('ll', 'ls -F -o --color'),
                          # ls normal files only
                          ('lf', 'ls -F -o --color %l | grep ^-'),
                          # ls symbolic links
                          ('lk', 'ls -F -o --color %l | grep ^l'),
                          # directories or links to directories,
                          ('ldir', 'ls -F -o --color %l | grep /$'),
                          # things which are executable
                          ('lx', 'ls -F -o --color %l | grep ^-..x'),
                          ]
        elif sys.platform.startswith('openbsd') or sys.platform.startswith('netbsd'):
            # OpenBSD, NetBSD. The ls implementation on these platforms do not support
            # the -G switch and lack the ability to use colorized output.
            ls_aliases = [('ls', 'ls -F'),
                          # long ls
                          ('ll', 'ls -F -l'),
                          # ls normal files only
                          ('lf', 'ls -F -l %l | grep ^-'),
                          # ls symbolic links
                          ('lk', 'ls -F -l %l | grep ^l'),
                          # directories or links to directories,
                          ('ldir', 'ls -F -l %l | grep /$'),
                          # things which are executable
                          ('lx', 'ls -F -l %l | grep ^-..x'),
                          ]
        else:
            # BSD, OSX, etc.
            ls_aliases = [('ls', 'ls -F -G'),
                          # long ls
                          ('ll', 'ls -F -l -G'),
                          # ls normal files only
                          ('lf', 'ls -F -l -G %l | grep ^-'),
                          # ls symbolic links
                          ('lk', 'ls -F -l -G %l | grep ^l'),
                          # directories or links to directories,
                          ('ldir', 'ls -F -G -l %l | grep /$'),
                          # things which are executable
                          ('lx', 'ls -F -l -G %l | grep ^-..x'),
                          ]
        default_aliases = default_aliases + ls_aliases
    elif os.name in ['nt', 'dos']:
        default_aliases = [('ls', 'dir /on'),
                           ('ddir', 'dir /ad /on'), ('ldir', 'dir /ad /on'),
                           ('mkdir', 'mkdir'), ('rmdir', 'rmdir'),
                           ('echo', 'echo'), ('ren', 'ren'), ('copy', 'copy'),
                           ]
    else:
        # Unknown OS: define no aliases rather than guess wrong ones.
        default_aliases = []
    return default_aliases
class AliasError(Exception):
    """Base class for errors raised while defining or using aliases."""
    pass
class InvalidAliasError(AliasError):
    """Raised when an alias definition fails validation (see Alias.validate)."""
    pass
class Alias(object):
    """Callable object storing the details of one alias.

    Instances are registered as magic functions to allow use of aliases.
    """
    # Prepare blacklist: names that must never be shadowed by an alias
    # (shell-state magics plus the alias machinery itself).
    blacklist = {'cd','popd','pushd','dhist','alias','unalias'}
    def __init__(self, shell, name, cmd):
        self.shell = shell
        self.name = name
        self.cmd = cmd
        self.__doc__ = "Alias for `!{}`".format(cmd)
        # validate() raises InvalidAliasError for bad definitions.
        self.nargs = self.validate()
    def validate(self):
        """Validate the alias, and return the number of arguments."""
        if self.name in self.blacklist:
            raise InvalidAliasError("The name %s can't be aliased "
                                    "because it is a keyword or builtin." % self.name)
        try:
            caller = self.shell.magics_manager.magics['line'][self.name]
        except KeyError:
            pass
        else:
            # Only refuse to shadow an existing line magic that is not
            # itself an alias (redefining an alias is allowed).
            if not isinstance(caller, Alias):
                raise InvalidAliasError("The name %s can't be aliased "
                                        "because it is another magic command." % self.name)
        if not (isinstance(self.cmd, string_types)):
            raise InvalidAliasError("An alias command must be a string, "
                                    "got: %r" % self.cmd)
        # Count positional '%s' slots; '%%s' escapes a literal, and each
        # '%%s' also matches '%s' once, so subtracting the counts yields
        # the number of real (unescaped) slots.
        nargs = self.cmd.count('%s') - self.cmd.count('%%s')
        if (nargs > 0) and (self.cmd.find('%l') >= 0):
            raise InvalidAliasError('The %s and %l specifiers are mutually '
                                    'exclusive in alias definitions.')
        return nargs
    def __repr__(self):
        return "<alias {} for {!r}>".format(self.name, self.cmd)
    def __call__(self, rest=''):
        # `rest` is everything the user typed after the alias name.
        cmd = self.cmd
        nargs = self.nargs
        # Expand the %l special to be the user's input line
        if cmd.find('%l') >= 0:
            cmd = cmd.replace('%l', rest)
            rest = ''
        if nargs==0:
            # Un-escape any literal '%%s' now that no substitution happens.
            if cmd.find('%%s') >= 1:
                cmd = cmd.replace('%%s', '%s')
            # Simple, argument-less aliases
            cmd = '%s %s' % (cmd, rest)
        else:
            # Handle aliases with positional arguments
            args = rest.split(None, nargs)
            if len(args) < nargs:
                raise UsageError('Alias <%s> requires %s arguments, %s given.' %
                                 (self.name, nargs, len(args)))
            # Fill the %s slots, then append any leftover words verbatim.
            cmd = '%s %s' % (cmd % tuple(args[:nargs]),' '.join(args[nargs:]))
        self.shell.system(cmd)
#-----------------------------------------------------------------------------
# Main AliasManager class
#-----------------------------------------------------------------------------
class AliasManager(Configurable):
    """Registry of shell aliases, stored as line magics on the shell."""
    # Configurable traits: platform defaults plus user-supplied aliases.
    default_aliases = List(default_aliases(), config=True)
    user_aliases = List(default_value=[], config=True)
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
    def __init__(self, shell=None, **kwargs):
        super(AliasManager, self).__init__(shell=shell, **kwargs)
        # For convenient access
        self.linemagics = self.shell.magics_manager.magics['line']
        self.init_aliases()
    def init_aliases(self):
        # Load default & user aliases
        for name, cmd in self.default_aliases + self.user_aliases:
            self.soft_define_alias(name, cmd)
    @property
    def aliases(self):
        # All currently registered aliases as (name, command) pairs.
        return [(n, func.cmd) for (n, func) in self.linemagics.items()
                if isinstance(func, Alias)]
    def soft_define_alias(self, name, cmd):
        """Define an alias, but don't raise on an AliasError."""
        try:
            self.define_alias(name, cmd)
        except AliasError as e:
            error("Invalid alias: %s" % e)
    def define_alias(self, name, cmd):
        """Define a new alias after validating it.

        This will raise an :exc:`AliasError` if there are validation
        problems.
        """
        caller = Alias(shell=self.shell, name=name, cmd=cmd)
        self.shell.magics_manager.register_function(caller, magic_kind='line',
                                                    magic_name=name)
    def get_alias(self, name):
        """Return an alias, or None if no alias by that name exists."""
        aname = self.linemagics.get(name, None)
        return aname if isinstance(aname, Alias) else None
    def is_alias(self, name):
        """Return whether or not a given name has been defined as an alias"""
        return self.get_alias(name) is not None
    def undefine_alias(self, name):
        # Remove the alias's line magic; ValueError if `name` is not an alias.
        if self.is_alias(name):
            del self.linemagics[name]
        else:
            raise ValueError('%s is not an alias' % name)
    def clear_aliases(self):
        # Undefine every registered alias.
        for name, cmd in self.aliases:
            self.undefine_alias(name)
    def retrieve_alias(self, name):
        """Retrieve the command to which an alias expands."""
        caller = self.get_alias(name)
        if caller:
            return caller.cmd
        else:
            raise ValueError('%s is not an alias' % name)
| gpl-2.0 |
psiwczak/openstack | nova/virt/baremetal/nodes.py | 1 | 1318 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 University of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from nova.virt.baremetal import tilera
from nova.virt.baremetal import fake
from nova.openstack.common import cfg
from nova import flags
from nova import exception
FLAGS = flags.FLAGS
baremetal_opts = [
cfg.StrOpt('baremetal_driver',
default='tilera',
help='Bare-metal driver runs on')
]
FLAGS.register_opts(baremetal_opts)
def get_baremetal_nodes():
    """Return the bare-metal nodes backend selected by `baremetal_driver`.

    Raises NovaException for an unrecognized driver name.
    """
    d = FLAGS.baremetal_driver
    if d == 'tilera':
        return tilera.get_baremetal_nodes()
    elif d == 'fake':
        return fake.get_baremetal_nodes()
    else:
        # BUGFIX: the %(d)s placeholder was never interpolated, so the error
        # message printed the literal "%(d)s".  Substitute the driver name.
        raise exception.NovaException(
            _("Unknown baremetal driver %(d)s") % {'d': d})
| apache-2.0 |
roubert/python-phonenumbers | python/phonenumbers/shortdata/region_CZ.py | 3 | 1089 | """Auto-generated file, do not edit by hand. CZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
# Short-number metadata for the Czech Republic (region "CZ"): emergency,
# toll-free, and other short-code number patterns.  This module is
# auto-generated -- regenerate from the upstream metadata rather than
# hand-editing the patterns below.
PHONE_METADATA_CZ = PhoneMetadata(id='CZ', country_code=None, international_prefix=None,
    general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2,5}', possible_number_pattern='\\d{3,6}'),
    toll_free=PhoneNumberDesc(national_number_pattern='116(?:00[06]|111|123)', possible_number_pattern='\\d{6}', example_number='116000'),
    premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    emergency=PhoneNumberDesc(national_number_pattern='1(?:12|5[058])', possible_number_pattern='\\d{3}', example_number='112'),
    short_code=PhoneNumberDesc(national_number_pattern='1(?:1(?:2|6\\d{3}|8\\d)|2\\d{2,3}|3\\d{3,4}|4\\d{3}|5[058]|99)', possible_number_pattern='\\d{3,6}', example_number='116123'),
    standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    carrier_specific=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    short_data=True)
| apache-2.0 |
feketemihai/l10n-romania | l10n_ro_account_bank_statement/wizard/account_statement_from_invoice.py | 2 | 3259 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices
    """
    _inherit = "account.statement.from.invoice.lines"
    def populate_statement(self, cr, uid, ids, context=None):
        """Create one bank-statement line per selected move line.

        The statement to populate is taken from context['statement_id'];
        returns an act_window_close action in every case.
        """
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}
        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        line_date = time.strftime('%Y-%m-%d')
        statement = statement_obj.browse(
            cr, uid, statement_id, context=context)
        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            # Sign convention: debit residuals are positive, credit negative.
            amount = 0.0
            if line.debit > 0:
                amount = line.amount_residual
            elif line.credit > 0:
                amount = -line.amount_residual
            # Foreign-currency lines: convert the residual into the
            # statement's currency (overrides the amount computed above).
            if line.amount_currency:
                amount = currency_obj.compute(cr, uid, line.currency_id.id,
                    statement.currency.id, -line.amount_residual_currency, context=ctx)
            # Pass the move line / invoice along so the statement line's
            # create() can link back to them.
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jzoldak/edx-platform | lms/djangoapps/certificates/management/commands/ungenerated_certs.py | 73 | 7484 | """
Management command to find all students that need certificates for
courses that have finished, and put their cert requests on the queue.
"""
import logging
import datetime
from pytz import UTC
from django.core.management.base import BaseCommand, CommandError
from certificates.models import certificate_status_for_student
from certificates.api import generate_user_certificates
from django.contrib.auth.models import User
from optparse import make_option
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore
from certificates.models import CertificateStatuses
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Management command to find all students that need certificates
    for courses that have finished and put their cert requests on the queue.
    """
    # NOTE(review): the help text below mentions a --user option, but no such
    # option is defined in option_list -- confirm against callers/docs.
    help = """
    Find all students that need certificates for courses that have finished and
    put their cert requests on the queue.
    If --user is given, only grade and certify the requested username.
    Use the --noop option to test without actually putting certificates on the
    queue to be generated.
    """
    option_list = BaseCommand.option_list + (
        make_option('-n', '--noop',
                    action='store_true',
                    dest='noop',
                    default=False,
                    help="Don't add certificate requests to the queue"),
        make_option('--insecure',
                    action='store_true',
                    dest='insecure',
                    default=False,
                    help="Don't use https for the callback url to the LMS, useful in http test environments"),
        make_option('-c', '--course',
                    metavar='COURSE_ID',
                    dest='course',
                    default=False,
                    help='Grade and generate certificates '
                         'for a specific course'),
        make_option('-f', '--force-gen',
                    metavar='STATUS',
                    dest='force',
                    default=False,
                    help='Will generate new certificates for only those users '
                         'whose entry in the certificate table matches STATUS. '
                         'STATUS can be generating, unavailable, deleted, error '
                         'or notpassing.'),
    )

    def handle(self, *args, **options):
        """
        Queue a certificate-generation task for every enrolled student of the
        given course whose current certificate status is in the accepted set.
        Raises CommandError if no course is specified.
        """
        LOGGER.info(
            (
                u"Starting to create tasks for ungenerated certificates "
                u"with arguments %s and options %s"
            ),
            unicode(args),
            unicode(options)
        )

        # Will only generate a certificate if the current
        # status is in the unavailable state, can be set
        # to something else with the force flag
        if options['force']:
            valid_statuses = [getattr(CertificateStatuses, options['force'])]
        else:
            valid_statuses = [CertificateStatuses.unavailable]

        # Print update after this many students
        STATUS_INTERVAL = 500

        if options['course']:
            # try to parse out the course from the serialized form
            try:
                course = CourseKey.from_string(options['course'])
            except InvalidKeyError:
                # Fall back to the deprecated slash-separated course id format.
                LOGGER.warning(
                    (
                        u"Course id %s could not be parsed as a CourseKey; "
                        u"falling back to SlashSeparatedCourseKey.from_deprecated_string()"
                    ),
                    options['course']
                )
                course = SlashSeparatedCourseKey.from_deprecated_string(options['course'])
            ended_courses = [course]
        else:
            raise CommandError("You must specify a course")

        for course_key in ended_courses:
            # prefetch all chapters/sequentials by saying depth=2
            course = modulestore().get_course(course_key, depth=2)

            enrolled_students = User.objects.filter(
                courseenrollment__course_id=course_key
            )

            total = enrolled_students.count()
            count = 0
            start = datetime.datetime.now(UTC)

            for student in enrolled_students:
                count += 1
                if count % STATUS_INTERVAL == 0:
                    # Print a status update with an approximation of
                    # how much time is left based on how long the last
                    # interval took
                    diff = datetime.datetime.now(UTC) - start
                    timeleft = diff * (total - count) / STATUS_INTERVAL
                    hours, remainder = divmod(timeleft.seconds, 3600)
                    minutes, _seconds = divmod(remainder, 60)
                    print "{0}/{1} completed ~{2:02}:{3:02}m remaining".format(
                        count, total, hours, minutes)
                    start = datetime.datetime.now(UTC)

                cert_status = certificate_status_for_student(student, course_key)['status']
                LOGGER.info(
                    (
                        u"Student %s has certificate status '%s' "
                        u"in course '%s'"
                    ),
                    student.id,
                    cert_status,
                    unicode(course_key)
                )

                if cert_status in valid_statuses:
                    if not options['noop']:
                        # Add the certificate request to the queue
                        ret = generate_user_certificates(
                            student,
                            course_key,
                            course=course,
                            insecure=options['insecure']
                        )
                        if ret == 'generating':
                            LOGGER.info(
                                (
                                    u"Added a certificate generation task to the XQueue "
                                    u"for student %s in course '%s'. "
                                    u"The new certificate status is '%s'."
                                ),
                                student.id,
                                unicode(course_key),
                                ret
                            )
                    else:
                        LOGGER.info(
                            (
                                u"Skipping certificate generation for "
                                u"student %s in course '%s' "
                                u"because the noop flag is set."
                            ),
                            student.id,
                            unicode(course_key)
                        )
                else:
                    LOGGER.info(
                        (
                            u"Skipped student %s because "
                            u"certificate status '%s' is not in %s"
                        ),
                        student.id,
                        cert_status,
                        unicode(valid_statuses)
                    )

            LOGGER.info(
                (
                    u"Completed ungenerated certificates command "
                    u"for course '%s'"
                ),
                unicode(course_key)
            )
| agpl-3.0 |
cysnake4713/odoo | addons/resource/faces/task.py | 433 | 126405 | #@+leo-ver=4
#@+node:@file task.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains all classes for project plan objects
"""
#@<< Imports >>
#@+node:<< Imports >>
import pcalendar
import resource
import types
import sys
import datetime
import operator as op
import warnings
import locale
import weakref
import opcode
import new
try:
set
except NameError:
from sets import Set as set
#@-node:<< Imports >>
#@nl
_is_source = True
STRICT = 3
SLOPPY = 2
SMART = 1
#@+others
#@+node:Exceptions
#@+node:class AttributeError
class AttributeError(AttributeError):
    # NOTE: deliberately shadows the builtin AttributeError inside this
    # module; adds the ``is_frozen`` flag consulted by the task machinery.
    is_frozen = False
#@+node:class RecursionError
class RecursionError(Exception):
    """This exception is raised in case of circular dependencies
    within a project"""
    pass
#@+node:class _IncompleteError
class _IncompleteError(Exception):
    """This exception is raised, when there is not enough
    data specified to calculate a task"""

    def __init__(self, *args):
        # If the first argument is already a message string, pass the
        # arguments through unchanged; otherwise prepend a generic message
        # and keep the original args as extra payload.
        if isinstance(args[0], (basestring)):
            Exception.__init__(self, *args)
        else:
            Exception.__init__(self,
                               "Not enough data for calculating task, "\
                               "maybe you have a recursive reference.",
                               *args)
#@-node:Exceptions
#@+node:Proxies for self referencing
#@+node:class _MeProxy
class _MeProxy(object):
    """
    A Proxy class for the ``me`` attribute of tasks in the compile case.

    Attribute reads are redirected to the wrapped task; attributes that
    were never set yield type-appropriate defaults (dates, deltas, or 0)
    instead of raising, so task formulas can be evaluated before all
    data is present.  Attribute writes go through ``task._set_attrib``.
    """
    __slots__ = "task"

    def __init__(self, task):
        # Bypass our own __setattr__, which forwards to the task.
        object.__setattr__(self, "task", task)

    def __getattr__(self, name):
        # Frozen tasks answer directly.
        if self.task._is_frozen:
            return getattr(self.task, name)
        # Structural attributes are always read from the task itself.
        if name in ("name", "up", "root", "path",
                    "depth", "index", "calendar",
                    "children", "resource", "balance"):
            return getattr(self.task, name)
        # _NEVER_USED_ marks "attribute not set yet" (None is a legal value).
        value = self.task.__dict__.get(name, _NEVER_USED_)
        def make_val(default):
            if value is _NEVER_USED_: return default
            return value
        # Unset date/delta/number attributes get neutral defaults so that
        # expressions referring to them do not blow up during compilation.
        if name in ("start", "end"):
            return self.task._to_start(make_val("1.1.2006"))
        if name in ("length", "effort", "duration", "todo", "done",
                    "buffer", "performed", "performed_effort",
                    "performed_end", "performed_start",
                    "performed_work_time" ):
            return self.task._to_delta(make_val("0d"))
        if name in ("complete", "priority", "efficiency"):
            return make_val(0)
        if value is _NEVER_USED_:
            raise AttributeError("'%s' is not a valid attribute." % (name))
        return value

    def __setattr__(self, name, value):
        # All writes are routed through the task's attribute machinery.
        self.task._set_attrib(name, value)

    def __iter__(self):
        return iter(self.task)

    def add_attrib(self, name_or_iter, val=None):
        # Accepts either a single (name, value) pair via two arguments or
        # an iterable of (name, value) pairs.
        if not isinstance(name_or_iter, str):
            for n, v in name_or_iter:
                setattr(self, n, v)
        else:
            setattr(self, name_or_iter, val)
#@+node:class _MeProxyRecalc
class _MeProxyRecalc(_MeProxy):
    """
    A Proxy class for the ``me`` attribute of tasks in the recalc case.
    Writes are silently ignored unless the attribute is a declared
    task property.
    """

    def __setattr__(self, name, value):
        if self.task._properties.has_key(name):
            self.task._set_attrib(name, value)
#@+node:class _MeProxyError
class _MeProxyError(_MeProxy):
    """Proxy that raises *exc* when the attribute *attrib* (or, if
    attrib is falsy, any attribute) is assigned."""
    __slots__ = ("task", "attrib", "exc")

    def __init__(self, task, attrib, exc):
        _MeProxy.__init__(self, task)
        # object.__setattr__ avoids our own write-trapping __setattr__.
        object.__setattr__(self, "attrib", attrib)
        object.__setattr__(self, "exc", exc)

    def __setattr__(self, name, value):
        if name == self.attrib or not self.attrib:
            raise self.exc
#@+node:class _MeProxyWarn
class _MeProxyWarn(_MeProxy):
    """Proxy that emits a RuntimeWarning when the attribute *attrib*
    (or, if attrib is falsy, any attribute) is assigned."""
    __slots__ = ("task", "attrib", "message")

    def __init__(self, task, attrib, message):
        _MeProxy.__init__(self, task)
        object.__setattr__(self, "attrib", attrib)
        object.__setattr__(self, "message", message)

    def __setattr__(self, name, value):
        if name == self.attrib or not self.attrib:
            warnings.warn(self.message, RuntimeWarning, 2)
            if not self.attrib:
                #warn only one time!
                object.__setattr__(self, "attrib", 1)
#@-node:Proxies for self referencing
#@+node:Task instrumentation
#@+doc
# This section contains code for byte code instrumenting
# the task functions
#@-doc
#@nonl
#@+node:_int_to_arg
def _int_to_arg(value):
return value % 256, value / 256
#@-node:_int_to_arg
#@+node:_correct_labels
def _correct_labels(old_code, new_code):
    """
    Fix jump targets in *new_code* after instrumentation.

    ``_instrument`` expands some LOAD_FAST/STORE_FAST opcodes by an extra
    3-byte LOAD_GLOBAL of ``me``, which shifts every byte offset behind
    the expansion.  This pass builds an old-offset -> new-offset map and
    rewrites the arguments of relative and absolute jumps in place.
    Both arguments are lists of byte values (ints).
    """
    # localize dot variables for speed inside the loop
    hasjrel = opcode.hasjrel
    hasjabs = opcode.hasjabs
    HAVE_ARGUMENT = opcode.HAVE_ARGUMENT
    # labels: old jump-source offset -> old jump-target offset
    labels = {}
    old_new_map = {} # map old code offset to new code offset
    n = len(old_code)
    i = 0
    j = 0
    while i < n:
        op = old_code[i]
        nop = new_code[j]
        old_new_map[i] = j
        i = i + 1
        j = j + 1
        if op >= HAVE_ARGUMENT:
            oparg = old_code[i] + old_code[i + 1] * 256
            i = i + 2
            j = j + 2
            if nop != op:
                # opcode was rewritten into an attribute access: the new
                # stream contains an extra LOAD_GLOBAL (1 opcode + 2 arg bytes)
                j += 3 # skip the 3 addition opcodes for attrib access
            else:
                # remember jump instructions so their targets can be fixed up
                label = -1
                if op in hasjrel:
                    label = i + oparg
                elif op in hasjabs:
                    label = oparg
                if label >= 0:
                    labels[i] = label
    for offset, label in labels.iteritems():
        new_offset = old_new_map[offset]
        new_label = old_new_map[label]
        # the jump opcode sits 3 bytes before the end of its instruction
        op = new_code[new_offset - 3]
        #change jump arguments
        if op in hasjrel:
            jump = _int_to_arg(new_label - new_offset)
            new_code[new_offset - 2:new_offset] = jump
        elif op in hasjabs:
            new_code[new_offset - 2:new_offset] = _int_to_arg(new_label)
#@-node:_correct_labels
#@+node:_instrument
def _instrument(func):
    """
    Rewrite *func* (a task formula) at the bytecode level (Python 2 only).

    Loads/stores of local variables whose names do not start with ``_``
    are converted into attribute accesses on the global ``me`` object
    (``x = 1`` becomes ``me.x = 1``), so task formulas transparently
    operate on the task instance.  Returns a new function object; the
    names of all globals it reads are recorded in ``func.global_names``.
    """
    # localize dot variables
    opname = opcode.opname
    opmap = opcode.opmap
    jumps = opcode.hasjrel + opcode.hasjabs
    HAVE_ARGUMENT = opcode.HAVE_ARGUMENT
    co = func.func_code
    local_names = co.co_varnames
    all_names = list(co.co_names)
    global_names = set()

    def list_to_dict(l):
        # name -> position index (same as all_names.index, but O(1) lookups)
        return dict([(t[1], t[0]) for t in enumerate(l)])

    def is_local(name):
        # underscore-prefixed names stay true locals ("__constraint__" excepted)
        return name[0] == "_" and name != "__constraint__"

    # all_name_map maps names to the all_names index
    # (same like all_names.index())
    all_name_map = list_to_dict(all_names)
    if not all_name_map.has_key("me"):
        all_name_map["me"] = len(all_names)
        all_names.append("me")
    #<python 2.5>
    for ln in local_names:
        if not all_name_map.has_key(ln):
            all_name_map[ln] = len(all_names)
            all_names.append(ln)
    #</python 2.5>
    new_local_names = filter(is_local, local_names)
    new_local_name_map = list_to_dict(new_local_names)
    me_arg = _int_to_arg(all_name_map["me"])
    # lnotab bookkeeping: line-number table offsets must be shifted in
    # step with the growing code (see _correct_labels for jump targets)
    old_lnotab = map(ord, co.co_lnotab)
    new_lnotab = []
    tab_pos = 0
    try:
        next_tab_point = old_lnotab[0]
    except IndexError:
        next_tab_point = None
    last_tab_point = 0
    code = map(ord, co.co_code)
    new_code = []
    has_labels = False
    n = len(code)
    i = 0
    while i < n:
        if i == next_tab_point:
            # emit an lnotab entry adjusted to the new code length
            increment = len(new_code) - last_tab_point
            new_lnotab.extend((increment, old_lnotab[tab_pos + 1]))
            tab_pos += 2
            try:
                next_tab_point = i + old_lnotab[tab_pos]
                last_tab_point = len(new_code)
            except IndexError:
                next_tab_point = -1
        op = code[i]
        i += 1
        if op >= HAVE_ARGUMENT:
            arg0 = code[i]
            arg1 = code[i+1]
            oparg = arg0 + arg1 * 256
            i += 2
            if opname[op] == "LOAD_GLOBAL":
                global_names.add(oparg)
            elif opname[op] == "STORE_FAST":
                # change "store fast" to "store attribute" on ``me``
                name = local_names[oparg]
                if not is_local(name):
                    new_code.append(opmap["LOAD_GLOBAL"])
                    new_code.extend(me_arg)
                    op = opmap["STORE_ATTR"]
                    arg0, arg1 = _int_to_arg(all_name_map[name])
                else:
                    arg0, arg1 = _int_to_arg(new_local_name_map[name])
            elif opname[op] == "LOAD_FAST":
                # change "load fast" to "load attribute" on ``me``
                name = local_names[oparg]
                if not is_local(name):
                    new_code.append(opmap["LOAD_GLOBAL"])
                    new_code.extend(me_arg)
                    op = opmap["LOAD_ATTR"]
                    arg0, arg1 = _int_to_arg(all_name_map[name])
                else:
                    arg0, arg1 = _int_to_arg(new_local_name_map[name])
            elif op in jumps:
                has_labels = True
            new_code.extend((op, arg0, arg1))
        else:
            new_code.append(op)
    if has_labels:
        _correct_labels(code, new_code)
    # create new code and function objects and return
    new_code = "".join(map(chr, new_code))
    new_lnotab = "".join(map(chr, new_lnotab))
    new_co = new.code(co.co_argcount,
                      len(new_local_names),
                      max(co.co_stacksize, 2),
                      co.co_flags,
                      new_code,
                      co.co_consts,
                      tuple(all_names),
                      tuple(new_local_names),
                      co.co_filename,
                      co.co_name,
                      co.co_firstlineno,
                      new_lnotab,
                      co.co_freevars,
                      co.co_cellvars)
    func = new.function(new_co,
                        func.func_globals,
                        func.func_name,
                        func.func_defaults,
                        func.func_closure)
    func.global_names = tuple([all_names[index] for index in global_names])
    return func
#@-node:_instrument
#@-node:Task instrumentation
#@+node:Wrappers
#@+node:class _Path
class _Path(object):
    """
    This class represents an instrumented path to a task.  If it points
    to an attribute of a task, it not only returns the value of the
    attribute: the returned _ValueWrapper also records the source
    (task, attribute name) of the value.
    """

    def __init__(self, task, path_str):
        self._task = task
        self._path_str = path_str

    def __getattr__(self, name):
        # Task-valued attributes extend the path; leaf values are wrapped
        # together with their supplier reference.
        new = getattr(self._task, name)
        if isinstance(new, Task):
            return _Path(new, self._path_str + "." + name)
        return _ValueWrapper(new, [(self._task, name)])

    def __str__(self):
        return self._path_str

    def __iter__(self):
        return iter(self._task)
#@-node:class _Path
#@+node:_val
#helper functions for _ValueWrapper
#----------------------------------
def _val(val):
    """Unwrap a _ValueWrapper; plain values pass through unchanged."""
    return val._value if isinstance(val, _ValueWrapper) else val
#@-node:_val
#@+node:_ref
def _ref(val):
    """Return the supplier references of a wrapped value ([] for plain ones)."""
    if not isinstance(val, _ValueWrapper):
        return []
    return val._ref
#@-node:_ref
#@+node:_sref
def _sref(val, ref):
    """Overwrite the supplier references of *val* if it is a wrapper;
    plain values are silently ignored."""
    if isinstance(val, _ValueWrapper):
        val._ref = ref
#@nonl
#@-node:_sref
#@+node:_refsum
def _refsum(refs):
return reduce(lambda a, b: a + b, refs, [])
#@nonl
#@-node:_refsum
#@+node:class _ValueWrapper
class _ValueWrapper(object):
    """
    This class represents a value of a task attribute or a return value
    of a task method.  It holds the raw value in ``_value`` and the
    supplier of that value in ``_ref`` (a list of (task, attribute-name)
    pairs), so arithmetic results can be traced back to their sources.
    Nearly every operator is overloaded to unwrap operands, apply the
    operation, and propagate the combined references.
    """

    def __init__(self, value, ref):
        # value: the wrapped raw value; ref: list of (task, attrib) suppliers
        self._value = value
        self._ref = ref

    def unicode(self, *args):
        # Explicit unicode conversion that forwards codec arguments for str values.
        if isinstance(self._value, str):
            return unicode(self._value, *args)
        return unicode(self._value)

    def _vw(self, operand, *args):
        # Apply *operand* to the unwrapped args and wrap the result,
        # accumulating the supplier references of all operands.
        refs = _refsum(map(_ref, args))
        vals = map(_val, args)
        result = operand(*vals)
        return self.__class__(result, refs)

    def _cmp(self, operand, *args):
        # Like _vw, but for comparisons: returns the plain result and
        # pushes the combined references back into the wrapped operands.
        refs = _refsum(map(_ref, args))
        vals = map(_val, args)
        result = operand(*vals)
        map(lambda a: _sref(a, refs), args)
        return result

    def __getattr__(self, name):
        # Delegate unknown attribute access to the wrapped value.
        return getattr(self._value, name)

    def __getitem__(self, slice):
        # Slicing keeps the original references.
        return self.__class__(self._value[slice], self._ref)

    # --- conversions/representations delegate to the raw value (Py2 protocol) ---
    def __str__(self): return str(self._value)
    def __unicode__(self): return unicode(self._value)
    def __repr__(self): return repr(self._value)
    def __nonzero__(self): return bool(self._value)

    # --- rich comparisons: propagate references via _cmp ---
    def __lt__(self, other): return self._cmp(op.lt, self, other)
    def __le__(self, other): return self._cmp(op.le, self, other)
    def __eq__(self, other): return self._cmp(op.eq, self, other)
    def __ne__(self, other): return self._cmp(op.ne, self, other)
    def __gt__(self, other): return self._cmp(op.gt, self, other)
    def __ge__(self, other): return self._cmp(op.ge, self, other)

    # --- binary operators: result is wrapped again via _vw ---
    def __add__(self, other): return self._vw(op.add, self, other)
    def __sub__(self, other): return self._vw(op.sub, self, other)
    def __mul__(self, other): return self._vw(op.mul, self, other)
    def __floordiv__(self, other): return self._vw(op.floordiv, self, other)
    def __mod__(self, other): return self._vw(op.mod, self, other)
    # NOTE(review): the operator module has no ``divmod`` attribute --
    # __divmod__/__rdivmod__ look like they would raise AttributeError if
    # ever invoked; confirm whether they are actually used.
    def __divmod__(self, other): return self._vw(op.divmod, self, other)
    def __pow__(self, other): return self._vw(op.pow, self, other)
    def __lshift__(self, other): return self._vw(op.lshift, self, other)
    def __rshift__(self, other): return self._vw(op.rshift, self, other)
    def __and__(self, other): return self._vw(op.and_, self, other)
    def __xor__(self, other): return self._vw(op.xor, self, other)
    def __or__(self, other): return self._vw(op.or_, self, other)
    def __div__(self, other): return self._vw(op.div, self, other)

    # --- reflected operators ---
    def __radd__(self, other): return self._vw(op.add, other, self)
    def __rsub__(self, other): return self._vw(op.sub, other, self)
    def __rmul__(self, other): return self._vw(op.mul, other, self)
    def __rdiv__(self, other): return self._vw(op.div, other, self)
    def __rtruediv__(self, other): return self._vw(op.truediv, other, self)
    def __rfloordiv__(self, other): return self._vw(op.floordiv, other, self)
    def __rmod__(self, other): return self._vw(op.mod, other, self)
    def __rdivmod__(self, other): return self._vw(op.divmod, other, self)
    def __rpow__(self, other): return self._vw(op.pow, other, self)
    def __rlshift__(self, other): return self._vw(op.lshift, other, self)
    def __rrshift__(self, other): return self._vw(op.rshift, other, self)
    def __rand__(self, other): return self._vw(op.and_, other, self)
    def __rxor__(self, other): return self._vw(op.xor, other, self)
    def __ror__(self, other): return self._vw(op.or_, other, self)

    # --- numeric, length, iteration and hashing protocols ---
    def __int__(self): return int(self._value)
    def __long__(self): return long(self._value)
    def __float__(self): return float(self._value)
    def __len__(self): return len(self._value)
    def __iter__(self): return iter(self._value)
    def __hash__(self): return hash(self._value)
#@-node:class _ValueWrapper
#@-node:Wrappers
#@+node:Utilities
#@+node:class _NEVER_USED_
class _NEVER_USED_:
    # Sentinel class: marks "attribute was never set" in dict lookups
    # (distinct from None, which is a legal attribute value).
    pass
#@+node:class _StringConverter
class _StringConverter(object):
    """This class is a helper for the to_string mechanism of tasks:
    attribute access returns the attribute of *source* rendered through
    the source's formatter; indexing selects a format."""

    def __init__(self, source, format=None):
        self.source = source
        self.format = format

    def __getitem__(self, format):
        # converter[fmt] yields a new converter bound to that format
        return _StringConverter(self.source, format)

    def __getattr__(self, name):
        class StrWrapper(object):
            # Defers formatting of a method's result until it is called.
            def __init__(self, value, name, source, format):
                self._value = value
                self.name = name
                self.source = source
                self.format = format
            def __call__(self, arg):
                formatter = self.source.formatter(self.name,
                                                  arg,
                                                  self.format)
                return formatter(self._value(arg))
        value = getattr(self.source, name)
        if callable(value):
            # for methods, formatting must happen after the call, so
            # hand back a wrapper instead of a formatted string
            return StrWrapper(value, name, self.source, self.format)
        formatter = self.source.formatter(name, format=self.format)
        return formatter(value)
#@+node:Multi
def Multi(val, **kwargs):
    """Return the dictionary used for multi-valued attributes: *val* is
    stored under the "_default" key; keyword arguments add named
    scenario-specific values."""
    return dict(_default=val, **kwargs)
#@nonl
#@-node:Multi
#@+node:create_relative_path
def create_relative_path(from_, to_):
    """
    Build a relative ("up."-prefixed) path leading from absolute path
    *from_* to absolute path *to_* (both dot-separated strings).
    """
    src_parts = from_.split(".")
    dst_parts = to_.split(".")
    # Locate the first differing component.  When one path is a prefix
    # of the other (or they are equal), the loop ends without a break and
    # the index points at the last compared pair -- this matches the
    # historical behaviour of this helper.
    idx = 0
    for idx, (src_part, dst_part) in enumerate(zip(src_parts, dst_parts)):
        if src_part != dst_part:
            break
    remaining_src = src_parts[idx:]
    remaining_dst = dst_parts[idx:]
    return "up." * len(remaining_src) + ".".join(remaining_dst)
#@nonl
#@-node:create_relative_path
#@+node:create_absolute_path
def create_absolute_path(from_, to_):
    """
    Create an absolute path from absolute path *from_* and relative
    path *to_* (both dot-separated strings).

    Each leading "up" component of *to_* strips one trailing component
    from *from_*; the remainder of *to_* is appended.
    """
    from_parts = from_.split(".")
    to_parts = to_.split(".")
    # Count the leading "up" components explicitly.  The old code used the
    # loop index and sliced from_[:-0] (== empty list!) whenever to_ did
    # not start with "up", producing a broken ".x" result; it also
    # miscounted when *every* component was "up".
    up_count = 0
    for part in to_parts:
        if part != "up":
            break
        up_count += 1
    if up_count:
        from_parts = from_parts[:-up_count]
        to_parts = to_parts[up_count:]
    return "%s.%s" % (".".join(from_parts), ".".join(to_parts))
#@+node:_split_path
def _split_path(path):
try:
index = path.rindex(".")
return path[:index], path[index + 1:]
except:
return path
#@-node:_split_path
#@+node:_to_datetime
# Module-level shorthand for the project-calendar datetime conversion helper.
_to_datetime = pcalendar.to_datetime
#@nonl
#@-node:_to_datetime
#@+node:_get_tasks_of_sources
def _get_tasks_of_sources(task, attrib_filter="end,start,effort,length,duration"):
    """Collect the paths of all source tasks that *task* (or any of its
    parents) depends on, restricted to dependencies on the attribute
    names contained in *attrib_filter*."""
    found = {}
    current = task
    while current:
        for dependencies in current._sources.values():
            for dependency in dependencies:
                path, attrib = _split_path(dependency)
                if attrib and attrib in attrib_filter:
                    found[path] = True
        current = current.up
    return found.keys()
#@-node:_get_tasks_of_sources
#@+node:_build_balancing_list
def _build_balancing_list(tasks):
    """
    Returns a specially sorted list of tasks.
    If the tasks allocate resources in the order of that list,
    correct balancing is ensured.
    """
    # first sort the list for attributes: highest priority first,
    # then balance mode, then original position (stable tie-break)
    index = 0
    balancing_list = [(-t.priority, t.balance, index, t) for index, t in enumerate(tasks)]
    balancing_list.sort()
    balancing_list = [ t for p, b, i, t in balancing_list ]
    # now correct the presorted list:
    # if task a is dependent on task b, b will be moved before a
    done_map = { }
    count = len(balancing_list)
    while len(done_map) < count:
        # pick the first not-yet-inspected task
        for i in range(count):
            to_inspect = balancing_list[i]
            if done_map.has_key(to_inspect):
                continue
            done_map[to_inspect] = True
            break
        else:
            break
        inspect_path = to_inspect.path + "."
        sources = _get_tasks_of_sources(to_inspect)
        # the filter in the next line ignores assignments
        # like start = up.start (i.e. references to parents);
        # those are handled by the second check of inspect_depends_on
        # and would cause errors otherwise
        sources = [ s + "." for s in sources
                    if not inspect_path.startswith(s) ]
        def inspect_depends_on(task):
            # True if to_inspect must run after *task*
            cmp_path = task.path + "."
            for src in sources:
                if cmp_path.startswith(src):
                    #task is a source of to_inspect
                    return True
            if inspect_path.startswith(cmp_path):
                #to_inspect is a child of task
                return True
            return False
        # move every later task that to_inspect depends on in front of it
        for j in range(i + 1, count):
            check_task = balancing_list[j]
            if done_map.has_key(check_task):
                continue
            if inspect_depends_on(check_task):
                del balancing_list[j]
                balancing_list.insert(i, check_task)
                i += 1 # to_inspect is now at i + 1
    return balancing_list
#@-node:_build_balancing_list
#@+node:_as_string
def _as_string(val):
    """Render *val* as faces project-file source text (type-dispatched)."""
    if isinstance(val, basestring):
        return '"""%s"""' % val.replace("\n", "\\n")
    # working dates and plain datetimes share the same textual form
    if isinstance(val, (pcalendar._WorkingDateBase, datetime.datetime)):
        return '"%s"' % val.strftime("%Y-%m-%d %H:%M")
    if isinstance(val, datetime.timedelta):
        return '"%id %iM"' % (val.days, val.seconds / 60)
    if isinstance(val, (tuple, list)):
        rendered = ", ".join([_as_string(item) for item in val])
        template = "(%s)" if isinstance(val, tuple) else "[%s]"
        return template % rendered
    if isinstance(val, resource.Resource):
        return val._as_string()
    if isinstance(val, Task):
        return val.path
    return str(val)
#@-node:_as_string
#@+node:_step_tasks
def _step_tasks(task):
    """Generator: yield *task* (if it is a Task) and then every descendant
    in depth-first pre-order, using an explicit stack of child iterators."""
    if isinstance(task, Task):
        yield task
    stack = [iter(task.children)]
    while stack:
        # resume iterating the children at the top of the stack; on
        # descending into a subtree we break out and push its iterator,
        # so the for/else pops only when a level is exhausted
        for task in stack[-1]:
            yield task
            if task.children:
                stack.append(iter(task.children))
                break
        else:
            stack.pop()
#@-node:_step_tasks
#@-node:Utilities
#@+node:Cache
# Module-level memoization stores for instrumented task functions and
# balancing results.
instrumentation_cache = {}
balancing_cache = {}

def clear_cache():
    """Drop all memoized instrumentation and balancing results."""
    for cache in (instrumentation_cache, balancing_cache):
        cache.clear()
#@nonl
#@-node:Cache
#@+node:Resource Allocators
#@+others
#@+node:VariableLoad
def VariableLoad(limit=0):
    """
    Allocates the resource with the maximal possible load.
    If limit is given, the load is at least limit or more.
    Returns the limit negated; the sign marks it as a variable load.
    """
    # ``me`` is not defined here: it is a global injected into task
    # formulas by the instrumentation.  Outside such a formula the
    # NameError is expected and SLOPPY is assumed.
    try:
        balance = me.balance
    except NameError:
        balance = SLOPPY
    if balance != SLOPPY:
        raise RuntimeError("You may specify variable_load only with balance=SLOPPY")
    return -limit
#@-node:VariableLoad
#@+node:_calc_load
def _calc_load(task, resource):
#changed at the resource instance
load = resource.__dict__.get("load")
if load is not None: return load
load = task.__dict__.get("load")
if load is not None: return load
#inherited by the task
return min(task.load, task.max_load, resource.max_load or 100.0)
#@-node:_calc_load
#@+node:_calc_maxload
def _calc_maxload(task, resource):
#changed at the resource instance
max_load = resource.__dict__.get("max_load")
if max_load: return max_load
#an explicit load can overwrite max_load
load = max(resource.__dict__.get("load", 0),
task.__dict__.get("load"), 0)
#change at the task
max_load = task.__dict__.get("max_load")
if max_load: return max(max_load, load)
#inherited by the resource
max_load = resource.max_load
if max_load: return max(max_load, load)
#inherited by the task
return max(task.max_load, load)
#@-node:_calc_maxload
#@+node:class AllocationAlgorithm
class AllocationAlgorithm(object):
    """Base class for resource allocation strategies.

    ``test_allocation`` is invoked once per alternative resource; the
    task allocator compares the first items of the returned state
    tuples and commits the winner by calling ``allocate``.
    """

    def test_allocation(self, task, resource):
        """Simulate allocating *resource* for *task*.

        Returns a tuple describing the simulated allocation state; its
        first item is what the allocator compares between alternatives
        (here simply the task end).
        """
        state = (task.end,)
        return state

    def allocate(self, task, state):
        """Commit the allocation described by *state* (no-op in the base)."""
        pass
#@+node:class StrictAllocator
class StrictAllocator(AllocationAlgorithm):
"""This class implements the STRICT resource allocation"""
#@ @+others
#@+node:_distribute_len_loads
def _distribute_len_loads(self, task, resource, effort, length):
    # A special load calculation, if effort and length are given.
    # If the resources have a defined max_load, the load must be
    # individually calculated for each resource.
    # Formulas: r=resources, t=task
    #   effort = length * efficiency(t) * sum[load(r) * efficiency(r)]
    #   ==> sum_load = sum[load(r) * efficiency(r)]
    #                = effort / (length * efficiency(t))
    sum_load = float(effort) / (task.efficiency * length)
    # Algorithm: distribute the load (norm_load) equally to all
    # resources.  Any resource with max_load(r) < norm_load is capped at
    # max_load(r), and the remaining resources absorb the difference
    # with a correspondingly higher norm_load.  Processing in ascending
    # max_load order guarantees each cap is applied at most once.
    max_loads = map(lambda r: (_calc_maxload(task, r), r), resource)
    max_loads.sort()
    efficiency_sum = sum(map(lambda r: r.efficiency, resource))
    norm_load = sum_load / efficiency_sum
    loads = {}
    for max_load, r in max_loads[:-1]:
        if max_load < norm_load:
            # cap this resource and redistribute its shortfall
            loads[r] = max_load
            efficiency_sum -= r.efficiency
            sum_load -= max_load * r.efficiency
            norm_load = sum_load / efficiency_sum
        else:
            loads[r] = norm_load
    # the resource with the highest cap takes the final norm_load
    max_load, r = max_loads[-1]
    loads[r] = norm_load
    return loads
#@-node:_distribute_len_loads
#@+node:test_allocation
def test_allocation(self, task, resource):
effort = task.__dict__.get("effort")
to_start = task._to_start
to_end = task._to_end
to_delta = task._to_delta
if task.performed_end:
start = to_start(max(task.performed_end,
task.root.calendar.now,
task.start))
else:
start = task.start
if task.root.has_actual_data and task.complete == 0:
start = max(start, to_start(task.root.calendar.now))
base_start = to_start(task.performed_start or task.start)
calc_load = lambda r: _calc_load(task, r)
loads = map(lambda r: (r, calc_load(r)), resource)
length = task.__dict__.get("length")
duration = task.__dict__.get("duration")
end = task.__dict__.get("end")
#@ << correct length >>
#@+node:<< correct length >>
if length is not None:
length = to_delta(max(length - (task.start - base_start), 0))
#@nonl
#@-node:<< correct length >>
#@nl
#@ << correct duration >>
#@+node:<< correct duration >>
if duration is not None:
delta = task.start.to_datetime() - base_start.to_datetime()
delta = to_delta(delta, True)
duration = to_delta(max(duration - delta, 0), True)
#@nonl
#@-node:<< correct duration >>
#@nl
#@ << check end >>
#@+node:<< check end >>
if end is not None:
length = end - start
if length <= 0: return False
#@nonl
#@-node:<< check end >>
#@nl
#@ << correct effort and (re)calculate length >>
#@+node:<< correct effort and (re)calculate length >>
if effort is not None:
effort -= task.performed_effort
effort = to_delta(max(effort, 0))
if effort <= 0: return False
if length is not None:
#if length and effort is set, the load will be calculated
length = length or task.calendar.minimum_time_unit
loads = self._distribute_len_loads(task, resource,
effort, length)
def calc_load(res):
return loads[res]
else:
#the length depends on the count of resources
factor = sum(map(lambda a: a[0].efficiency * a[1],
loads)) * task.efficiency
length = effort / factor
#@nonl
#@-node:<< correct effort and (re)calculate length >>
#@nl
#@ << set adjust_date and delta >>
#@+node:<< set adjust_date and delta >>
if length is not None:
adjust_date = lambda date: date
delta = to_delta(length).round()
else:
assert(duration is not None)
adjust_date = _to_datetime
delta = datetime.timedelta(minutes=duration)
#@nonl
#@-node:<< set adjust_date and delta >>
#@nl
# find the earliest start date
start, book_load\
= self.balance(task, start, delta, adjust_date,
calc_load, resource)
end = to_end(start + delta)
start = to_start(start)
if effort is None:
#length is frozen ==> a new effort will be calculated
factor = sum(map(lambda a: a[1], loads))
length = end - start
effort = to_delta(length * factor\
+ task.performed_effort).round()
return (end, book_load), resource, calc_load, start, effort
#@-node:test_allocation
#@+node:allocate
def allocate(self, task, state):
# now really book the resource
end_bl, resource, calc_load, start, effort = state
end = end_bl[0]
cal = task.root.calendar
to_start = task._to_start
to_end = task._to_end
to_delta = task._to_delta
task.start = task.performed_start \
and to_start(task.performed_start) \
or to_start(start)
task.end = end
task._unfreeze("length")
task._unfreeze("duration")
length = end - start
for r in resource:
book_load = calc_load(r)
work_time = to_delta(length * book_load).round()
r.book_task(task, start, end, book_load, work_time, False)
#the following lines are important to be exactly at this
#positions in that order:
# done and todo are dependend on:
# - the existence of effort (if effort was set or not set)
# - book_task (they can only be calculated, if the task is booked)
# - booked_resource (to get the booked tasks)
task.booked_resource = resource
task.done = task.done
task.todo = task.todo
task.length = end - task.start
task.effort = to_delta(effort + task.performed_effort)
#@-node:allocate
#@+node:balance
#now effort exists always
def balance(self, task, start, delta, adjust_date,
calc_load, resource):
book_load = max(map(lambda r: r.get_load(task.start, task.scenario), resource))
return start, book_load
#@-node:balance
#@-others
#@-node:class StrictAllocator
#@+node:class SmartAllocator
class SmartAllocator(StrictAllocator):
    """Implements the SMART resource allocation: like STRICT, but the
    start date is moved forward until *every* candidate resource is
    free for the whole interval at the same time."""
    #@ @+others
    #@+node:balance
    def balance(self, task, start, delta, adjust_date,
                calc_load, resource):
        #find the earliest start date, at which all
        #resources in the team are free
        cal = task.root.calendar
        to_start = task._to_start
        start = adjust_date(start)
        scenario = task.scenario
        while True:
            #we have finished, when all resources have the
            #same next free start date
            for r in resource:
                max_load = _calc_maxload(task, r)
                load = calc_load(r)
                #find the next free time of the resource
                s = r.find_free_time(start, delta, load, max_load, scenario)
                if s != start:
                    # r is busy at "start": restart the scan from its
                    # next free slot and re-check all other resources
                    s = to_start(s)
                    start = adjust_date(s)
                    break
            else:
                # no resource moved the start ==> common slot found
                break
        return start, 1.0
    #@-node:balance
    #@-others
#@-node:class SmartAllocator
#@+node:class SloppyAllocator
class SloppyAllocator(AllocationAlgorithm):
    """Implements the SLOPPY resource allocation: the task may be
    interrupted and booked into any free time intervals of its
    resources instead of one contiguous block."""
    #@ @+others
    #@+node:test_allocation
    def test_allocation(self, task, resource):
        # dispatch on which attribute is frozen: with a given effort
        # the length is calculated, otherwise the effort
        if "effort" in task.__dict__:
            return self.test_allocation_effort(task, resource)
        return self.test_allocation_length(task, resource)
    #@-node:test_allocation
    #@+node:test_allocation_length
    def test_allocation_length(self, task, resource):
        #length is frozen ==> effort will be calculated
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        end = task.end
        if task.performed_end:
            # bug fix: this branch referenced the still unbound local
            # "start" instead of task.start (NameError at runtime);
            # see the parallel code in test_allocation_effort
            start = to_start(max(task.performed_end,
                                 task.root.calendar.now,
                                 task.start))
        else:
            start = task.start
        base_start = to_start(task.performed_start or task.start)
        length = to_delta(max(task.length - (start - base_start), 0))
        sum_effort = 0
        intervals = []
        scenario = task.scenario
        for r in resource:
            date = start
            max_load = _calc_maxload(task, r)
            book_load = _calc_load(task, r)
            while date < end:
                #find free time intervals and add them for booking
                endi, load = r.end_of_booking_interval(date, task)
                endi = min(endi, end)
                endi = to_end(endi)
                if book_load <= 0:
                    #variable book_load ==> calc the maximal possible
                    #book_load >= (the given minimal book_load)
                    used_book_load = - book_load
                    diff_load = max_load - load
                    # bug fix: compare against the negated minimal load
                    # (used_book_load); comparing against the negative
                    # book_load was always true for any free capacity
                    if diff_load and diff_load >= used_book_load:
                        used_book_load = diff_load
                    else:
                        used_book_load = max_load
                else:
                    used_book_load = book_load
                if max_load - load >= used_book_load:
                    intervals.append((r, used_book_load, date, endi))
                    # bug fix: accumulate ("=" silently discarded all
                    # but the last interval's effort)
                    sum_effort += (endi - date) * used_book_load
                date = to_start(endi)
        # negative effort: the task allocator picks the alternative
        # with the minimal first item, i.e. the maximal effort
        return -sum_effort, end, resource, intervals
    #@-node:test_allocation_length
    #@+node:test_allocation_effort
    def test_allocation_effort(self, task, resource):
        #effort is frozen ==> length will be calculated
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        intervals = []
        effort = task.__dict__.get("effort")
        if task.performed_end:
            # tracked work exists: continue after the performed data
            next_date = to_start(max(task.performed_end,
                                     task.root.calendar.now,
                                     task.start))
        else:
            next_date = task.start
        if task.root.has_actual_data and task.complete == 0:
            next_date = max(next_date, to_start(task.root.calendar.now))
        #walks chronologically through the booking
        #intervals of each resource, and reduces
        #the effort for each free interval
        #until it becomes 0
        alloc_effort = effort
        effort -= task.performed_effort
        while effort > 0:
            date = next_date
            interval_resource = []
            interval_end = to_start(sys.maxint)
            factor = 0
            for r in resource:
                max_load = _calc_maxload(task, r)
                book_load = _calc_load(task, r)
                end, load = r.end_of_booking_interval(date, task)
                interval_end = to_start(min(end, interval_end))
                if book_load <= 0:
                    #variable book_load ==> calc the maximal possible
                    #book_load >= (the given minimal book_load)
                    book_load = - book_load
                    diff_load = max_load - load
                    if diff_load and diff_load >= book_load:
                        book_load = diff_load
                    else:
                        book_load = max_load
                if book_load + load <= max_load:
                    # resource can take part in this interval
                    resource_factor = book_load * r.efficiency
                    interval_resource.append((r, book_load, resource_factor))
                    factor += resource_factor
            next_date = interval_end
            if factor:
                factor *= task.efficiency
                # length that would consume the remaining effort
                length = to_delta(effort / factor).round()
                end = date + length
                if interval_end >= end:
                    # the remaining effort fits into this interval
                    next_date = interval_end = end
                    effort = 0
                    book_end = end
                else:
                    # book the whole interval and continue
                    book_end = interval_end
                    length = book_end - date
                    minus_effort = length * factor
                    effort -= minus_effort
                book_end = to_end(book_end)
                intervals.append((date, book_end, length, interval_resource))
        return next_date, alloc_effort, resource, intervals
    #@-node:test_allocation_effort
    #@+node:allocate
    def allocate(self, task, state):
        # dispatch mirrors test_allocation
        if "effort" in task.__dict__: self.allocate_effort(task, state)
        else: self.allocate_length(task, state)
    #@-node:allocate
    #@+node:allocate_length
    def allocate_length(self, task, state):
        # now really book the resource
        neg_sum_effort, end, resource, intervals = state
        cal = task.root.calendar
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        task.start = to_start(task.performed_start or task.start)
        task.end = to_end(end)
        task._unfreeze("length")
        task._unfreeze("duration")
        effort = 0
        for r, load, s, e in intervals:
            work_time = to_delta((e - s) * load).round()
            effort += work_time
            r.book_task(task, s, e, load, work_time, False)
        #see comment at StrictAllocator.allocate
        task.booked_resource = resource
        task.done = task.done
        task.todo = task.todo
        task.effort = to_delta(effort + task.performed_effort).round()
    #@-node:allocate_length
    #@+node:allocate_effort
    def allocate_effort(self, task, state):
        # now really book the resource
        end, effort, resource, intervals = state
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        # a performed start always overrides the calculated one
        task.start = task.performed_start \
                     and to_start(task.performed_start) \
                     or to_start(intervals[0][0])
        task.end = to_end(end)
        task._unfreeze("length")
        task._unfreeze("duration")
        for start, end, length, resources in intervals:
            for r, load, factor in resources:
                work_time = to_delta(length * load)
                r.book_task(task, start, end, load, work_time, False)
        #see comment at StrictAllocator.allocate
        task.booked_resource = resource
        task.done = task.done
        task.todo = task.todo
        task.effort = to_delta(effort)
        task.length = task.end - task.start
    #@-node:allocate_effort
    #@-others
#@-node:class SloppyAllocator
#@-others
# Singleton allocator instances, keyed by the balance-type constants
# (SMART/SLOPPY/STRICT are defined earlier in this module).
_smart_allocator = SmartAllocator()
_sloppy_allocator = SloppyAllocator()
_strict_allocator = StrictAllocator()
_allocators = { SMART: _smart_allocator,
                SLOPPY: _sloppy_allocator,
                STRICT: _strict_allocator }
# reverse mapping used when serializing the "balance" attribute
_allocator_strings = { SMART: "SMART",
                       SLOPPY: "SLOPPY",
                       STRICT: "STRICT" }
#@-node:Resource Allocators
#@+node:Load Calculators
#@+node:YearlyMax
def YearlyMax(value):
    """Convert *value* (a time literal, e.g. "3w") into a load factor
    expressing that much working time per year."""
    #@ << calculate calendar and time_diff >>
    #@+node:<< calculate calendar and time_diff >>
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    time_diff = cal.Minutes(value)
    #@nonl
    #@-node:<< calculate calendar and time_diff >>
    #@nl
    minutes_per_year = (cal.working_days_per_year
                        * cal.working_hours_per_day
                        * 60)
    return float(time_diff) / minutes_per_year
#@nonl
#@-node:YearlyMax
#@+node:WeeklyMax
def WeeklyMax(value):
    """Convert *value* (a time literal, e.g. "3d") into a load factor
    expressing that much working time per week."""
    #@ << calculate calendar and time_diff >>
    #@+node:<< calculate calendar and time_diff >>
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    time_diff = cal.Minutes(value)
    #@nonl
    #@-node:<< calculate calendar and time_diff >>
    #@nl
    minutes_per_week = (cal.working_days_per_week
                        * cal.working_hours_per_day
                        * 60)
    return float(time_diff) / minutes_per_week
#@-node:WeeklyMax
#@+node:MonthlyMax
def MonthlyMax(value):
    """Convert *value* (a time literal, e.g. "5d") into a load factor
    expressing that much working time per month."""
    #@ << calculate calendar and time_diff >>
    #@+node:<< calculate calendar and time_diff >>
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    time_diff = cal.Minutes(value)
    #@nonl
    #@-node:<< calculate calendar and time_diff >>
    #@nl
    minutes_per_month = (cal.working_days_per_month
                         * cal.working_hours_per_day
                         * 60)
    return float(time_diff) / minutes_per_month
#@-node:MonthlyMax
#@+node:DailyMax
def DailyMax(value):
    """Convert *value* (a time literal, e.g. "2H") into a load factor
    expressing that much working time per day."""
    #@ << calculate calendar and time_diff >>
    #@+node:<< calculate calendar and time_diff >>
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    time_diff = cal.Minutes(value)
    #@nonl
    #@-node:<< calculate calendar and time_diff >>
    #@nl
    minutes_per_day = cal.working_hours_per_day * 60
    return float(time_diff) / minutes_per_day
#@-node:DailyMax
#@-node:Load Calculators
#@+node:Task
#@+node:class _TaskProperty
class _TaskProperty(object):
#@ @+others
#@+node:__init__
def __init__(self, method):
self.method = method
#@-node:__init__
#@+node:__get__
def __get__(self, instance, owner):
if not instance:
return None
return instance._wrap_attrib(self.method)
#@-node:__get__
#@-others
#@-node:class _TaskProperty
#@+node:class _RoundingTaskProperty
class _RoundingTaskProperty(object):
#@ @+others
#@+node:__init__
def __init__(self, method, name):
self.method = method
self.name = name
#@-node:__init__
#@+node:__get__
def __get__(self, instance, owner):
if not instance:
return None
result = instance._wrap_attrib(self.method).round()
if instance._is_frozen:
#correct the attrib to the rounded value
setattr(instance, self.name, result)
return result
#@-node:__get__
#@-others
#@-node:class _RoundingTaskProperty
#@+node:class Task
class Task(object):
#@ << description >>
#@+node:<< description >>
"""
This class represents a single task in the project tree. A task
can have other child tasks, or is a leaf of the tree. Resources
will be allocated only to leafes. You will never create task
objects by your self, they are created indirectly by Projects.
@var root:
Returns the root project task.
@var up:
Returns the parent task.
@var title:
Specifies an alternative more descriptive name for the task.
@var start:
    The start date of the task. Valid values are expressions and
    strings specifying a datetime.
@var end:
The end date of the task. Valid values are expressions and
strings.
@var effort:
Specifies the effort needed to complete the task. Valid values
are expressions and strings. (Todo: What happens, in case of
specified performance data...)
@var length:
Specifies the time the task occupies the resources. This is
working time, not calendar time. 7d means 7 working days, not one
week. Whether a day is considered a working day or not depends on
the defined working hours and global vacations.
@var duration:
Specifies the time the task occupies the resources. This is
calendar time, not working time. 7d means one week.
@var buffer:
    Specifies the time a task can be delayed, without moving dependent
    milestones. A Task with a buffer S{<=} 0d is part of the critical
chain. This attribute is readonly.
@var complete:
Specifies what percentage of the task is already completed.
@var todo:
Specifies the effort, which needs to be done to complete a
task. This is another (indirect) way to specify the ME{complete}
attribute.
@var done:
Specifies the work effort, which has been already done. This
attribute is readonly.
@var estimated_effort:
Specifies the estimated_effort given by setting the effort property.
@var performed:
Specifies a list of actual working times performed on the task.
The format is: C{[ (resource, from, to, time), ... ]}
@var performed_work_time:
Specifies the sum of all working times. This attribute is
readonly.
@var performed_effort:
Specifies the complete effort of all working times. This attribute is
readonly.
@var performed_start:
The start date of the performed data.
@var performed_end:
The end date of the performed data.
@var performed_resource:
The resources who have already performed on the task. This attribute is readonly.
@var balance:
Specifies the resource allocation type. Possible values are
CO{STRICT}, CO{SLOPPY}, CO{SMART}.
@var resource:
Specifies the possible resources, that may be allocated for the
task.
@var booked_resource:
Specifies the allocated resources of a task. This attribute is
readonly.
@var load:
Specifies the daily load of a resource for an allocation of the
specified task. A load of 1.0 (default) means the resource is
allocated for as many hours as specified by
ME{working_hours_per_day}. A load of 0.5 means half that many
hours.
@var max_load:
Specify the maximal allowed load sum of all simultaneously
allocated tasks of a resource. A ME{max_load} of 1.0 (default)
means the resource may be fully allocated. A ME{max_load} of 1.3
means the resource may be allocated with 30% overtime.
@var efficiency:
The efficiency of a resource can be used for two purposes. First
you can use it as a crude way to model a team. A team of 5 people
should have an efficiency of 5.0. Keep in mind that you cannot
track the member of the team individually if you use this
feature. The other use is to model performance variations between
your resources.
@var milestone:
Specified if the task is a milestone. The possible values are
C{True} or "later". If the start date of the milestone is not
a valid working date, the milestone will appear at the previous
working date before the given start date. If "later" is specified
the milestone will appear at the next valid working date.
A milestone has always an effort of 0d.
@var priority:
Specifies a priority between 1 and 1000. A task with higher
priority is more likely to get the requested resources. The
default priority is 500.
@var children:
Specifies a list of all subtasks. A task without children is
called a leaf task index{leaf task} otherwise it is called a
parent task index{parent task}. This attribute is readonly.
@var depth:
    Specifies the depth of the task within the hierarchy. This
    attribute is readonly.
@var index:
Specifies a structural index number. This attribute is readonly.
@var path:
Specifies the path.
@var copy_src:
    Specifies the path to another task. When you set this attribute,
    all attributes (except ME{start} and ME{end}) of copy_src will
    be copied to the current task. This is useful if you want to
    define the same task in different project definitions. It acts like
    a task link.
@var scenario:
The scenario which is currently evaluated. This attribute is readonly.
@var dont_inherit:
A list of attribute names, which will be not inherited by
subtasks.
@var calendar:
Specifies the task calendar.
@var working_days_per_week:
Specifies the days within a working week. This value is used
internally to convert time differences from weeks to days. The
default value is 5 days.
@var working_days_per_month:
Specifies the days within a working month. This value is used
internally to convert time differences from months to days. The
default value is 20 days.
@var working_days_per_year:
Specifies the days within a working year. This value is used
internally to convert time differences from years to days The
default value is 200 days.
@var working_hours_per_day:
    Specifies the hours within a working day. This value is used
    internally to convert time differences that are entered in days to
    hours. The default value is 8 hours.
@var minimum_time_unit:
Specifies the minimum resolution in minutes for the task
scheduling. The default value is 15 minutes.
@var vacation:
Specifies a public vacation for the calendar. This attribute is
specified as a list of date literals or date literal intervals. Be
aware that the end of an interval is excluded, i.e. it is the
first working date.
@var extra_work:
Specifies additional worktime. This attribute is specified as a
list of date literals or date literal intervals. Be aware that the
end of an interval is excluded, i.e. it is the first working date.
@var working_days:
Specifies the weekly working time within calendar. The format of
this attribute is: [ (day_range, time_range, ...), (day_range, time_range, ...), ... ].
    day_range is a comma-separated string of week days. Valid values
    are mon, tue, wed, thu, fri, sat, sun.
    time_range is a string specifying a time interval like
    8:00-10:00. You can specify any number of time_ranges following
    the first.
@var now:
Specifies the current daytime and is a date literal. ME{now} is
used to calculate several task attributes.
"""
#@nonl
#@-node:<< description >>
#@nl
#@ << declarations >>
#@+node:<< declarations >>
# Variables for the gui interface
_date_completion = { "Date": 'Date("|")',
"max": "max(|)",
"min": "min(|)",
"Multi" : "Multi(|)" }
_delta_completion = { "Delta" : 'Delta("|")',
"Multi" : "Multi(|)" }
__attrib_completions__ = { \
"def NewTask():" : "def |NewTask():\n",
"milestone": 'milestone = True',
"start": 'start = ',
"end": 'end = ',
"effort": 'effort = "|"',
"duration": 'duration = "|"',
"length": 'length = "|"',
"todo": 'todo = "|"',
"done": 'done = "|"',
"title": 'title = "|"',
"load": 'load = ',
"max_load": 'max_load = ',
"efficiency": 'efficiency = ',
"complete": 'complete = ',
"copy_src": 'copy_src =',
"__constraint__": '__constraint__():\n|"',
"priority": 'priority = ',
"balance" : 'balance = ',
"resource": 'resource = ',
"performed" : 'performed = [(|resource, "2002-02-01", "2002-02-05", "2H"),]',
"add_attrib": "add_attrib(|'name', None)",
"working_days_per_week": 'working_days_per_week = ',
"working_days_per_month": 'working_days_per_month = ',
"working_days_per_year": 'working_days_per_year = ',
"working_hours_per_day": 'working_hours_per_day = ',
"minimum_time_unit": 'minimum_time_unit = ',
"vacation": 'vacation = [("|2002-02-01", "2002-02-05")]',
"extra_work": 'extra_work = [("|2002-02-01", "2002-02-05")]',
"working_days" : 'working_days = ["|mon,tue,wed,thu,fri", "8:00-12:00", "13:00-17:00"]',
"now": 'now = "|"',
"calendar" : 'calendar = ',
"#load": { "YearlyMax": 'YearlyMax("|")',
"WeeklyMax": 'WeeklyMax("|")',
"MonthlyMax": 'MonthlyMax("|")',
"DailyMax": 'DailyMax("|")',
"VariableLoad" : "VariableLoad(|)"},
"#max_load": { "YearlyMax": 'YearlyMax("|")',
"WeeklyMax": 'WeeklyMax("|")',
"MonthlyMax": 'MonthlyMax("|")',
"DailyMax": 'DailyMax("|")' },
"#start": _date_completion,
"#end": _date_completion,
"#effort": _delta_completion,
"#duration": _delta_completion,
"#length": _delta_completion,
"#todo": _delta_completion,
"#done": _delta_completion,
"#resource" : "get_resource_completions",
"#calendar" : "get_calendar_completions",
"#balance": { "STRICT": "STRICT",
"SMART": "SMART",
"SLOPPY": "SLOPPY" } }
formats = { "start" : "%x %H:%M",
"end" : "%x %H:%M",
"performed_start" : "%x %H:%M",
"performed_end" : "%x %H:%M",
"load" : "%.2f",
"length" : "%dd{ %HH}{ %MM}",
"effort" : "%dd{ %HH}{ %MM}",
"estimated_effort" : "%dd{ %HH}{ %MM}",
"performed_effort" : "%dd{ %HH}{ %MM}",
"duration" : "%dd{ %HH}{ %MM}",
"complete" : "%i",
"priority" : "%i",
"todo" : "%dd{ %HH}{ %MM}",
"done" : "%dd{ %HH}{ %MM}",
"efficiency" : "%.2f",
"buffer" : "%dd{ %HH}{ %MM}",
"costs" : "%.2f",
"sum" : "%.2f",
"max" : "%.2f",
"min" : "%.2f",
"milestone" : "%s",
"resource" : "%s",
"booked_resource" : "%s",
"performed_resource" : "%s" }
_constraint = None
_is_frozen = False
_is_compiled = False
_is_parent_referer = False
scenario = None # only for autocompletion
milestone = False
performed = ()
performed_resource = ()
booked_resource = ()
_performed_resource_length = ()
_resource_length = ()
dont_inherit = ()
performed_start = None
performed_end = None
performed_work_time = pcalendar.Minutes(0)
_setting_hooks = {}
#@nonl
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
    def __init__(self, func, name, parent=None, index=1):
        """Build a task node from the task definition function *func*.

        func   -- plain python function holding the task definition
                  (it is instrumented before being evaluated)
        name   -- attribute name of the task within its parent
        parent -- parent Task, or None for the root
        index  -- 1-based position among the siblings
        """
        assert(type(func) == types.FunctionType)
        # cache the instrumented function: the same definition may be
        # instantiated several times (scenarios, copy_src links)
        func_key = (func.func_code, func.func_closure and id(func.func_closure))
        try:
            instrumented = instrumentation_cache[func_key]
        except KeyError:
            instrumented = _instrument(func)
            instrumented.org_code = func_key
            instrumentation_cache[func_key] = instrumented
        func.task_func = instrumented # will be used in the gui
        self._function = instrumented
        self.name = name
        self.up = parent
        self.children = []
        self._sources = {} # all tasks, I am linked to
        self._dependencies = {} # all tasks that link to me
        self._original_values = {}
        self._properties = {} # a registry of all non standard attributes
        self.title = self.name
        self.root = parent and parent.root or self
        self.scenario = self.root.scenario
        self.path = parent and parent.path + "." + name or name
        self.depth = len(self.path.split(".")) - 1
        self.index = parent and ("%s.%i" % (parent.index, index)) \
                     or str(index)
        # a task name must not shadow one of the reportable attributes
        if self.formats.has_key(name):
            raise AttributeError("Task name '%s' hides attribute of parent." \
                                 % name)
        cal = self.calendar
        # conversion shortcuts bound to the task calendar
        self._to_delta = cal.Minutes
        self._to_start = cal.StartDate
        self._to_end = cal.EndDate
    #@-node:__init__
#@+node:__iter__
    def __iter__(self):
        # iterate over the whole subtree (see _step_tasks for the order)
        return _step_tasks(self)
    #@-node:__iter__
#@+node:__repr__
    def __repr__(self):
        # short debug representation; use .path for the full location
        return "<Task %s>" % self.name
    #@-node:__repr__
#@+node:__cmp__
    def __cmp__(self, other):
        """Order tasks by path; falls back to comparing the path with
        *other* itself, so a task also compares against plain strings."""
        try:
            return cmp(self.path, other.path)
        except Exception:
            return cmp(self.path, other)
    #@-node:__cmp__
#@+node:__getattr__
    def __getattr__(self, name):
        """Fallback lookup: inherit unknown non-private attributes from
        the parent chain, skipping values that are themselves child
        tasks of the ancestor (those are siblings, not settings)."""
        try:
            if name[0] != "_":
                parent = self.up
                while parent:
                    if name not in parent.dont_inherit:
                        result = getattr(parent, name)
                        if not (isinstance(result, Task) and result.up == parent):
                            return result
                    parent = parent.up
        except AttributeError:
            pass
        except IndexError:
            # name == "": name[0] failed ==> treat as missing attribute
            raise AttributeError()
        exception = AttributeError("'%s' is not a valid attribute of '%s'."
                                   % (name, self.path))
        # is_frozen lets callers distinguish scheduling-time failures
        exception.is_frozen = self._is_frozen
        raise exception
    #@-node:__getattr__
#@+node:_idendity_
def _idendity_(self): return self.root.id + self.path[4:]
#@-node:_idendity_
#@+node:_set_hook
def _set_hook(cls, attrib_name, function=None):
if function:
cls._setting_hooks[attrib_name] = function
else:
try:
del cls._setting_hooks[attrib_name]
except KeyError: pass
_set_hook = classmethod(_set_hook)
#@nonl
#@-node:_set_hook
#@+node:Public methods
#@+node:to_string
def to_string(self): return _StringConverter(self)
to_string = property(to_string)
#@nonl
#@-node:to_string
#@+node:indent_name
def indent_name(self, ident=" "):
"""
returns a indented name, according to its depth in the hierachy.
"""
return ident * self.depth + self.name
indent_name.attrib_method = True
indent_name.__call_completion__ = "indent_name()"
#@-node:indent_name
#@+node:costs
def costs(self, cost_name, mode="ep"):
"""
calculates the resource costs for the task.
cost_name is the name of a rate attribute of the reosurce
mode is character combination:
e calculates the estimated costs
p calculates the performed costs
==> pe calculates all costs
"""
if self.children:
return sum([ c.costs(cost_name, mode) for c in self.children])
costs = 0
if 'e' in mode:
costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1],
self._resource_length))
if 'p' in mode:
costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1],
self._performed_resource_length))
costs /= (60.0 * self.root.calendar.working_hours_per_day)
return round(costs, 2)
costs.attrib_method = True
costs.__call_completion__ = 'costs("|")'
#@-node:costs
#@+node:sum
    def sum(self, attrib_name):
        """Sum *attrib_name* over the task subtree.

        NOTE(review): a task's own value is only added when the
        attribute is set locally AND listed in dont_inherit -- confirm
        this matches the intended aggregation of scheduled values
        (it avoids double counting inherited/parent aggregates).
        """
        val = 0
        if self.children:
            val += sum(map(lambda c: c.sum(attrib_name), self.children))
        if self.is_inherited(attrib_name):
            # an inherited value would be counted once per descendant
            return val
        if attrib_name not in self.dont_inherit:
            return val
        return val + getattr(self, attrib_name)
    sum.attrib_method = True
    sum.__call_completion__ = 'sum("|")'
    #@-node:sum
#@+node:min
def min(self, attrib_name):
if self.children:
return min(map(lambda c: c.min(attrib_name), self.children))
return getattr(self, attrib_name)
min.attrib_method = True
min.__call_completion__ = 'min("|")'
#@-node:min
#@+node:max
def max(self, attrib_name):
if self.children:
return max(map(lambda c: c.max(attrib_name), self.children))
return getattr(self, attrib_name)
max.attrib_method = True
max.__call_completion__ = 'max("|")'
#@-node:max
#@+node:all_resources
def all_resources(self):
result = self._all_resources_as_dict()
result = result.keys()
result.sort()
return result
#@-node:all_resources
#@+node:get_task
def get_task(self, path=None):
"""
Returns a task with the given path.
"""
if not path:
return self
names = path.split(".")
rest = ".".join(names[1:])
result = getattr(self, names[0], None)
return isinstance(result, Task) and result.get_task(rest) or None
#@-node:get_task
#@+node:snapshot
    def snapshot(self, indent="", name=None):
        """Serialize the scheduled task (and its subtree) as python
        source that reproduces the current plan with fixed dates,
        resources and performed bookings."""
        text = indent + "def %s():\n" % (name or self.name)
        indent += "    "
        # standard attributes first, skipping unset ones
        for name in ("priority", "balance", "complete",
                     "milestone", "end", "start", "effort", "load"):
            val = getattr(self, name, None)
            if val is None:
                continue
            if name[0] == "_":
                name = name[1:]
            text += "%s%s = %s\n" % (indent, name, _as_string(val))
        # then the user defined (non standard) attributes
        for name in self._properties:
            if name.startswith("performed"): continue
            val = getattr(self, name, None)
            try:
                # resource classes are emitted via the resource line below
                if issubclass(val, resource.Resource): continue
            except TypeError:
                pass
            text += "%s%s = %s\n" % (indent, name, _as_string(val))
        resources = tuple(self._iter_booked_resources())
        if resources:
            text += "%sresource = \\\n" % indent
            def make_resource(res):
                return "%s    %s" \
                       % (indent, res.snapshot())
            text += "&\\\n".join(map(make_resource, resources)) + "\n"
            # emit the bookings as a "performed" attribute
            def make_resource_booking(res):
                def make_booking(booking):
                    return '%s    (%s, "%s", "%s", "%sM"),' \
                           % (indent, res.name,
                              booking.book_start.strftime("%Y%m%d %H:%M"),
                              booking.book_end.strftime("%Y%m%d %H:%M"),
                              booking.work_time)
                return "\n".join(map(make_booking, res.get_bookings(self)))
            text += "%sperformed = [\n" % indent
            text += "\n".join(map(make_resource_booking, resources)) + "]"
        child_text = map(lambda c: c.snapshot(indent), self.children)
        text += "\n\n"
        text += "".join(child_text)
        return text
    #@-node:snapshot
#@+node:is_inherited
def is_inherited(self, attrib_name):
return not self.__dict__.has_key(attrib_name)
#@-node:is_inherited
#@+node:formatter
    def formatter(self, attrib_name, arg=None, format=None):
        """Return a function that converts values of *attrib_name* to
        display strings, using *format* (or the class level default
        from self.formats)."""
        formats = self.formats
        format = format or formats.get(attrib_name)
        # date / working-time attributes: strftime with fallback to str
        if attrib_name in ("start", "end", "length", "effort",
                           "done", "todo", "buffer", "estimated_effort",
                           "performed_effort", "performed_start", "performed_end"):
            def save_strftime(v):
                try:
                    return v.strftime(format)
                #except AttributeError: some bug avoid catching this exception
                except Exception:
                    return str(v)
            return save_strftime
        # durations are calendar time: strftime takes an extra flag
        if attrib_name == "duration":
            def save_strftime(v):
                try:
                    return v.strftime(format, True)
                except AttributeError:
                    return str(v)
            return save_strftime
        # resource collections: single title or comma separated titles
        if attrib_name in ("booked_resource", "performed_resource"):
            def get_resource_name(v):
                title = getattr(v, "title", None)
                if title: return title
                return ", ".join([r.title for r in v])
            return get_resource_name
        # aggregate methods may have an argument specific format,
        # e.g. formats['costs(rate)']
        if arg and attrib_name in ("costs", "sum", "max", "min"):
            format = formats.get("%s(%s)" % (attrib_name, arg), format)
        if format:
            # locale aware numeric formatting (grouping enabled)
            return lambda v: locale.format(format, v, True)
        return str
    #@-node:formatter
#@-node:Public methods
#@+node:Resource allocation Methods
#@+node:_all_resources_as_dict
def _all_resources_as_dict(self):
if self.children:
result = {}
for c in self.children:
result.update(c._all_resources_as_dict())
return result
if self.resource:
return dict(map(lambda r: (r, 1), self.resource.all_members()))
return {}
#@-node:_all_resources_as_dict
#@+node:_test_allocation
def _test_allocation(self, resource_state, allocator):
resource = self.resource._get_resources(resource_state)
if not resource:
return False
return allocator.test_allocation(self, resource)
#@-node:_test_allocation
#@+node:_allocate
    def _allocate(self, state, allocator):
        """Really book the resources chosen by *allocator* and record
        the resulting per-resource lengths on the task."""
        allocator.allocate(self, state)
        #activate cache for done and todo
        if self.start.to_datetime() > self.end.to_datetime():
            #this can happen when performed effort are
            #during non working time
            tmp = self.start
            self.start = self.end
            self.end = tmp
        for r in self.performed_resource:
            r.correct_bookings(self)
        # weak proxies avoid reference cycles between task and resource
        self._resource_length = map(lambda r: (weakref.proxy(r), \
                                               r.length_of(self)),
                                    self._iter_booked_resources())
    #@-node:_allocate
#@+node:_convert_performed
    def _convert_performed(self, all_resources):
        """Validate and normalize the user supplied "performed" list
        into self._performed as chronologically sorted tuples of
        (resource, start_datetime, end_datetime, working_time).
        Returns False when no performed data exists."""
        performed = self.performed
        if not performed: return False
        if not isinstance(performed, (tuple, list)) \
               or not isinstance(performed[0], (tuple, list)) \
               or not len(performed[0]) >= 3:
            self._raise(TypeError("""The format of the performed attribute must be:
            [( res_name, start_literal, end_literal, working_time ), ... ].
            """), "performed")
        # NOTE(review): round_down_delta is computed but not used in
        # this method -- possibly leftover code, verify
        round_down_delta = self.root.calendar.minimum_time_unit / 2
        round_down_delta = datetime.timedelta(minutes=round_down_delta)
        def convert_item(index):
            # converts item no. "index"; errors are reported with the
            # 1-based item number
            item = performed[index]
            res, start, end = item[:3]
            if isinstance(res, str):
                # resolve a resource name to the resource object
                found = filter(lambda r: r.name == res, all_resources)
                if found: res = found[0]
            try:
                if not isinstance(res, (resource.Resource,
                                        resource._MetaResource)):
                    raise ValueError("the resource '%s' is unknown." % res)
                start = _to_datetime(start)
                end = _to_datetime(end)
                if len(item) > 3:
                    working_time = self._to_delta(item[3]).round()
                else:
                    # no explicit working time: use the interval span
                    working_time = self._to_delta(end - start, True)
                return ((res, start, end, working_time), index)
            except Exception, exc:
                self._raise(exc.__class__("Item %i: %s" \
                                          % (index + 1, str(exc))),
                            "performed")
        converted = dict(map(convert_item, range(len(performed))))
        converted = converted.items()
        converted.sort()
        #check for overlapping items
        last_res = None
        for item, index in converted:
            res, start, end, work_time = item
            if last_res == res and start < last_end:
                self._warn("Items %i, %i: %s and %s are overlapping." \
                           % (last_index + 1, index + 1,
                              str(performed[last_index]),
                              str(performed[index])),
                           "performed")
            last_res = res
            last_end = end
            last_index = index
        self._performed = map(lambda x: x[0], converted)
        return True
    #@-node:_convert_performed
#@+node:_allocate_performed
    def _allocate_performed(self, performed):
        """Book the already-performed items (from _convert_performed) on
        their resources and derive performed_* summary attributes.

        Sets performed_start/end, performed_effort, performed_work_time,
        performed_resource and _performed_resource_length, then triggers
        _check_completion().
        """
        if not performed: return
        to_delta = self._to_delta
        to_start = self._to_start
        to_end = self._to_end
        last = datetime.datetime.min
        first = datetime.datetime.max
        effort = 0
        work_time_sum = 0
        zero_minutes = to_delta(0)
        minimum_time_unit = to_delta(self.calendar.minimum_time_unit)
        summary = {}
        # aggregate per resource: earliest start, latest end, total work time
        for item in performed:
            res, start, end, work_time = item
            effort += work_time * self.efficiency * res.efficiency
            work_time_sum += work_time
            res = res()
            ss, es, wts = summary.get(res, (datetime.datetime.max,
                                            datetime.datetime.min,
                                            zero_minutes))
            summary[res] = (min(ss, start), max(es, end), wts + work_time)
        for r, v in summary.iteritems():
            start, end, work_time = v
            assert(start.__class__ is datetime.datetime)
            assert(end.__class__ is datetime.datetime)
            #the booking limits should be inside the working time
            #to display them correctly in resource charts
            cstart = to_start(start).to_datetime()
            if cstart > start: cstart = to_end(start).to_datetime()
            cend = to_end(end).to_datetime()
            if cend < end: cend = to_start(end).to_datetime()
            if self.root.is_snapshot:
                delta = to_end(cend) - to_start(cstart)
            else:
                delta = to_delta(cend - cstart).round()
            if not delta:
                # avoid division by zero for zero-length bookings
                delta = minimum_time_unit
            book_load = float(work_time) / delta
            r().book_task(self, cstart, cend, book_load, work_time, True)
            last = max(end, last)
            first = min(start, first)
        self._performed_resource_length = tuple([ (r, v[2]) for r, v in summary.iteritems() ])
        self.performed_resource = tuple(summary.keys())
        self.performed_end = last
        self.performed_start = first
        self.performed_effort = to_delta(effort)
        self.performed_work_time = to_delta(work_time_sum)
        self._check_completion()
#@-node:_allocate_performed
#@+node:_iter_booked_resources
def _iter_booked_resources(self):
result = dict(map(lambda r: (r, 1), self.performed_resource))
result.update(dict(map(lambda r: (r, 1), self.booked_resource)))
return result.iterkeys()
#@-node:_iter_booked_resources
#@-node:Resource allocation Methods
#@+node:Compile Methods
#@+node:_generate
    def _generate(self, deferred=None):
        """Compile the task tree to a fix point.

        Tasks with still-unresolvable references are re-queued; when a pass
        defers exactly the same tasks as the previous one, no progress is
        possible and the next pass runs with do_raise=True so the real
        error surfaces.
        """
        do_raise = False
        deferred = deferred or [ self ]
        while deferred:
            new_deferred = []
            for task in deferred:
                task._compile(new_deferred, do_raise)
            # nothing compiled this round ==> raise on the next attempt
            do_raise = deferred == new_deferred
            deferred = new_deferred
#@-node:_generate
#@+node:_recalc_properties
    def _recalc_properties(self):
        """Re-evaluate the task function to refresh user-defined properties.

        Runs only when user-defined properties exist; uses the recalc proxy
        so standard attributes are left untouched.
        """
        if not self._properties: return
        self.__compile_function([], False, _MeProxyRecalc(self))
        self._is_compiled = True
#@-node:_recalc_properties
#@+node:_compile
    def _compile(self, deferred, do_raise):
        """(Re)compile this task and its children, deferring unresolved ones.

        The instance attribute __at_compile acts as a re-entrance marker:
        finding it already set means this task is being evaluated
        recursively through one of its children.
        """
        self.dont_inherit = ()
        self._constraint = None
        self._original_values.clear()
        self._properties.clear()
        try:
            self.__at_compile
            #@ << raise child recursion error >>
            #@+node:<< raise child recursion error >>
            self._raise(RecursionError("A child defines a "\
                                       "recursive definition at %s" % self.path))
            #@-node:<< raise child recursion error >>
            #@nl
        except AttributeError:
            self.__at_compile = self, ""
            try:
                self.__compile_function(deferred, do_raise, _MeProxy(self))
            finally:
                del self.__at_compile
        for c in self.children:
            if not c._is_compiled:
                c._compile(deferred, do_raise)
        if self._is_compiled:
            self.__check_milestone()
            self.__check_task()
            # remember at project level whether any task carries actual data
            self.root.has_actual_data |= self.__dict__.has_key("performed")
#@-node:_compile
#@+node:__compile_function
    def __compile_function(self, deferred, do_raise, me_instance):
        """Evaluate the task's definition function with patched globals.

        me_instance is a proxy that receives the attribute assignments made
        inside the task function.  The function's globals (and those of
        helper functions it references) are temporarily modified and
        restored in the finally clause.  Failed evaluations are appended
        to *deferred* unless do_raise is set.
        """
        self._is_compiled = self._is_frozen
        restore_globals = []
        globals_ = self._function.func_globals
        #@ << set function global values >>
        #@+node:<< set function global values >>
        def to_value_wrapper(a):
            if isinstance(a, _ValueWrapper):
                return a
            return _ValueWrapper(a, [(None, None)])
        def my_max(*args):
            return max(map(to_value_wrapper, args))
        def my_min(*args):
            return min(map(to_value_wrapper, args))
        globals_["me"] = me_instance
        if self._is_compiled:
            globals_["up"] = self.up
            globals_["root"] = self.root
        else:
            # not yet compiled: expose lazy path objects instead of tasks
            globals_["up"] = _Path(self.up, "up")
            globals_["root"] = _Path(self.root, "root")
        globals_["Delta"] = self._to_delta
        globals_["Date"] = self._to_start
        globals_["max"] = my_max
        globals_["min"] = my_min
        globals_["add_attrib"] = me_instance.add_attrib
        #@nonl
        #@-node:<< set function global values >>
        #@nl
        #@ << set me in global functions >>
        #@+node:<< set me in global functions >>
        #@+at
        # Is used for functions like YearlyMax, MonthlyMax, ....
        #@-at
        #@@code
        for name in self._function.global_names:
            try:
                obj = globals_[name]
                if isinstance(obj, types.FunctionType):
                    fg = obj.func_globals
                    if not fg.has_key("me") and "me" in obj.func_code.co_names:
                        restore_globals.append(fg)
                        fg["me"] = me_instance
            except KeyError: continue
        #@nonl
        #@-node:<< set me in global functions >>
        #@nl
        try:
            #@ << eval function >>
            #@+node:<< eval function >>
            if do_raise:
                try:
                    self._function()
                    self._is_compiled = True
                except _IncompleteError, e:
                    # compile the incomplete source task first, then re-raise
                    src = e.args[1]
                    if src is not self:
                        self.__at_compile = e.args[1:]
                        src._compile([], True)
                    raise
            else:
                try:
                    self._function()
                    self._is_compiled = True
                except AttributeError, e:
                    #print "AttributeError:", e, self.name, e.is_frozen, do_raise
                    deferred.append(self)
                except _IncompleteError:
                    #print "_IncompleteError:", id(self), self.name, do_raise
                    deferred.append(self)
                except RecursionError:
                    self._is_parent_referer = True
                    deferred.append(self)
            #@nonl
            #@-node:<< eval function >>
            #@nl
        finally:
            for fg in restore_globals:
                del fg["me"]
#@-node:__compile_function
#@-node:Compile Methods
#@+node:Setting methods
#@+node:_set_attrib
    def _set_attrib(self, name, value):
        """Dispatch an attribute assignment from the task function.

        Handles, in order: setting hooks, the __constraint__ pseudo
        attribute, nested task functions (child tasks), private names,
        path dereferencing, standard attributes (via a _set_<name> method)
        and finally user-defined attributes.
        """
        if value is _NEVER_USED_: return
        try:
            value = self._setting_hooks[name](self, name, value)
        except KeyError: pass
        if name == "__constraint__":
            self._constraint = value
            return
        if type(value) == types.FunctionType:
            if value.func_code.co_argcount == 0:
                #@ << add child task >>
                #@+node:<< add child task >>
                try:
                    task = self.__dict__[value.func_name]
                except KeyError:
                    task = Task(value, value.func_name, self, len(self.children) + 1)
                    self.children.append(task)
                    setattr(self, value.func_name, task)
                return
                #@nonl
                #@-node:<< add child task >>
                #@nl
        if name[0] == "_":
            #private vars will not be set
            return
        if isinstance(value, _Path):
            value = value._task
        set_method = getattr(self, "_set_" + name, None)
        if set_method:
            #@ << set standard attribute >>
            #@+node:<< set standard attribute >>
            if type(value) == types.DictionaryType:
                # scenario-dependent value: pick the current scenario's entry
                self.root.all_scenarios.update(value.keys())
                value = value.get(self.scenario, value["_default"])
            self.__set_sources(name, value)
            self._original_values[name] = value
            set_method(_val(value))
            #@nonl
            #@-node:<< set standard attribute >>
            #@nl
        else:
            #@ << set userdefined attribute >>
            #@+node:<< set userdefined attribute >>
            if callable( getattr(self.__class__, name, None)):
                raise NameError('You may not use "%s" as attribute' % name)
            setattr(self, name, value)
            self._properties[name] = True
            self.__set_sources(name, value)
            #@nonl
            #@-node:<< set userdefined attribute >>
            #@nl
#@+node:read only attributes
#@+node:_set_name
    def _set_name(self, value):
        """Reject user assignment: 'name' is read-only."""
        raise AttributeError("The attribute 'name' is readonly.")
#@nonl
#@-node:_set_name
#@+node:_set_done
    def _set_done(self, value):
        """Reject user assignment: 'done' is read-only.

        NOTE(review): this definition is shadowed by a later _set_done in
        the same class body (which converts and stores the value), so this
        read-only guard is dead code — confirm which behavior is intended.
        """
        raise AttributeError("The attribute 'done' is readonly.")
#@nonl
#@-node:_set_done
#@+node:_set_performed_work_time
    def _set_performed_work_time(self, value):
        """Reject user assignment: 'performed_work_time' is read-only."""
        raise AttributeError("The attribute 'performed_work_time' is readonly.")
#@nonl
#@-node:_set_performed_work_time
#@+node:_set_booked_resource
    def _set_booked_resource(self, value):
        """Reject user assignment: 'booked_resource' is read-only."""
        raise AttributeError("The attribute 'booked_resource' is readonly.")
#@nonl
#@-node:_set_booked_resource
#@+node:_set_performed_effort
    def _set_performed_effort(self, value):
        """Reject user assignment: 'performed_effort' is read-only."""
        raise AttributeError("The attribute 'performed_effort' is readonly.")
#@nonl
#@-node:_set_performed_effort
#@+node:_set_children
    def _set_children(self, value):
        """Reject user assignment: 'children' is read-only."""
        raise AttributeError("The attribute 'children' is readonly.")
#@nonl
#@-node:_set_children
#@+node:_set_depth
    def _set_depth(self, value):
        """Reject user assignment: 'depth' is read-only."""
        raise AttributeError("The attribute 'depth' is readonly.")
#@nonl
#@-node:_set_depth
#@+node:_set_index
    def _set_index(self, value):
        """Reject user assignment: 'index' is read-only."""
        raise AttributeError("The attribute 'index' is readonly.")
#@nonl
#@-node:_set_index
#@+node:_set_scenario
    def _set_scenario(self, value):
        """Reject user assignment: 'scenario' is read-only."""
        raise AttributeError("The attribute 'scenario' is readonly.")
#@nonl
#@-node:_set_scenario
#@+node:_set_buffer
    def _set_buffer(self, value):
        """Reject user assignment: 'buffer' is read-only."""
        raise AttributeError("The attribute 'buffer' is readonly.")
#@nonl
#@-node:_set_buffer
#@-node:read only attributes
#@+node:_set_start
    def _set_start(self, value):
        """Setter for 'start': remember the literal's class (used later for
        end-date detection in __check_milestone) and store the rounded date."""
        self.__start_class = value.__class__
        self.start = self._to_start(value).round()
#@-node:_set_start
#@+node:_set_end
    def _set_end(self, value):
        """Setter for 'end': convert the literal to a calendar end date."""
        self.end = self._to_end(value)
#@-node:_set_end
#@+node:_set_max_load
    def _set_max_load(self, max_load):
        """Setter for 'max_load': coerce to float."""
        self.max_load = float(max_load)
#@-node:_set_max_load
#@+node:_set_load
    def _set_load(self, load):
        """Setter for 'load': coerce to float."""
        self.load = float(load)
#@-node:_set_load
#@+node:_set_length
    def _set_length(self, value):
        """Setter for 'length': convert to working-time delta and round."""
        self.length = self._to_delta(value).round()
#@-node:_set_length
#@+node:_set_effort
    def _set_effort(self, value):
        """Setter for 'effort': convert to working-time delta and round."""
        self.effort = self._to_delta(value).round()
#@-node:_set_effort
#@+node:_set_duration
    def _set_duration(self, value):
        """Setter for 'duration': convert to an elapsed-time delta and round."""
        self.duration = self._to_delta(value, True).round()
#@-node:_set_duration
#@+node:_set_complete
    def _set_complete(self, value):
        """Setter for 'complete': store the completion percentage as given."""
        self.complete = value
#@-node:_set_complete
#@+node:_set_done
    def _set_done(self, value):
        """Setter for 'done': convert to working-time delta and round.

        NOTE(review): this definition shadows the earlier read-only
        _set_done in the same class body.
        """
        self.done = self._to_delta(value).round()
#@-node:_set_done
#@+node:_set_todo
    def _set_todo(self, value):
        """Setter for 'todo': convert to working-time delta and round."""
        self.todo = self._to_delta(value).round()
#@-node:_set_todo
#@+node:_set_milestone
    def _set_milestone(self, value):
        """Setter for 'milestone': store the flag as given."""
        self.milestone = value
#@-node:_set_milestone
#@+node:_set_resource
def _set_resource(self, value):
if not value:
self.resource = None
return
if isinstance(value, (tuple, list)):
value = reduce(lambda a, b: a & b, value)
self.resource = value()
#@-node:_set_resource
#@+node:_set_copy_src
    def _set_copy_src(self, value):
        """Setter for 'copy_src': inherit attributes from another task.

        Copies resource/balance when not set locally, a fixed list of
        scheduling parameters, and all user-defined properties of *value*.
        Raises _IncompleteError when the source task is not compiled yet.
        """
        if isinstance(value, _MeProxy):
            raise RuntimeError("Cannot copy me.")
        if not value._is_compiled:
            raise _IncompleteError(value, "copy_src")
        if value.resource and not self.resource:
            self.resource = value.resource
        if value.balance and not self.balance:
            self.balance = value.balance
        copy_parms = ("priority", "todo", "complete",
                      "_constraint", "load", "length",
                      "effort", "duration")
        for p in copy_parms:
            v = value.__dict__.get(p)
            if v: setattr(self, p, v)
        self.copy_src = value
        self._properties.update(value._properties)
        for k in value._properties.iterkeys():
            setattr(self, k, getattr(value, k))
#@-node:_set_copy_src
#@+node:__set_sources
    def __set_sources(self, attrib_name, value):
        """Record the dependency graph edge for an attribute assignment.

        Extracts the (task, attribute) references that produced *value*,
        stores them in self._sources, and registers this attribute as a
        dependent on each source task.
        """
        #@ << find references >>
        #@+node:<< find references >>
        def make_ref(val):
            if isinstance(val, _ValueWrapper):
                return val._ref
            if isinstance(val, Task):
                return [(val, "")]
            return []
        if isinstance(value, (list, tuple)):
            sources = _refsum(map(make_ref, value))
        else:
            sources = make_ref(value)
        #@nonl
        #@-node:<< find references >>
        #@nl
        if not sources: return
        #track only dependencies within the same project
        root = self.root
        sources = [ task.path + "." + attrib
                    for task, attrib in sources
                    if task and task.root is root ]
        self._sources[attrib_name] = tuple(sources)
        attr_path = self.path + "." + attrib_name
        #set dependencies of my sources
        for d in sources:
            path, attrib = _split_path(d)
            task = self.get_task(path)
            r_d = task._dependencies
            d_l = r_d.setdefault(attrib, {})
            d_l[attr_path] = True
#@-node:__set_sources
#@+node:Calendar Setters
#@+node:_set_calendar
    def _set_calendar(self, value):
        """Setter for 'calendar': rebind the conversion helpers and re-apply
        already-set date attributes under the new calendar."""
        self.calendar = value
        self._to_delta = value.Minutes
        self._to_start = value.StartDate
        self._to_end = value.EndDate
        self.__renew_dates()
#@-node:_set_calendar
#@+node:__renew_dates
    def __renew_dates(self):
        """Re-apply date-related attributes from their original literals so
        they are reinterpreted in the (possibly changed) calendar."""
        for attrib in ("effort", "start", "end", "length", "todo"):
            try:
                self._set_attrib(attrib, self._original_values[attrib])
            except KeyError:
                # attribute was never set on this task
                pass
#@-node:__renew_dates
#@+node:__make_calendar
    def __make_calendar(self):
        """Clone the inherited calendar on first local modification and
        rebind the conversion helpers to the clone."""
        if not "calendar" in self.__dict__:
            cal = self.calendar = self.calendar.clone()
            self._to_delta = cal.Minutes
            self._to_start = cal.StartDate
            self._to_end = cal.EndDate
#@nonl
#@-node:__make_calendar
#@+node:_set_vacation
    def _set_vacation(self, value):
        """Setter for 'vacation': record it on this task's own calendar clone
        and re-apply date attributes."""
        self.__make_calendar()
        self.calendar.set_vacation(value)
        self._properties["vacation"] = True
        self.vacation = value
        self.__renew_dates()
#@-node:_set_vacation
#@+node:_set_extra_work
    def _set_extra_work(self, value):
        """Setter for 'extra_work': record it on this task's own calendar
        clone and re-apply date attributes."""
        self.__make_calendar()
        self.calendar.set_extra_work(value)
        self._properties["extra_work"] = True
        self.extra_work = value
        self.__renew_dates()
#@-node:_set_extra_work
#@+node:_set_working_days
    def _set_working_days(self, value):
        """Setter for 'working_days': one or more (day_range, time_range...)
        tuples applied to this task's own calendar clone."""
        if type(value[0]) is str:
            # a single (day_range, ...) tuple was given; normalize to a sequence
            value = (value, )
        self.working_days = value
        self._properties["working_days"] = True
        self.__make_calendar()
        for v in value:
            day_range = v[0]
            tranges = tuple(v[1:])
            self.calendar.set_working_days(day_range, *tranges)
        self.__renew_dates()
#@nonl
#@-node:_set_working_days
#@+node:_set_minimum_time_unit
    def _set_minimum_time_unit(self, value):
        """Setter for 'minimum_time_unit' on this task's own calendar clone."""
        self.__make_calendar()
        self.calendar.minimum_time_unit = value
        self._properties["minimum_time_unit"] = True
#@-node:_set_minimum_time_unit
#@+node:_get_minimum_time_unit
    def _get_minimum_time_unit(self):
        """Getter: the smallest schedulable granularity, from the calendar."""
        return self.calendar.minimum_time_unit
    minimum_time_unit = property(_get_minimum_time_unit)
#@-node:_get_minimum_time_unit
#@+node:_set_working_days_per_week
    def _set_working_days_per_week(self, value):
        """Setter for 'working_days_per_week' on this task's own calendar."""
        self.__make_calendar()
        self.calendar.working_days_per_week = value
        self._properties["working_days_per_week"] = True
#@-node:_set_working_days_per_week
#@+node:_get_working_days_per_week
    def _get_working_days_per_week(self):
        """Getter: delegated to the calendar."""
        return self.calendar.working_days_per_week
    working_days_per_week = property(_get_working_days_per_week)
#@-node:_get_working_days_per_week
#@+node:_set_working_days_per_month
    def _set_working_days_per_month(self, value):
        """Setter for 'working_days_per_month' on this task's own calendar."""
        self.__make_calendar()
        self.calendar.working_days_per_month = value
        self._properties["working_days_per_month"] = True
#@-node:_set_working_days_per_month
#@+node:_get_working_days_per_month
    def _get_working_days_per_month(self):
        """Getter: delegated to the calendar."""
        return self.calendar.working_days_per_month
    working_days_per_month = property(_get_working_days_per_month)
#@-node:_get_working_days_per_month
#@+node:_set_working_days_per_year
    def _set_working_days_per_year(self, value):
        """Setter for 'working_days_per_year' on this task's own calendar."""
        self.__make_calendar()
        self.calendar.working_days_per_year = value
        self._properties["working_days_per_year"] = True
#@-node:_set_working_days_per_year
#@+node:_get_working_days_per_year
    def _get_working_days_per_year(self):
        """Getter: delegated to the calendar."""
        return self.calendar.working_days_per_year
    working_days_per_year = property(_get_working_days_per_year)
#@-node:_get_working_days_per_year
#@+node:_set_working_hours_per_day
def _set_working_hours_per_day(self, value):
self.__make_calendar()
self.calendar.working_hours_per_day = value
self._properties["set_working_hours_per_day"] = True
#@-node:_set_working_hours_per_day
#@+node:_get_working_hours_per_day
    def _get_working_hours_per_day(self):
        """Getter: delegated to the calendar."""
        return self.calendar.working_hours_per_day
    working_hours_per_day = property(_get_working_hours_per_day)
#@-node:_get_working_hours_per_day
#@+node:_set_now
def _set_now(self, value):
proxy = weakref.proxy
self.calendar.now = _to_datetime(value)
#@-node:_set_now
#@-node:Calendar Setters
#@-node:Setting methods
#@+node:Freezer Methods
#@+node:_unfreeze
def _unfreeze(self, attrib_name):
if self.__dict__.has_key(attrib_name):
del self.__dict__[attrib_name]
#@-node:_unfreeze
#@+node:_wrap_attrib
    def _wrap_attrib(self, method):
        """Evaluate a __calc_* property method with recursion protection.

        Strips the "__calc_" prefix (7 chars) to get the attribute name,
        raises RecursionError on re-entrant evaluation, _IncompleteError
        when the task is not compiled, and freezes the result as an
        instance attribute when the task is frozen.
        """
        attrib_name = method.__name__[7:]
        recursion_attrib = "_rec" + attrib_name
        try:
            dest, dattr = self.__at_compile
            raise RecursionError("Recursive definition of %s(%s) and %s(%s)." \
                                 % (self.path, attrib_name, dest.path, dattr))
        except AttributeError: pass
        if not self._is_compiled:
            raise _IncompleteError(self, attrib_name)
        try:
            getattr(self, recursion_attrib)
            raise RecursionError(self, attrib_name)
        except AttributeError: pass
        setattr(self, recursion_attrib, True)
        try:
            result = method(self)
            if self._is_frozen:
                # cache the value; _unfreeze removes it again
                setattr(self, attrib_name, result)
            return result
        finally:
            delattr(self, recursion_attrib)
#@-node:_wrap_attrib
#@+node:_find_frozen
    def _find_frozen(self, attrib_name, default=None):
        """Return the first frozen (cached) value of *attrib_name* found on
        this task or any ancestor, or *default*.

        NOTE(review): the trailing `or default` also replaces falsy values
        (e.g. 0) found on an ancestor with *default* — confirm intended.
        """
        value = self.__dict__.get(attrib_name)
        if value is not None:
            return value
        up = self.up
        return up and up._find_frozen(attrib_name) or default
#@-node:_find_frozen
#@-node:Freezer Methods
#@+node:Calculation Methods
#@+node:__calc_performed_effort
    def __calc_performed_effort(self):
        """Performed effort: sum over children; leaves default to zero
        (leaf values are set directly by _allocate_performed)."""
        if self.children:
            return self._to_delta(sum([ t.performed_effort for t in self.children ]))
        return pcalendar.Minutes(0)
    performed_effort = _TaskProperty(__calc_performed_effort)
#@-node:__calc_performed_effort
#@+node:__calc_estimated_effort
    def __calc_estimated_effort(self):
        """Estimated effort: sum over children, or this leaf's own effort."""
        if self.children:
            return self._to_delta(sum([ t.estimated_effort for t in self.children ]))
        return self.effort
    estimated_effort = _TaskProperty(__calc_estimated_effort)
#@-node:__calc_estimated_effort
#@+node:__calc_start
    def __calc_start(self):
        """Derive 'start': earliest child start, or end minus duration/length;
        falls back to an inherited frozen start on recursion."""
        to_start = self._to_start
        if self.children:
            try:
                return min([ to_start(t.start) for t in self.children
                             if not t._is_parent_referer ])
            except ValueError:
                # min() of an empty list: every child refers back to the parent
                #@ << raise child recursion error >>
                #@+node:<< raise child recursion error >>
                self._raise(RecursionError("A child defines a "\
                                           "recursive definition at %s" % self.path))
                #@-node:<< raise child recursion error >>
                #@nl
        try:
            end = self.end
            duration = self.__dict__.get("duration")
            if duration is not None:
                # duration is elapsed time: subtract in real datetime space
                start = end.to_datetime() - datetime.timedelta(minutes=duration)
            else:
                start = end - self.length
            return to_start(start)
        except RecursionError:
            start = self._find_frozen("start")
            if start: return to_start(start)
            #@ << raise recursion error >>
            #@+node:<< raise recursion error >>
            raise RecursionError("you have to specify a "\
                                 "start or an end at %s" % self.path)
            #@nonl
            #@-node:<< raise recursion error >>
            #@nl
    start = _TaskProperty(__calc_start)
#@-node:__calc_start
#@+node:__calc_end
    def __calc_end(self):
        """Derive 'end': latest child end, or start plus duration/length;
        falls back to an inherited frozen end on recursion."""
        to_end = self._to_end
        if self.children:
            try:
                return max([ to_end(t.end) for t in self.children
                             if not t._is_parent_referer ])
            except ValueError:
                # max() of an empty list: every child refers back to the parent
                #@ << raise child recursion error >>
                #@+node:<< raise child recursion error >>
                self._raise(RecursionError("A child defines a "\
                                           "recursive definition at %s" % self.path))
                #@-node:<< raise child recursion error >>
                #@nl
        try:
            start = self.start
            duration = self.__dict__.get("duration")
            if duration is not None:
                # duration is elapsed time: add in real datetime space
                end = start.to_datetime() + datetime.timedelta(minutes=duration)
            else:
                end = start + self.length
            return to_end(end)
        except RecursionError:
            end = self._find_frozen("end")
            if end: return to_end(end)
            #@ << raise recursion error >>
            #@+node:<< raise recursion error >>
            raise RecursionError("you have to specify a "\
                                 "start or an end at %s" % self.path)
            #@nonl
            #@-node:<< raise recursion error >>
            #@nl
    end = _TaskProperty(__calc_end)
#@-node:__calc_end
#@+node:__calc_load
def __calc_load(self):
length = self.__dict__.get("length")
effort = self.__dict__.get("effort")
if length is not None and effort is not None:
return float(effort) / (float(length) or 1.0)
load = self._find_frozen("load")
if load is not None: return load
return 1.0
load = _TaskProperty(__calc_load)
#@-node:__calc_load
#@+node:__calc_length
    def __calc_length(self):
        """Derive 'length': end - start, or effort scaled by load."""
        effort = self.__dict__.get("effort")
        if effort is None:
            return self.end - self.start
        return self._to_delta(effort / self.load)
    length = _RoundingTaskProperty(__calc_length, "length")
#@-node:__calc_length
#@+node:__calc_duration
    def __calc_duration(self):
        """Derive 'duration': elapsed (real) time between start and end."""
        return self._to_delta(self.end.to_datetime()\
                              - self.start.to_datetime(), True)
    duration = _TaskProperty(__calc_duration)
#@-node:__calc_duration
#@+node:__calc_effort
    def __calc_effort(self):
        """Derive 'effort': sum over children, or length scaled by load."""
        if self.children:
            return self._to_delta(sum([ t.effort for t in self.children ]))
        return self._to_delta(self.length * self.load)
    effort = _RoundingTaskProperty(__calc_effort, "effort")
#@-node:__calc_effort
#@+node:__calc_done
    def __calc_done(self):
        """Derive 'done': sum over children, or work booked on resources;
        falls back to full effort when the task counts as finished."""
        if self.children:
            dones = map(lambda t: t.done, self.children)
            return self._to_delta(sum(dones))
        res = self._iter_booked_resources()
        done = sum(map(lambda r: r.done_of(self), res))
        complete = self.__dict__.get("complete")
        todo = self.__dict__.get("todo")
        # NOTE(review): parses as `(not done and complete == 100) or todo == 0`
        # -- confirm this grouping is intended before changing the condition.
        if not done and complete == 100 or todo == 0:
            #if now is not set
            done = self.effort
        return self._to_delta(done)
    done = _TaskProperty(__calc_done)
#@-node:__calc_done
#@+node:__calc_buffer
    def __calc_buffer(self):
        """Derive 'buffer': the slack before this task delays a successor.

        For parents: the minimum child buffer.  For leaves: simulate a
        shifted end for each successor that depends on this task's end and
        measure the real delay, accounting for expressions that add an
        explicit offset to the dependency.
        """
        if self.children:
            return self._to_delta(min(map(lambda t: t.buffer, self.children)))
        scenario = self.scenario
        end = self.end
        old_end = self.__dict__.get("end")
        #@ << find all tasks, that depend on my end >>
        #@+node:<< find all tasks, that depend on my end >>
        deps = { }
        task = self
        while task:
            deps.update(task._dependencies.get("end", {}))
            task = task.up
        #@nonl
        #@-node:<< find all tasks, that depend on my end >>
        #@nl
        #@ << define unfreeze_parents >>
        #@+node:<< define unfreeze_parents >>
        def unfreeze_parents():
            task = self.up
            while task:
                task._unfreeze("end")
                task = task.up
        #@nonl
        #@-node:<< define unfreeze_parents >>
        #@nl
        buffers = [ ]
        for d in deps.keys():
            path, attrib = _split_path(d)
            if attrib != "start":
                continue
            #@ << calculate buffer to descendant 'd' >>
            #@+node:<< calculate buffer to descendant 'd' >>
            unfreeze_parents()
            # the following code considers an expression like
            # start = predecessor.end + Delta("1d"); the buffer
            # calculation must be aware of the 1d delay.
            # (therefore a simple succ_start - end would be
            # incorrect)
            # Solution: simulate a later end and calculate the
            # real delay
            succ_task = self.get_task(path)
            simulated_task = Task(succ_task._function,
                                  succ_task.name,
                                  succ_task.up, 1)
            current_start = succ_task.start
            simulated_end = current_start
            self.end = current_start
            simulated_task._generate()
            simulated_start = simulated_task.start
            unfreeze_parents()
            if old_end: self.end = old_end
            else: self._unfreeze("end")
            del simulated_task
            current_delay = current_start - end
            simulated_delay = simulated_start - simulated_end
            real_delay = current_delay - simulated_delay
            try:
                buffer_ = real_delay + succ_task.buffer
            except RecursionError, err:
                self._raise(err)
            #@nonl
            #@-node:<< calculate buffer to descendant 'd' >>
            #@nl
            buffers.append(buffer_)
            if not buffer_:
                # zero buffer: this task is on the critical path, stop early
                break
        if buffers:
            return self._to_delta(min(buffers))
        return not self.milestone \
               and self.root.end - end \
               or self._to_delta(0)
    buffer = _TaskProperty(__calc_buffer)
#@-node:__calc_buffer
#@+node:__calc_complete
def __calc_complete(self):
done = self.done
todo = self.todo
return int(100.0 * done / ((done + todo) or 1))
complete = _TaskProperty(__calc_complete)
#@-node:__calc_complete
#@+node:__calc_todo
    def __calc_todo(self):
        """Derive 'todo': from an explicit complete percentage, from the
        children, or from the booked resources."""
        complete = self.__dict__.get("complete")
        if complete:
            # effort = done + todo
            #            done                       done
            # complete = ------  ==> todo =       -------- - done
            #            effort                   complete
            complete = float(complete)
            done = self.done
            if done:
                done = float(done)
                return self._to_delta(done * 100.0 / complete - done)
            return self._to_delta(self.effort * complete / 100.0)
        if self.children:
            todos = map(lambda t: t.todo, self.children)
            return self._to_delta(sum(todos))
        todo = sum(map(lambda r: r.todo_of(self), self.booked_resource))
        return self._to_delta(max(todo, self.effort - self.done))
    todo = _TaskProperty(__calc_todo)
#@-node:__calc_todo
#@-node:Calculation Methods
#@+node:Check Methods
#@+node:__check_task
    def __check_task(self):
        """Validate a leaf task after compilation.

        Requires at least one of start/end; when only one is given and no
        extent attribute exists, a default effort of one day is assumed.
        """
        if self.children: return
        start = self._find_frozen("start")
        end = self._find_frozen("end")
        if not (start or end):
            self._raise(ValueError("You must specify either a"\
                                   " start or an end attribute"))
        if start and end: return
        length = self.__dict__.get("length")
        duration = self.__dict__.get("duration")
        effort = self.__dict__.get("effort")
        if not (effort or length or duration):
            #set a default value
            self._set_effort("1d")
            #self._raise(ValueError("You must specify either a"\
            #                       " length or a duration or "\
            #                       "an effort attribute"))
#@-node:__check_task
#@+node:__check_milestone
def __check_milestone(self):
if not self.milestone: return
self.length = self._to_delta(0)
start = self.__dict__.get("start")
if not start:
self._raise(ValueError("Milestone must have start attribute"),
"milstone")
if self.__start_class.__name__ == "edt":
#the milestone is probably dependent on the end date of
#an other task (see edt in pcalendar) ==> start at the end date
self.start = self.end = self._to_end(self.start)
else:
self.start = self.end = self._to_start(self.start)
#@-node:__check_milestone
#@+node:_check_completion
    def _check_completion(self):
        """For fully completed tasks, replace the planned dates and effort
        with the actually performed values."""
        if not self.performed_effort: return
        if self.root.is_snapshot: return
        # allocation is not done yet ==> self.todo, self.done,
        # self.complete cannot be calculated
        if self._find_frozen("complete", 0) < 100 \
           and self.__dict__.get("todo", 1) > 0:
            return
        start = self.performed_start
        end = self.performed_end
        #ensure that self.start.to_datetime() < self.end.to_datetime()
        cstart = self._to_start(start)
        if cstart.to_datetime() > start: cstart = self._to_end(start)
        cend = self._to_end(end)
        if cend.to_datetime() < end: cend = self._to_start(end)
        self.start = cstart
        self.end = cend
        if self.performed_effort != self.effort:
            # keep the original plan available as estimated_effort
            self.estimated_effort = self.effort
            self.effort = self.performed_effort
#@-node:_check_completion
#@+node:check
    def check(self):
        """Run the task's __constraint__ function (if any) with me/up/root
        and assert_ bound into the function's globals."""
        if self._constraint and self._is_compiled:
            globals_ = self._function.func_globals
            globals_["me"] = self
            globals_["up"] = self.up
            globals_["root"] = self.root
            globals_["assert_"] = self.__assert
            self._constraint()
#@-node:check
#@-node:Check Methods
#@+node:Error Methods
#@+node:__assert
    def __assert(self, value):
        """assert_ helper for constraints: warn (not raise) when false."""
        if not value:
            warnings.warn('Assertion in scenario: "%s".' % self.scenario,
                          RuntimeWarning, 2)
#@-node:__assert
#@+node:_warn
    def _warn(self, message, attrib=None, level=2):
        """Emit *message* as a warning attributed to *attrib*, by replaying
        the task function with a warning proxy."""
        self.__compile_function([], True, _MeProxyWarn(self, attrib, message))
#@-node:_warn
#@+node:_raise
    def _raise(self, exc, attrib=None):
        """Attribute *exc* to *attrib* via an error proxy, then raise it."""
        self.__compile_function([], True, _MeProxyError(self, attrib, exc))
        raise exc
#@-node:_raise
#@-node:Error Methods
#@-others
#@nonl
#@-node:class Task
#@-node:Task
#@+node:Projects
#@+node:class _ProjectBase
class _ProjectBase(Task):
    """
    Base class for all projects.

    A project is the root task of a schedule; it owns the calendar, the
    scenario name and the snapshot/serialization helpers.
    """
    #@ << class _ProjectBase declarations >>
    #@+node:<< class _ProjectBase declarations >>
    __attrib_completions__ = { }
    __attrib_completions__.update(Task.__attrib_completions__)
    del __attrib_completions__["milestone"] #project cannot be milestones
    priority = 500
    efficiency = 1.0
    max_load = 1.0
    balance = 0
    resource = None
    copy_src = None
    has_actual_data = False
    is_snapshot = False
    #@-node:<< class _ProjectBase declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, top_task, scenario="_default", id=""):
        """Build and compile the project from the *top_task* function."""
        self.calendar = pcalendar.Calendar()
        Task.__init__(self, top_task, top_task.func_name)
        self.id = id or self.name
        self.scenario = scenario
        self.all_scenarios = set(("_default",))
        self.path = "root"
        # keep a pristine copy of the function globals; _restore_globals
        # puts them back after compilation patched them
        self._globals = top_task.func_globals.copy()
        self._generate()
    #@-node:__init__
    #@+node:_idendity_
    # NOTE(review): name misspells "identity"; kept because other code
    # calls tasks through this exact method name.
    def _idendity_(self): return self.id
    #@-node:_idendity_
    #@+node:_restore_globals
    def _restore_globals(self):
        """Restore the task function's original globals after compiling."""
        self._function.func_globals.clear()
        self._function.func_globals.update(self._globals)
        del self._globals
    #@-node:_restore_globals
    #@+node:free
    def free(self):
        """Unbook all this project's tasks from all resources and return
        the project's resources."""
        all_resources = self.all_resources()
        for r in all_resources:
            r().unbook_tasks_of_project(self.id, self.scenario)
        for t in self:
            t.booked_resource = ()
        return all_resources
    #@-node:free
    #@+node:_get_balancing_list
    def _get_balancing_list(self):
        """Return the allocation order of tasks, using a per-source cache."""
        try:
            cached_list = balancing_cache[self._function.org_code]
            if len(cached_list) != len(tuple(self)):
                # different scenarios can have different tasks
                raise KeyError()
        except KeyError:
            cached_list = _build_balancing_list(self)
            balancing_cache[self._function.org_code] = cached_list
        else:
            # map the cached tasks onto this project instance's tasks
            cached_list = [ self.get_task(t.path) for t in cached_list ]
        return cached_list
    #@-node:_get_balancing_list
    #@+node:snapshot
    def snapshot(self, indent="", name=None):
        """Serialize the project as source text, injecting resource classes,
        the snapshot flag and the current time."""
        text = Task.snapshot(self, indent, name)
        lines = text.splitlines(True)
        indent += "    "
        def make_resource(r):
            return '%sclass %s(Resource): title = "%s"\n' \
                   % (indent, r.name, r.title)
        # NOTE(review): "%x" is locale-dependent; confirm snapshots are
        # only re-read with the same locale settings.
        now = datetime.datetime.now().strftime("%x %H:%M")
        resource_text = map(lambda r: make_resource(r), self.all_resources())
        lines.insert(1, "%sfrom faces import Resource\n" % indent)
        lines.insert(2, "".join(resource_text) + "\n")
        lines.insert(3, '%snow = "%s"\n' % (indent, now))
        lines.insert(4, '%sis_snapshot = True\n' % indent)
        return "".join(lines)
    #@-node:snapshot
    #@-others
#@+node:class Project
class Project(_ProjectBase):
    """
    Generates a Project without allocating resources.
    @param top_task: Specifies the highest function of a project definition.
    @param scenario: Specifies the name of the scenario which should be scheduled.
    @param id: Specifies a unique identification name to distinguish the project from
               other projects in the resource database. The default value for id
               is the name of top_task.
    """
    #@ << class Project declarations >>
    #@+node:<< class Project declarations >>
    __call_completion__ = 'Project(|top_task, scenario="_default", id=None)'
    #@-node:<< class Project declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, top_task, scenario="_default", id=None):
        _ProjectBase.__init__(self, top_task, scenario, id)
        no_snapshot = not self.is_snapshot
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            # run constraint checks only on live (non-snapshot) projects
            no_snapshot and t.check()
        self._restore_globals()
    #@-node:__init__
    #@-others
#@+node:class _AllocationPoject
class _AllocationPoject(_ProjectBase):
    # NOTE(review): class name misspells "Project"; kept because subclasses
    # reference it by this exact name.
    #@ @+others
    #@+node:unfreeze_parents
    def unfreeze_parents(self):
        """Drop cached start/end on parent tasks (unless explicitly set by
        the user) so actual data can propagate upward again."""
        if self.has_actual_data:
            for t in filter(lambda t: t.children, self):
                if not t._original_values.has_key("start"): t._unfreeze("start")
                if not t._original_values.has_key("end"): t._unfreeze("end")
    #@-node:unfreeze_parents
    #@-others
#@+node:class BalancedProject
class BalancedProject(_AllocationPoject):
"""
Generates a project with allocated resources. The tasks are balanced
to fit the resources load conditions.
"""
#@ << class BalancedProject declarations >>
#@+node:<< class BalancedProject declarations >>
__call_completion__ = """BalancedProject(|top_task, scenario="_default",
id=None, balance=SMART, performed=None)"""
#@-node:<< class BalancedProject declarations >>
#@nl
#@ @+others
#@+node:__init__
    def __init__(self, top_task, scenario="_default",
                 id=None, balance=SMART, performed=None):
        """Build the project, merge optional performed (actual) data and
        allocate resources, honoring snapshot mode."""
        _AllocationPoject.__init__(self, top_task, scenario, id)
        self.balance = balance
        if performed:
            self._distribute_performed(performed)
            self.has_actual_data = True
        no_snapshot = not self.is_snapshot
        if no_snapshot:
            self.allocate()
        else:
            self.allocate_snapshot()
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            # constraint checks only apply to live (non-snapshot) projects
            no_snapshot and t.check()
        self._restore_globals()
#@nonl
#@-node:__init__
    #@+node:allocate_snapshot
    def allocate_snapshot(self):
        """Replay recorded (performed) bookings when loading a snapshot.

        Only schedulable leaf tasks (with a resource, not milestones, no
        children) carry bookings; no balancing is performed.
        """
        all_resources = self.free()
        # NOTE(review): both locals below are assigned but never read.
        # ``has_actual_data`` in particular looks like it was meant to be
        # ``self.has_actual_data = True`` -- confirm before changing.
        scenario = self.scenario
        has_actual_data = True
        for t in self:
            if not t.resource or t.milestone or t.children:
                continue  # skip milestones and container tasks
            t._convert_performed(all_resources)
            t._allocate_performed(t._performed)
    #@-node:allocate_snapshot
#@+node:allocate
def allocate(self):
all_resources = self.free()
balancing_list = self._get_balancing_list()
scenario = self.scenario
#for t in balancing_list:
# print t.path
for t in balancing_list:
t._compile([], True)
if not t.resource or t.milestone or t.children:
continue
if t._convert_performed(all_resources):
has_actual_data = True
try:
t._allocate_performed(t._performed)
except AttributeError:
pass
allocator = _allocators[t.balance]
min_val = None
min_state = None
for p in range(t.resource._permutation_count()):
state = t._test_allocation(p, allocator)
if not state: continue
to_minimize = state[0]
if not min_val or min_val > to_minimize:
min_val = to_minimize
min_state = state
if min_state:
t._allocate(min_state, allocator)
elif t.performed_start:
# t could not be allocated ==>
# performance data holds all information
t.start = t._to_start(t.performed_start)
t.end = t._to_end(t.performed_end)
self.unfreeze_parents()
#@-node:allocate
    #@+node:_distribute_performed
    def _distribute_performed(self, performed):
        """Attach performance (tracking) records to the tasks they belong to.

        ``performed`` is a sequence of records whose first element is an
        absolute activity path; only records for this project are used.
        Records that resolve to no leaf task produce a warning and are
        skipped.
        """
        project_id = self._idendity_()
        plen = len(project_id)
        # keep only records that belong to this project
        # NOTE(review): Python 2 semantics -- ``filter`` must return a list
        # here because of the in-place ``sort`` below.
        performed = filter(lambda item: item[0].startswith(project_id),
                           performed)
        performed.sort()
        task = None
        for item in performed:
            path = item[0]
            # rebase the absolute path onto this project's "root" task
            rpath = "root" + path[plen:]
            task = self.get_task(rpath)
            if not task:
                #@ << extract task in activity path >>
                #@+node:<< extract task in activity path >>
                #@+at
                # A performed path can have sub activities appended to the
                # task path.
                # like:
                #
                # root.parent1.parent2.task.subactivity
                #
                # here the correct task path is:
                #
                # root.parent1.parent2.task
                #
                #@-at
                #@@code
                orpath = rpath
                while not task:
                    #path can specify a sub module
                    #find the correct path to the module
                    try:
                        last_dot = rpath.rindex(".", 0, len(rpath))
                    except ValueError:
                        break
                    rpath = rpath[:last_dot]
                    task = self.get_task(rpath)
                item = list(item)
                # remember the stripped sub-activity suffix with the record
                item.append(orpath[len(rpath):])
                #@nonl
                #@-node:<< extract task in activity path >>
                #@nl
            if not task or task.children:
                self._warn("The performance data contain "
                           "a task with id '%s'. But such "
                           "a task does not exist in your "
                           "project." % path)
                continue
            if not isinstance(task.performed, list):
                task.performed = list(task.performed)
            task.performed.append(item[1:])
    #@nonl
    #@-node:_distribute_performed
#@-others
#@-node:class BalancedProject
#@+node:class AdjustedProject
class AdjustedProject(_AllocationPoject):
"""
Generates a project with allocated resources. The tasks are
adjusted to the actual tracking data and balanced to fit the
resources load conditions.
"""
#@ << class AdjustedProject declarations >>
#@+node:<< class AdjustedProject declarations >>
__call_completion__ = 'AdjustedProject(|base_project)'
#@-node:<< class AdjustedProject declarations >>
#@nl
#@ @+others
    #@+node:__init__
    def __init__(self, base_project):
        """Build an adjusted project on top of a BalancedProject.

        Copies the base project's identity and settings, re-balances the
        unfinished work around its tracking data, then freezes and checks
        every task.
        """
        _AllocationPoject.__init__(self, base_project._function,
                                   base_project.scenario,
                                   base_project.id)
        self.balance = base_project.balance
        self.has_actual_data = base_project.has_actual_data
        self.allocate(base_project)
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            t.check()
        self._restore_globals()
    #@-node:__init__
#@+node:allocate
def allocate(self, base):
balancing_list = self._get_balancing_list()
scenario = self.scenario
cal = self.calendar
now = cal.now
#for t in balancing_list:
# print t.path
#@ << free the resources, we have to rebook >>
#@+node:<< free the resources, we have to rebook >>
for t in balancing_list:
src = base.get_task(t.path)
if src.end > now or src.complete < 100:
for r in src._iter_booked_resources():
r.unbook_task(src)
#@nonl
#@-node:<< free the resources, we have to rebook >>
#@nl
for t in balancing_list:
src = base.get_task(t.path)
if src.end <= now and src.complete == 100:
#@ << copy the attribs of complete tasks >>
#@+node:<< copy the attribs of complete tasks >>
t.effort = src.effort
t.load = src.load
t.start = src.start
t.end = src.end
t.done = src.done
t.todo = src.todo
t.booked_resource = src.booked_resource
t.performed_resource = src.performed_resource
t._unfreeze("length")
t._unfreeze("duration")
#@nonl
#@-node:<< copy the attribs of complete tasks >>
#@nl
continue
t._compile([], True)
if not t.resource or t.milestone or t.children:
continue
# now allocate the uncomplete tasks
#@ << allocate performed data >>
#@+node:<< allocate performed data >>
try:
t._performed = src._performed
t._allocate_performed(t._performed)
except AttributeError:
pass
#@nonl
#@-node:<< allocate performed data >>
#@nl
allocator = _allocators[t.balance]
if src.start >= now:
#@ << allocate tasks, that have not begun yet >>
#@+node:<< allocate tasks, that have not begun yet >>
min_val = None
min_state = None
for p in range(t.resource._permutation_count()):
state = t._test_allocation(p, allocator)
if not state: continue
to_minimize = state[0]
if not min_val or min_val > to_minimize:
min_val = to_minimize
min_state = state
if min_state:
t._allocate(min_state, allocator)
elif t.performed_start:
t.start = t._to_start(t.performed_start)
t.end = t._to_end(t.performed_end)
#@-node:<< allocate tasks, that have not begun yet >>
#@nl
else:
#@ << allocate tasks, that are allready at work >>
#@+node:<< allocate tasks, that are allready at work >>
if t.__dict__.has_key("effort"):
t.effort = t._to_delta(src.done + src.todo).round()
resource = src.booked_resource or src.performed_resource
state = allocator.test_allocation(t, resource)
if state:
t._allocate(state, allocator)
#@nonl
#@-node:<< allocate tasks, that are allready at work >>
#@nl
self.unfreeze_parents()
#@nonl
#@-node:allocate
#@-others
#@-node:class AdjustedProject
#@-node:Projects
#@-others
"""
Attribute mit Bedeutung:
calendar
--------
minimum_time_unit |int in minutes|
working_days_per_week |int in days |
working_days_per_month|int in days |
working_days_per_year |int in days |
working_hours_per_day |int in hours |
vacation | [ one_day, (from, to), .. ] |
working_days
now
Task
-----
load
start
end
length
effort
duration
resource
booked_resource
milestone
complete
done
todo
priority
efficiency
buffer
children
depth
index
path
dont_inherit
performed_effort
performed_end
performed_start
sum()
min()
max()
costs()
indent_name()
max_load
copy_src (set: copy all attributes of another task
get: reference of copy)
balance
for gantt
-----
line
accumulate
Resource
----------
efficiency
load
vacation
max_load
"""
#@-node:@file task.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nearai/program_synthesis | program_synthesis/karel/dataset/karel_runtime.py | 1 | 9991 | # Code based on https://github.com/alts/karel
#-*- coding: utf-8 -*-
from __future__ import print_function
import collections
import re
import numpy as np
from collections import Counter
#from .hero import Hero
from program_synthesis.karel.dataset.utils import Tcolors
from program_synthesis.karel.dataset.utils import get_rng
def draw2d(array):
    """Print a 2-D array as text: '#' for positive cells, '.' otherwise."""
    lines = []
    for row in array:
        lines.append("".join("#" if val > 0 else "." for val in row))
    print("\n".join(lines))
def border_mask(array, value):
    """Set the outermost rows and columns of a 2-D array to ``value`` in place."""
    array[0, :] = value
    array[-1, :] = value
    array[:, 0] = value
    array[:, -1] = value
def event_callback_prototype(block_name, block_span, cond_span, cond_value,
                             selected_span):
    """Signature template for KarelRuntime event callbacks.

    Args:
        block_name: one of 'if', 'ifElse', 'while', 'repeat'.
        block_span: (m, n) -- token indices of the IF/IFELSE/WHILE/REPEAT
            keyword and of the closing i)/e)/w)/r) token.
        cond_span: (m, n) -- indices of the first and last token of the
            condition / repetition count, excluding the "c(" / "c)" markers.
        cond_value: True, False, or the current repetition number.
        selected_span: (m, n) -- the branch chosen by cond_value: the
            i(/w(/r( ... i)/w)/r) body when the condition holds (or the
            count is below the limit); otherwise the e( ... e) body, or
            block_span[1] twice when there is no else branch.

    This prototype documents the expected signature and must not be called.
    """
    raise NotImplementedError
# Immutable snapshot of a KarelRuntime's mutable state; produced by
# KarelRuntime.cached_state() and consumed by init_from_array() to make
# repeated re-initialisation on the same grid cheap.
CachedState = collections.namedtuple('CachedState',
    ('height', 'width', 'hero_pos', 'hero_dir', 'nonwalls'))
class KarelRuntime(object):
    """Executable Karel world: hero position/direction, walls and markers."""
    HERO_CHARS = u'↑>↓<'  # hero glyph per direction index (N, E, S, W)
    # Combining marks drawn over a marker digit when the hero shares the cell.
    HERO_COMB_CHARS = u'\u0305\u0355\u0332\u0354'
    WALL_CHAR = u'█'      # surrounding (border) wall
    OBSTACLE_CHAR = u'░'  # internal wall
    EMPTY_CHAR = u' '
    # (0, 0) is at bottom left corner; (h, w) is at top right
    DIRECTIONS = (
        (1, 0),   # north
        (0, 1),   # east
        (-1, 0),  # south
        (0, -1),  # west
    )
def __init__(self, action_callback=None, event_callback=None):
if action_callback is None:
self.action_callback = lambda *args: None
else:
self.action_callback = action_callback
if event_callback is None:
self.event_callback = lambda *args: None
else:
self.event_callback = event_callback
# Indiator array of size 15 x height x width (4 <= height, width <= 18)
# 1st axis:
# 0: Hero facing North
# 1: Hero facing East
# 2: Hero facing South
# 3: Hero facing West
# 4: Internal walls
# 5: Surrounding walls
# 6: 1 marker
# 7: 2 markers
# 8: 3 markers
# 9: 4 markers
# 10: 5 markers
# 11: 6 markers
# 12: 7 markers
# 13: 8 markers
# 14: 9 markers
# Borders of array have the surrounding walls bit set.
self.world = None
self.hero_pos = None
self.hero_dir = None
self.nonwalls = None
def init_randomly(self, world_size, max_marker_in_cell, wall_ratio,
marker_ratio, rng=None):
rng = get_rng(rng)
height, width = world_size
if height < 2 or width < 2:
raise Exception(" [!] `height` and `width` should be at least 2")
elif height > 16 or width > 16:
raise Exception(" [!] `height` and `width` should be at most 16")
# blank world
self.world = np.zeros((15, height + 2, width + 2), dtype=np.bool)
# internal walls
wall_array = rng.rand(height + 2, width + 2)
self.world[4][wall_array < wall_ratio] = 1
# external wall
border_mask(self.world[5], 1)
# hero
x = rng.randint(1, width)
y = rng.randint(1, height)
self.hero_pos = np.array([y, x])
self.hero_dir = rng.randint(4)
self.world[self.hero_dir, y, x] = 1
# markers
marker_array = rng.rand(height + 2, width + 2)
marker_array = (wall_array >= wall_ratio) & (marker_array < marker_ratio)
border_mask(marker_array, False)
# TODO Allow more than one marker at a given location
self.world[6][marker_array > 0] = 1
# Pad world to 18x18
#self.world = np.pad(self.world, ((0, 0), (0, 18 - self.world.shape[0]),
# (0, 18 - self.world.shape[1])),
# 'constant', 0)
self.compute_nonwalls()
def draw(self, prefix="", skip_number=False, with_color=False, no_print=False):
canvas = np.full(self.world.shape[1:], self.EMPTY_CHAR, dtype='U2')
canvas[self.world[4]] = self.OBSTACLE_CHAR
canvas[self.world[5]] = self.WALL_CHAR
for count, i in enumerate(range(6, 15)):
canvas[self.world[i]] = str(count + 1)
if canvas[tuple(self.hero_pos)] == self.EMPTY_CHAR:
canvas[tuple(self.hero_pos)] = self.hero_char()
else:
canvas[tuple(self.hero_pos)] += self.HERO_COMB_CHARS[self.hero_dir]
texts = []
for i in range(self.world.shape[1] - 1, -1, -1):
text = ''.join(canvas[i])
if not no_print:
print(text)
texts.append(text)
if no_print:
return texts
    @property
    def state(self):
        # Raw 15 x H x W boolean indicator array (layout documented in
        # __init__ / init_randomly).
        return self.world

    def init_from_array(self, state, cached=None):
        """Initialise from a 15 x H x W indicator array.

        When ``cached`` (a CachedState from a previous run on the same
        grid) is given, the size/hero scan below is skipped entirely.
        """
        if cached:
            self.world = state[:, :cached.height, :cached.width]
            self.hero_pos = cached.hero_pos.copy()
            self.hero_dir = cached.hero_dir
            # NOTE(review): nonwalls is shared (not copied) with the cached
            # state -- safe only while walls never change; confirm.
            self.nonwalls = cached.nonwalls
            return
        # Grid size is recovered from the surrounding-wall plane (index 5).
        ys, xs = np.where(state[5])
        height, width = ys.max() + 1, xs.max() + 1
        self.world = state[:, :height, :width]
        # Exactly one cell across planes 0-3 must be set: the hero.
        pos = list(zip(*np.where(np.any(state[:4], axis=0))))
        if len(pos) > 1:
            raise ValueError('Invalid state: too many hero positions')
        self.hero_pos = np.array(pos[0])
        # Exactly one of the four direction planes must be non-empty.
        direction, = np.where(np.any(state[:4], axis=(1, 2)))
        if len(direction) > 1:
            raise ValueError('Invalid state: too many hero directions')
        self.hero_dir = direction[0]
        self.compute_nonwalls()
def compute_nonwalls(self):
self.nonwalls = np.logical_not(self.world[4:6].any(axis=0))
    def cached_state(self):
        """Return a CachedState snapshot for fast re-init via init_from_array."""
        return CachedState(self.world.shape[1], self.world.shape[2],
                           self.hero_pos, self.hero_dir, self.nonwalls)

    def draw_exception(self, exception):
        # Intentional no-op hook; callers may override to render runtime
        # errors alongside the world.
        pass

    def hero_char(self):
        # Glyph for the hero's current facing (indexes into HERO_CHARS).
        return self.HERO_CHARS[self.hero_dir]
def move(self, metadata=None):
'''Move'''
if not self.frontIsClear():
retval = False
else:
self.world[self.hero_dir][tuple(self.hero_pos)] = False
self.hero_pos += self.DIRECTIONS[self.hero_dir]
self.world[self.hero_dir][tuple(self.hero_pos)] = True
retval = True
self.action_callback('move', retval, metadata)
return retval
def turn_left(self, metadata=None):
'''Turn left'''
self.world[self.hero_dir][tuple(self.hero_pos)] = False
self.hero_dir -= 1
self.hero_dir %= 4
self.world[self.hero_dir][tuple(self.hero_pos)] = True
self.action_callback('turnLeft', True, metadata)
return True
def turn_right(self, metadata=None):
'''Turn right'''
self.world[self.hero_dir][tuple(self.hero_pos)] = False
self.hero_dir += 1
self.hero_dir %= 4
self.world[self.hero_dir][tuple(self.hero_pos)] = True
self.action_callback('turnRight', True, metadata)
return True
def pick_marker(self, metadata=None):
'''Pick marker'''
marker_info = self.world[6:15, self.hero_pos[0], self.hero_pos[1]]
if marker_info[0]:
marker_info[0] = False
retval = True
elif not np.any(marker_info):
retval = False
else:
marker_info[:] = np.roll(marker_info, shift=-1)
retval = True
self.action_callback('pickMarker', retval, metadata)
return retval
def put_marker(self, metadata=None):
'''Put marker'''
marker_info = self.world[6:15, self.hero_pos[0], self.hero_pos[1]]
if not np.any(marker_info):
marker_info[0] = True
retval = True
elif marker_info[-1]:
retval = False
else:
marker_info[:] = np.roll(marker_info, shift=1)
retval = True
self.action_callback('putMarker', retval, metadata)
return retval
def front_is_clear(self):
'''Check front is clear'''
next_pos = self.hero_pos + self.DIRECTIONS[self.hero_dir]
return self.nonwalls[next_pos[0], next_pos[1]]
def left_is_clear(self):
'''Check left is clear'''
next_pos = self.hero_pos + self.DIRECTIONS[(self.hero_dir - 1) % 4]
return self.nonwalls[next_pos[0], next_pos[1]]
def right_is_clear(self):
'''Check right is clear'''
next_pos = self.hero_pos + self.DIRECTIONS[(self.hero_dir + 1) % 4]
return self.nonwalls[next_pos[0], next_pos[1]]
def markers_present(self):
'''Check markers present'''
return self.world[6:15, self.hero_pos[0], self.hero_pos[1]].any()
    def no_markers_present(self):
        '''Check no markers present (negation of markers_present).'''
        return not self.markers_present()

    @property
    def facing_north(self):
        # Direction indices follow DIRECTIONS: 0=north, 1=east, 2=south,
        # 3=west.
        return self.hero_dir == 0

    @property
    def facing_south(self):
        return self.hero_dir == 2

    @property
    def facing_west(self):
        return self.hero_dir == 3

    @property
    def facing_east(self):
        return self.hero_dir == 1

    @property
    def facing_idx(self):
        # Raw direction index (0-3) as used by DIRECTIONS / HERO_CHARS.
        return self.hero_dir

    # camelCase aliases matching the Karel DSL token names used by the
    # parser/executor.
    frontIsClear = front_is_clear
    leftIsClear = left_is_clear
    rightIsClear = right_is_clear
    markersPresent = markers_present
    noMarkersPresent = no_markers_present
    turnRight = turn_right
    turnLeft = turn_left
    pickMarker = pick_marker
    putMarker = put_marker
| apache-2.0 |
berkeley-stat222/mousestyles | mousestyles/dynamics/tests/test_dynamics.py | 3 | 9981 | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import pytest
import numpy as np
import pandas as pd
from mousestyles.dynamics import (create_time_matrix,
get_prob_matrix_list,
get_prob_matrix_small_interval,
mcmc_simulation, get_score,
find_best_interval)
def test_creat_time_matrix_input():
    """Input validation of create_time_matrix.

    (The 'creat' typo is part of the public test name and is kept.)
    """
    # checking functions raise the correct errors for wrong input
    # time_gap is zero
    err_string = "time_gap should be nonnegative int or float"
    with pytest.raises(ValueError) as excinfo:
        create_time_matrix(combined_gap=4, time_gap=0, days_index=137)
    assert excinfo.value.args[0] == err_string
    # time_gap is negative
    with pytest.raises(ValueError) as excinfo:
        create_time_matrix(combined_gap=4, time_gap=-1, days_index=137)
    assert excinfo.value.args[0] == err_string
    # combined_gap is negative value
    err_string = "combined_gap should be nonnegative int or float"
    with pytest.raises(ValueError) as excinfo:
        create_time_matrix(combined_gap=-1, time_gap=1, days_index=137)
    assert excinfo.value.args[0] == err_string
    # days_index is negative value
    with pytest.raises(ValueError) as excinfo:
        create_time_matrix(combined_gap=4, time_gap=1, days_index=-1)
    assert excinfo.value.args[0] == "days_index should be nonnegative int"
    # days_index is float value
    with pytest.raises(ValueError) as excinfo:
        create_time_matrix(combined_gap=4, time_gap=1, days_index=0.1)
    assert excinfo.value.args[0] == "days_index should be nonnegative int"


def test_creat_time_matrix():
    # Checking the function outputs the correct time matrix (spot check of
    # one known cell).
    matrix = create_time_matrix(combined_gap=4, time_gap=1, days_index=0)
    assert matrix.iloc[0, 2181] == 1.0
def test_get_prob_matrix_list_input():
    """Input validation of get_prob_matrix_list."""
    # time_df is not a DataFrame
    with pytest.raises(ValueError) as excinfo:
        get_prob_matrix_list(time_df=5, interval_length=1000)
    assert excinfo.value.args[0] == "time_df should be pandas DataFrame"
    # interval_length is 0
    # fixture: three identical rows with states 0..3 in runs of 13/10/10/10
    row_i = np.hstack((np.zeros(13), np.ones(10),
                       np.ones(10) * 2, np.ones(10) * 3))
    time_df_eg = np.vstack((row_i, row_i, row_i))
    time_df_eg = pd.DataFrame(time_df_eg)
    with pytest.raises(ValueError) as excinfo:
        get_prob_matrix_list(time_df=time_df_eg, interval_length=0)
    assert excinfo.value.args[0] == "interval_length should be positive int"
    # interval_length is not int
    with pytest.raises(ValueError) as excinfo:
        get_prob_matrix_list(time_df=time_df_eg, interval_length=0.5)
    assert excinfo.value.args[0] == "interval_length should be positive int"


def test_get_prob_matrix_list():
    # Within the first 10 columns the state is always 0, so the first
    # interval's matrix has P[0, 0] == 1 and zero everywhere else.
    row_i = np.hstack((np.zeros(13), np.ones(10),
                       np.ones(10) * 2, np.ones(10) * 3))
    time_df_eg = np.vstack((row_i, row_i, row_i))
    time_df_eg = pd.DataFrame(time_df_eg)
    mat_list = get_prob_matrix_list(time_df_eg,
                                    interval_length=10)
    assert mat_list[0][0, 0] == 1.
    assert sum(sum(mat_list[0])) == 1.
def test_get_prob_matrix_small_interval_input():
    """Input validation of get_prob_matrix_small_interval."""
    # string_list is not a list
    with pytest.raises(ValueError) as excinfo:
        get_prob_matrix_small_interval(string_list=np.array([1, 2]))
    assert excinfo.value.args[0] == "string_list should be a list"
    # items in string_list are not strings
    time_list = [0, 1, 2]
    with pytest.raises(ValueError) as excinfo:
        get_prob_matrix_small_interval(string_list=time_list)
    assert excinfo.value.args[0] == "items in string_list should be str"


def test_get_prob_matrix_small_interval():
    # Transitions observed in '002', '001', '012': from state 0 there are
    # two 0->0, two 0->1 and one 0->2 (probabilities .4/.4/.2); from state 1
    # the only transition is 1->2.
    time_list = ['002', '001', '012']
    example = get_prob_matrix_small_interval(time_list)
    assert example[0, 0] == 0.4
    assert example[0, 1] == 0.4
    assert example[0, 2] == 0.2
    assert example[1, 2] == 1.
    assert sum(example[0, :]) == 1.
def test_mcmc_simulation_input():
    """Input validation of mcmc_simulation."""
    # mat_list is not a list
    with pytest.raises(ValueError) as excinfo:
        mcmc_simulation(mat_list=np.array([1, 2]), n_per_int=10)
    assert excinfo.value.args[0] == "mat_list should be a list"
    # items in mat_list are not numpy arrays
    time_list = [0, 1, 2]
    with pytest.raises(ValueError) as excinfo:
        mcmc_simulation(mat_list=time_list, n_per_int=10)
    assert excinfo.value.args[0] == "items in mat_list should be numpy array"
    # n_per_int is not an integer
    # fixture: identity matrix, then a matrix swapping states 0 and 1
    mat0 = np.zeros(16).reshape(4, 4)
    np.fill_diagonal(mat0, val=1)
    mat1 = np.zeros(16).reshape(4, 4)
    mat1[0, 1] = 1
    mat1[1, 0] = 1
    mat1[2, 2] = 1
    mat1[3, 3] = 1
    mat_list_example = [mat0, mat1]
    with pytest.raises(ValueError) as excinfo:
        mcmc_simulation(mat_list=mat_list_example, n_per_int=0.5)
    assert excinfo.value.args[0] == "n_per_int should be positive int"
    # n_per_int is a negative integer
    with pytest.raises(ValueError) as excinfo:
        mcmc_simulation(mat_list=mat_list_example, n_per_int=-1)
    assert excinfo.value.args[0] == "n_per_int should be positive int"


def test_mcmc_simulation():
    # Identity transitions in the first interval keep the chain at 0; the
    # 0<->1 swap matrix then alternates 1, 0, 1, 0, ... in the second.
    mat0 = np.zeros(16).reshape(4, 4)
    np.fill_diagonal(mat0, val=1)
    mat1 = np.zeros(16).reshape(4, 4)
    mat1[0, 1] = 1
    mat1[1, 0] = 1
    mat1[2, 2] = 1
    mat1[3, 3] = 1
    mat_list_example = [mat0, mat1]
    example = mcmc_simulation(mat_list_example, 10)
    assert sum(example[:10]) == 0.
    assert sum(example[10:]) == 5.
    assert example[10] == 1.
    assert example[11] == 0.
def test_get_score_input():
    """Input validation of get_score."""
    # true_day is not a numpy array
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=0, simulated_day=np.zeros(13))
    assert excinfo.value.args[0] == "true_day should be numpy array"
    # simulated_day is not a numpy array
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=np.zeros(13), simulated_day=0)
    assert excinfo.value.args[0] == "simulated_day should be numpy array"
    # weight should be a list
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
                  weight=0)
    assert excinfo.value.args[0] == "weight should be list"
    # length of weight should be exactly 4
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
                  weight=[0])
    assert excinfo.value.args[0] == "Length of weight should be 4"
    # simulated_day must be at least as long as true_day
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=np.zeros(13), simulated_day=np.zeros(5))
    error_message = "Length of simulated_day is smaller than true_day"
    assert excinfo.value.args[0] == error_message
    # all the weights must be positive
    with pytest.raises(ValueError) as excinfo:
        get_score(true_day=np.zeros(13), simulated_day=np.zeros(13),
                  weight=[-1, 2, 3, 4])
    assert excinfo.value.args[0] == "All the weights should be positive"


def test_get_score():
    # An all-wrong simulation scores 0; a perfect match scores 10.
    true_day_1 = np.zeros(13)
    simulated_day_1 = np.ones(13)
    score_1 = get_score(true_day_1, simulated_day_1)
    true_day_2 = np.ones(13)
    simulated_day_2 = np.ones(13)
    score_2 = get_score(true_day_2, simulated_day_2)
    assert score_1 == 0.0
    assert score_2 == 10.0
def test_find_best_interval_input():
    """Input validation of find_best_interval."""
    # df is not a DataFrame
    with pytest.raises(ValueError) as excinfo:
        find_best_interval(df=5, strain_num=2)
    assert excinfo.value.args[0] == "df should be pandas DataFrame"
    # strain_num is not an integer in {0, 1, 2}
    row_i = np.hstack((np.zeros(13), np.ones(10),
                       np.ones(10) * 2, np.ones(10) * 3))
    time_df_eg = np.vstack((row_i, row_i, row_i))
    time_df_eg = pd.DataFrame(time_df_eg)
    time_df_eg.rename(columns={0: 'strain'}, inplace=True)
    with pytest.raises(ValueError) as excinfo:
        find_best_interval(df=time_df_eg, strain_num=3)
    assert excinfo.value.args[0] == "strain_num can only be 0, 1, 2"
    # interval_length_initial must be a numpy array of positive integers
    with pytest.raises(ValueError) as excinfo:
        find_best_interval(df=time_df_eg, strain_num=0,
                           interval_length_initial=3)
    assert excinfo.value.args[0] == "interval_length_initial positive np.array"
    with pytest.raises(ValueError) as excinfo:
        find_best_interval(df=time_df_eg, strain_num=0,
                           interval_length_initial=np.array([1, 2, -1]))
    assert excinfo.value.args[0] == "interval_length_initial positive np.array"
    with pytest.raises(ValueError) as excinfo:
        find_best_interval(df=time_df_eg, strain_num=0,
                           interval_length_initial=np.array([1, 2, 3.1]))
    assert excinfo.value.args[0] == "interval_length_initial positive np.array"


def test_find_best_interval():
    # All-zero data: the smallest candidate interval (10) wins, the fake day
    # reproduces the all-zero truth, and the score is close to 1.
    row_i = np.hstack((np.zeros(40)))
    time_df_eg = np.vstack((row_i, row_i, row_i))
    time_df_eg = pd.DataFrame(time_df_eg)
    time_df_eg.rename(columns={0: 'strain'}, inplace=True)
    time, fake, score = find_best_interval(time_df_eg, 0,
                                           np.arange(10, 40, 10))
    assert time == 10
    assert np.array_equal(fake, np.zeros(40))
    assert 1 - score < 0.05
| bsd-2-clause |
haeusser/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/bijectors/conditional_bijector_test.py | 22 | 2116 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ConditionalBijector Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors import conditional_bijector
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class _TestBijector(conditional_bijector.ConditionalBijector):
    """Stub bijector whose hooks raise ValueError(name, arg1, arg2).

    The raised arguments let the test observe that **condition_kwargs are
    forwarded to each underscore hook.
    """

    def __init__(self):
        super(_TestBijector, self).__init__(
            event_ndims=0,
            graph_parents=[],
            is_constant_jacobian=True,
            validate_args=False,
            dtype=dtypes.float32,
            name="test_bijector")

    def _forward(self, _, arg1, arg2):
        raise ValueError("forward", arg1, arg2)

    def _inverse(self, _, arg1, arg2):
        raise ValueError("inverse", arg1, arg2)

    def _inverse_log_det_jacobian(self, _, arg1, arg2):
        raise ValueError("inverse_log_det_jacobian", arg1, arg2)

    def _forward_log_det_jacobian(self, _, arg1, arg2):
        raise ValueError("forward_log_det_jacobian", arg1, arg2)
class ConditionalBijectorTest(test.TestCase):
    """Checks that condition kwargs reach every ConditionalBijector method."""

    def testConditionalBijector(self):
        b = _TestBijector()
        for name in ["forward", "inverse", "inverse_log_det_jacobian",
                     "forward_log_det_jacobian"]:
            method = getattr(b, name)
            # The stub raises ValueError(name, 'b1', 'b2'); the regexp
            # asserts both kwargs were forwarded to the hook.
            with self.assertRaisesRegexp(ValueError, name + ".*b1.*b2"):
                method(1.0, arg1="b1", arg2="b2")
if __name__ == "__main__":
test.main()
| apache-2.0 |
ProfessorKaos64/openlierox | tools/DedicatedServerVideo/gdata/opensearch/data.py | 131 | 1534 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the data classes of the OpenSearch Extension"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
OPENSEARCH_TEMPLATE_V1 = '{http://a9.com/-/spec/opensearchrss/1.0//}%s'
OPENSEARCH_TEMPLATE_V2 = '{http://a9.com/-/spec/opensearch/1.1//}%s'
class ItemsPerPage(atom.core.XmlElement):
    """Describes the number of items that will be returned per page for paged feeds"""
    # A tuple _qname registers both the OpenSearch 1.0 and 1.1 namespaces.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'itemsPerPage',
              OPENSEARCH_TEMPLATE_V2 % 'itemsPerPage')
class StartIndex(atom.core.XmlElement):
    """Describes the starting index of the contained entries for paged feeds"""
    # A tuple _qname registers both the OpenSearch 1.0 and 1.1 namespaces.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'startIndex',
              OPENSEARCH_TEMPLATE_V2 % 'startIndex')
class TotalResults(atom.core.XmlElement):
    """Describes the total number of results associated with this feed"""
    # A tuple _qname registers both the OpenSearch 1.0 and 1.1 namespaces.
    _qname = (OPENSEARCH_TEMPLATE_V1 % 'totalResults',
              OPENSEARCH_TEMPLATE_V2 % 'totalResults')
| lgpl-2.1 |
simonwydooghe/ansible | test/units/modules/network/slxos/test_slxos_interface.py | 38 | 4938 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosInterfaceModule(TestSlxosModule):
    """Unit tests for the slxos_interface module.

    Verifies the CLI commands generated for description/speed/mtu/enabled
    and the error paths, with all device I/O patched out.
    """
    module = slxos_interface

    def setUp(self):
        # Patch the module's device-facing entry points so no connection is
        # attempted; canned fixtures stand in for the running config.
        super(TestSlxosInterfaceModule, self).setUp()
        self._patch_get_config = patch(
            'ansible.modules.network.slxos.slxos_interface.get_config'
        )
        self._patch_load_config = patch(
            'ansible.modules.network.slxos.slxos_interface.load_config'
        )
        self._patch_exec_command = patch(
            'ansible.modules.network.slxos.slxos_interface.exec_command'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()
        self._exec_command = self._patch_exec_command.start()

    def tearDown(self):
        super(TestSlxosInterfaceModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()
        self._patch_exec_command.stop()

    def load_fixtures(self, commands=None):
        # get_config returns the canned running configuration; load_config
        # is a no-op so only the generated commands are observable.
        config_file = 'slxos_config_config.cfg'
        self._get_config.return_value = load_fixture(config_file)
        self._load_config.return_value = None

    def test_slxos_interface_description(self, *args, **kwargs):
        # Setting a description emits the interface stanza plus the line.
        set_module_args(dict(
            name='Ethernet 0/2',
            description='show version'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'description show version'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_speed(self, *args, **kwargs):
        set_module_args(dict(
            name='Ethernet 0/2',
            speed=1000
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'speed 1000'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_mtu(self, *args, **kwargs):
        set_module_args(dict(
            name='Ethernet 0/2',
            mtu=1548
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/2',
                    'mtu 1548'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_mtu_out_of_range(self, *args, **kwargs):
        # MTU outside [1548, 9216] must fail with the module's range message.
        set_module_args(dict(
            name='Ethernet 0/2',
            mtu=15000
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(
            result,
            {
                'msg': 'mtu must be between 1548 and 9216',
                'failed': True
            }
        )

    def test_slxos_interface_enabled(self, *args, **kwargs):
        # Enabling a shut interface emits 'no shutdown'.
        set_module_args(dict(
            name='Ethernet 0/1',
            enabled=True
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface Ethernet 0/1',
                    'no shutdown'
                ],
                'changed': True
            }
        )

    def test_slxos_interface_invalid_argument(self, *args, **kwargs):
        # Unknown parameters are rejected by AnsibleModule argument parsing.
        set_module_args(dict(
            name='Ethernet 0/1',
            shawshank='Redemption'
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(result['failed'], True)
        self.assertTrue(re.match(
            r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
            'shawshank Supported parameters include: aggregate, '
            'delay, description, enabled, mtu, name, neighbors, '
            'rx_rate, speed, state, tx_rate',
            result['msg']
        ))
| gpl-3.0 |
ChrisEby/M101P-MongoDbForDevelopers | Week 3/hw3-2and3-3/userDAO.py | 32 | 2585 |
#
# Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import hmac
import random
import string
import hashlib
import pymongo
class UserDAO:
    """Data access object for the ``users`` collection.

    Handles password hashing (sha256 of pw + 5-letter salt), login
    validation and user creation.  Python 2 code (``print`` statements,
    implicit str hashing).
    """
    def __init__(self, db):
        self.db = db
        self.users = self.db.users
        # NOTE(review): unused in this class and hard-coded in source --
        # confirm whether it is needed and move it out of the code if so.
        self.SECRET = 'verysecret'

    # makes a little salt
    def make_salt(self):
        # SECURITY: ``random`` is not a CSPRNG; a real deployment should
        # draw the salt from os.urandom / the ``secrets`` module.
        salt = ""
        for i in range(5):
            salt = salt + random.choice(string.ascii_letters)
        return salt

    # implement the function make_pw_hash(name, pw) that returns a hashed password
    # of the format:
    # HASH(pw + salt),salt
    # use sha256
    def make_pw_hash(self, pw, salt=None):
        # (style note: ``salt is None`` would be the idiomatic test)
        if salt == None:
            salt = self.make_salt();
        return hashlib.sha256(pw + salt).hexdigest() + "," + salt

    # Validates a user login. Returns user record or None
    def validate_login(self, username, password):
        user = None
        try:
            user = self.users.find_one({'_id': username})
        except:
            # NOTE(review): bare except hides real errors; narrow it to the
            # PyMongo exception types.
            print "Unable to query database for user"
        if user is None:
            print "User not in database"
            return None
        # stored format is "<hexdigest>,<salt>"
        salt = user['password'].split(',')[1]
        if user['password'] != self.make_pw_hash(password, salt):
            print "user password is not a match"
            return None
        # Looks good
        return user

    # creates a new user in the users collection
    def add_user(self, username, password, email):
        password_hash = self.make_pw_hash(password)
        user = {'_id': username, 'password': password_hash}
        if email != "":
            user['email'] = email
        try:
            # NOTE: ``insert``/``safe=True`` were removed in modern PyMongo;
            # the replacement is insert_one() with a write concern.
            self.users.insert(user, safe=True)
        except pymongo.errors.OperationFailure:
            print "oops, mongo error"
            return False
        except pymongo.errors.DuplicateKeyError as e:
            print "oops, username is already taken"
            return False
        return True
| mit |
jiayliu/apprtc | src/third_party/oauth2client/util.py | 174 | 5670 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
__author__ = ['rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
__all__ = [
'positional',
'POSITIONAL_WARNING',
'POSITIONAL_EXCEPTION',
'POSITIONAL_IGNORE',
]
import inspect
import logging
import types
import urllib
import urlparse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
logger = logging.getLogger(__name__)
POSITIONAL_WARNING = 'WARNING'
POSITIONAL_EXCEPTION = 'EXCEPTION'
POSITIONAL_IGNORE = 'IGNORE'
POSITIONAL_SET = frozenset([POSITIONAL_WARNING, POSITIONAL_EXCEPTION,
POSITIONAL_IGNORE])
positional_parameters_enforcement = POSITIONAL_WARNING
def positional(max_positional_args):
    """A decorator declaring that only the first N arguments may be positional.

    Emulates Python 3 keyword-only parameters::

        @positional(1)
        def fn(pos1, kwonly1=None, kwonly2=None):
            ...

        fn(10, 'kw1')        # flagged
        fn(10, kwonly1='kw1')  # ok

    With ``@positional(0)`` every parameter becomes keyword-only, so a
    keyword argument without a default is effectively required.  For
    instance/class methods, remember to count ``self``/``cls`` in N.

    The reaction to a violation is governed by the module-level
    ``util.positional_parameters_enforcement`` flag: POSITIONAL_EXCEPTION
    raises, POSITIONAL_WARNING logs, POSITIONAL_IGNORE does nothing.

    Args:
      max_positional_args: maximum number of positional arguments; every
        parameter after this index must be passed by keyword.  May also be
        the decorated function itself (bare ``@positional`` usage), in which
        case the limit is derived from the count of parameters that have
        defaults.

    Returns:
      A decorator enforcing the positional-argument limit.

    Raises:
      TypeError when a keyword-only argument is passed positionally and
      enforcement is set to POSITIONAL_EXCEPTION.
    """
    def positional_decorator(wrapped):
        def positional_wrapper(*args, **kwargs):
            if len(args) > max_positional_args:
                plural_s = 's' if max_positional_args != 1 else ''
                message = '%s() takes at most %d positional argument%s (%d given)' % (
                    wrapped.__name__, max_positional_args, plural_s, len(args))
                if positional_parameters_enforcement == POSITIONAL_EXCEPTION:
                    raise TypeError(message)
                if positional_parameters_enforcement == POSITIONAL_WARNING:
                    logger.warning(message)
                # POSITIONAL_IGNORE: fall through silently.
            return wrapped(*args, **kwargs)
        return positional_wrapper

    if isinstance(max_positional_args, (int, long)):
        return positional_decorator
    # Bare-decorator form: derive the limit from the decorated function's
    # signature (parameters without defaults stay positional).
    args, _, _, defaults = inspect.getargspec(max_positional_args)
    return positional(len(args) - len(defaults))(max_positional_args)
def scopes_to_string(scopes):
    """Convert a scope value to a single string.

    A string is passed through untouched; any other iterable of scope
    strings is joined with spaces.

    Args:
      scopes: string or iterable of strings, the scopes.

    Returns:
      The scopes formatted as a single space-separated string.
    """
    if not isinstance(scopes, types.StringTypes):
        scopes = ' '.join(scopes)
    return scopes
def dict_to_tuple_key(dictionary):
    """Convert a dictionary to a tuple usable as an immutable key.

    The items are sorted first, so logically equivalent dictionaries always
    produce the identical tuple.

    Args:
      dictionary: the dictionary to use as the key.

    Returns:
      A tuple of (key, value) pairs in natural sorted order.
    """
    pairs = list(dictionary.items())
    pairs.sort()
    return tuple(pairs)
def _add_query_parameter(url, name, value):
    """Add (or replace) a query parameter in a URL.

    Args:
      url: string, url to add the query parameter to.
      name: string, query parameter name.
      value: string, query parameter value.

    Returns:
      The updated URL; the original url is returned unchanged when value
      is None.
    """
    if value is None:
        return url
    parts = list(urlparse.urlparse(url))
    # parts[4] is the query component; rebuild it with the new pair.
    params = dict(parse_qsl(parts[4]))
    params[name] = value
    parts[4] = urllib.urlencode(params)
    return urlparse.urlunparse(parts)
| bsd-3-clause |
jpzm/clads | bind/clads/list.py | 1 | 3747 | # Copyright (C) 2012 Joao Paulo de Souza Medeiros
#
# Author(s): Joao Paulo de Souza Medeiros <ignotus21@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
"""
import clads_list
class List(object):
    """Pythonic wrapper around the clads linked-list C extension module.

    Supports len(), iteration, insertion and removal.  Ordering decisions
    inside the C list are driven by a compare callback installed at
    construction time.
    """

    def __init__(self, cobj=None):
        """Create a list, optionally wrapping an existing C list object.

        @param cobj: an already-initialized clads list object, or None to
                     allocate a fresh one.
        """
        if cobj is None:  # was `cobj == None`; identity test is correct here
            self.__cobj = clads_list.initialize()
        else:
            self.__cobj = clads_list.initialize(cobj)
        self.set_compare_callback(self.__bind_compare_callback)

    def __len__(self):
        """Return the number of stored elements."""
        return clads_list.size(self.__cobj)

    def __iter__(self):
        """The list is its own iterator (cursor lives in the C object)."""
        return self

    def next(self):
        """Return the next element, raising StopIteration when exhausted."""
        n = clads_list.next(self.__cobj)
        if n is None:
            raise StopIteration
        return n

    # Backward-compatible alias so the object also satisfies the Python 3
    # iterator protocol (which calls __next__ instead of next).
    __next__ = next

    def get_cobj(self):
        """Expose the underlying C list object."""
        return self.__cobj

    def insert(self, value):
        """Insert value into the list."""
        return clads_list.insert(self.__cobj, value)

    def remove(self, value):
        """Remove one occurrence of value ('F' flag, cf. removeall's 'T')."""
        return clads_list.remove(self.__cobj, value, 'F')

    def removeall(self, value):
        """Remove every occurrence of value ('T' flag)."""
        return clads_list.remove(self.__cobj, value, 'T')

    def set_compare_callback(self, f_compare):
        """Install the three-way comparison function used by the C list."""
        clads_list.set_compare_callback(self.__cobj, f_compare)

    @staticmethod
    def __bind_compare_callback(a, b):
        """Default comparator: -1/0/1 for <, ==, >; 2 when incomparable."""
        if (a < b):
            return -1
        if (a == b):
            return 0
        if (a > b):
            return 1
        return 2
if __name__ == "__main__":
    """
    """
    # Smoke test with integer values: exercises insert, remove (first
    # occurrence) and removeall (every occurrence), printing the list
    # contents after each operation.
    print "[do] Number testing..."
    ln = List()
    print "insert:"
    ln.insert(4)
    ln.insert(2)
    ln.insert(2)
    ln.insert(3)
    ln.insert(4)
    for i in ln: print i, type(i)
    print "size", len(ln)
    print "remove 4:"
    ln.remove(4)
    for i in ln: print i
    print "insert 4:"
    ln.insert(4)
    for i in ln: print i
    print "removeall 4:"
    ln.removeall(4)
    for i in ln: print i
    print "removeall 3:"
    ln.removeall(3)
    for i in ln: print i
    print "removeall 2:"
    ln.removeall(2)
    for i in ln: print i
    # List should now be empty again.
    print "size", len(ln)
    print "[done]"
    # Same sequence of operations, but with string values, to confirm the
    # compare callback also works on non-numeric types.
    print "[do] String testing..."
    ls = List()
    print "insert:"
    ls.insert("john")
    ls.insert("mary")
    ls.insert("luca")
    ls.insert("john")
    ls.insert("lucy")
    for i in ls: print i, type(i)
    print "size", len(ls)
    print "remove `john':"
    ls.remove("john")
    for i in ls: print i
    print "insert `john':"
    ls.insert("john")
    for i in ls: print i
    print "removeall `john':"
    ls.removeall("john")
    for i in ls: print i
    print "removeall `lucy':"
    ls.removeall("lucy")
    for i in ls: print i
    print "removeall `luca':"
    ls.removeall("luca")
    for i in ls: print i
    print "remove `mary':"
    ls.remove("mary")
    for i in ls: print i
    print "size", len(ls)
    print "[done]"
| gpl-2.0 |
tarthy6/dozer-thesis | scripts/test-OLD/gts-operators.py | 4 | 1403 | """ This file shows 2 ways to fill union of triangulated surfaces:
You can either use union of 2 inGtsSurface predicates or
create union of surfaces using GTS calls first and use a single
isGtsSurface as predicate with the united surface.
The disadvantage of the predicate union | is that each sphere must fit whole in one
surface or another: with padding, several points on the sphere are tested. Therefore,
areas near both surfaces' boundary will not be filled at all.
Note that GTS only moves references to surfaces around, therefore e.g. translating
surface that is part of the union will move also the part of the united surface.
Therefore, we use the copy() method for deep copy here.
"""
from woo import pack,qt
import gts
# Load the base surface and make a deep copy shifted along x so the two
# overlap; gts.Surface.copy() is used because GTS otherwise shares refs.
s1=gts.read(open('horse.coarse.gts'))
s2=gts.Surface(); s2.copy(s1); s2.translate(0.04,0,0)
O.bodies.append(pack.gtsSurface2Facets(s1,color=(0,1,0))+pack.gtsSurface2Facets(s2,color=(1,0,0)))
# GTS-level union of the two surfaces, lifted along z for display.
s12=gts.Surface(); s12.copy(s1.union(s2)); s12.translate(0,0,.1)
radius=0.002
O.bodies.append(pack.gtsSurface2Facets(s12,color=(0,0,1)))
qt.View()
from time import time
t0=time()
# Method 1: union of two inGtsSurface predicates (each sphere must fit
# wholly inside one surface or the other -- see module docstring).
O.bodies.append(pack.regularHexa(pack.inGtsSurface(s1) | pack.inGtsSurface(s2),radius,gap=0,color=(0,1,0)))
t1=time()
print 'Using predicate union: %gs'%(t1-t0)
# Method 2: single predicate over the GTS-level united surface.
O.bodies.append(pack.regularHexa(pack.inGtsSurface(s12),radius,gap=0.,color=(1,0,0)))
t2=time()
print 'Using surface union: %gs'%(t2-t1)
| gpl-2.0 |
sgso/RIOT | dist/tools/sniffer/rftestrx2pcap.py | 17 | 3928 | #!/usr/bin/env python2
'''
(C) 2012, Mariano Alvira <mar@devl.org>
(C) 2014, Oliver Hahm <oliver.hahm@inria.fr>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the Institute nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import sys,os,time
from struct import pack
import re
import serial
# Usage check: need at least the serial tty and the 802.15.4 channel.
if len(sys.argv) < 3:
    sys.stderr.write("Usage: %s tty channel [outfile]\n" % (sys.argv[0]))
    sys.stderr.write("       channel = 11-26\n")
    sys.exit(2)

# Open the sniffer's serial port; DTR/RTS are forced low (some boards
# reset when these lines toggle).
try:
    serport = serial.Serial(sys.argv[1], baudrate=115200, dsrdtr=0, rtscts=0, timeout=1)
    serport.setDTR(0)
    serport.setRTS(0)
except IOError:
    print "error opening port"
    sys.exit(2)

time.sleep(1)

# Send the channel-change command and read character-by-character until the
# device echoes a "channel: N" confirmation line.
chanstr = ''
sys.stderr.write('chan %s\n' % sys.argv[2])
serport.write('chan %s\n' % sys.argv[2])
while 1:
    c = serport.read(1)
    if (c == '\n'):
        chanstr = ''
        continue
    chanstr += c
    m = re.match(".*channel: (\w+)", chanstr)
    if m:
        chan = int(m.group(1))
        sys.stderr.write(chanstr + '\n')
        break

# Third argument is the output capture file; default to stdout when absent.
try:
    sys.stderr.write('writing to file %s \n' % (sys.argv[3]))
    outfile = open(sys.argv[3], 'w+b')
except IndexError:
    outfile = sys.stdout

sys.stderr.write("RX: 0\r")

### PCAP global header constants (little-endian, see the pcap file format)
MAGIC = 0xa1b2c3d4
MAJOR = 2
MINOR = 4
ZONE = 0
SIG = 0
SNAPLEN = 0xffff
NETWORK = 230 # 802.15.4 no FCS

# output overall PCAP header
outfile.write(pack('<LHHLLLL', MAGIC, MAJOR, MINOR, ZONE, SIG, SNAPLEN, NETWORK))

count = 0
fileempty = 1
newpacket = 0

try:
    while 1:
        line = serport.readline().rstrip()
        # A "rftest-rx --- len 0xNN" line announces a packet; grab its
        # length and timestamp, then the payload follows on later lines.
        m_rftestline = re.match(".*rftest-rx --- len 0x(\w\w).*", line)
        if m_rftestline:
            newpacket = 1
            t = time.time()
            sec = int(t)
            # NOTE(review): pcap per-packet headers expect microseconds;
            # this factor looks like it should be 1000000 -- confirm.
            usec = (t - sec) * 100000
            length = int(m_rftestline.group(1), 16)
            continue
        # if this is a new packet, add a per-packet pcap record header
        if newpacket == 1:
            newpacket = 0
            outfile.write(pack('<LLLL',sec,usec,length,length))
            outfile.flush()
            count += 1
            sys.stderr.write("RX: %d\r" % count)
            # clear file empty flag
            if fileempty:
                fileempty = 0
        if fileempty == 0 :
            # write the payload: each whitespace-separated token carries one
            # hex byte (matched loosely because a stray \r may be present)
            for d in line.split(' '):
                m = re.match('.*(\w\w).*', d)
                if m:
                    outfile.write(pack('<B', int(m.group(1),16)))
            outfile.flush()
except KeyboardInterrupt:
    # Ctrl-C ends the capture; the file has been flushed incrementally.
    sys.exit(2)
| lgpl-2.1 |
MounirMesselmeni/django | tests/auth_tests/test_handlers.py | 328 | 2868 | from __future__ import unicode_literals
from django.contrib.auth.handlers.modwsgi import (
check_password, groups_for_user,
)
from django.contrib.auth.models import Group, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.test import TransactionTestCase, override_settings
# This must be a TransactionTestCase because the WSGI auth handler performs
# its own transaction management.
class ModWsgiHandlerTestCase(TransactionTestCase):
    """
    Tests for the mod_wsgi authentication handler
    """
    available_apps = [
        'django.contrib.auth',
        'django.contrib.contenttypes',
    ]

    def test_check_password(self):
        """
        Verify that check_password returns the correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider
        """
        User.objects.create_user('test', 'test@example.com', 'test')

        # User not in database: check_password must return None (not False).
        self.assertIsNone(check_password({}, 'unknown', ''))

        # Valid user with correct password
        self.assertTrue(check_password({}, 'test', 'test'))

        # correct password, but user is inactive
        User.objects.filter(username='test').update(is_active=False)
        self.assertFalse(check_password({}, 'test', 'test'))

        # Valid user with incorrect password
        self.assertFalse(check_password({}, 'test', 'incorrect'))

    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_check_password_custom_user(self):
        """
        Verify that check_password returns the correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Authentication_Provider

        with custom user installed
        """
        # CustomUser uses email as the username field and a date_of_birth.
        CustomUser._default_manager.create_user('test@example.com', '1990-01-01', 'test')

        # User not in database
        self.assertIsNone(check_password({}, 'unknown', ''))

        # Valid user with correct password'
        self.assertTrue(check_password({}, 'test@example.com', 'test'))

        # Valid user with incorrect password
        self.assertFalse(check_password({}, 'test@example.com', 'incorrect'))

    def test_groups_for_user(self):
        """
        Check that groups_for_user returns correct values as per
        http://code.google.com/p/modwsgi/wiki/AccessControlMechanisms#Apache_Group_Authorisation
        """
        user1 = User.objects.create_user('test', 'test@example.com', 'test')
        User.objects.create_user('test1', 'test1@example.com', 'test1')
        group = Group.objects.create(name='test_group')
        user1.groups.add(group)

        # User not in database: empty group list, not an error.
        self.assertEqual(groups_for_user({}, 'unknown'), [])

        # Group names are returned as bytestrings (mod_wsgi convention).
        self.assertEqual(groups_for_user({}, 'test'), [b'test_group'])
        self.assertEqual(groups_for_user({}, 'test1'), [])
| bsd-3-clause |
chazmead/django-clubhouse | clubhouse/core/models/__init__.py | 1 | 6252 | # vim: ai ts=4 sts=4 et sw=4
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import six
from django.db import models
from django.conf import settings
from django.db.models.base import ModelBase
from django.db.models.fields import AutoField
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.apps import apps
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat,\
ugettext as _, ungettext
from django.utils import timezone
from django.conf import settings
from mezzanine.pages.models import Page
from clubhouse.utils.models import ensure_model
__all__ = ['BlockBase','BlockContext','ReusableBlock','AbstractModularPage']
# Resolve the user model reference: prefer the swappable AUTH_USER_MODEL
# setting; fall back to the concrete auth User when the setting is absent.
try:
    User = settings.AUTH_USER_MODEL
except AttributeError:
    from django.contrib.auth.models import User
def get_page_types():
    """Return a Q object matching the ContentTypes of every concrete
    mezzanine Page subclass, OR-ing one term per content model."""
    combined = None
    for model in Page.get_content_models():
        term = models.Q(app_label=model._meta.app_label,
                        model=model._meta.model_name)
        combined = term if combined is None else combined | term
    return combined
class ReusableManager(models.Manager):
    """Manager that only yields blocks flagged as reusable."""

    def get_queryset(self):
        base = super(ReusableManager, self).get_queryset()
        return base.filter(can_reuse=True)
class BlockBase(models.Model):
    """Abstract base for all content blocks: a title plus an audit trail."""
    title = models.CharField(max_length=255)
    # Audit fields: who created/updated the block and when.
    date_created = models.DateTimeField(auto_now_add=True)
    user_created = models.ForeignKey(User, null=True, blank=True, editable=False,
        related_name='%(app_label)s_%(class)s_blocks_created')
    last_updated = models.DateTimeField(auto_now=True)
    user_updated = models.ForeignKey(User, null=True, blank=True, editable=False,
        related_name='%(app_label)s_%(class)s_blocks_updated')

    # Iterable of BlockContext types this block may appear in; consulted by
    # BlockContext.get_block_models().
    block_contexts = ()

    class Meta:
        abstract = True

    def __unicode__(self):
        return unicode(self.title)

    def get_template(self):
        # Template path is derived from the concrete model's name.
        return 'blocks/%s.html' % self._meta.model_name
class PageBlockManager(models.Manager):
    """Manager that hides contexts whose generic block link is incomplete."""

    def all(self):
        dangling = models.Q(block_type=None) | models.Q(block_id=None)
        return self.get_queryset().exclude(dangling)
class BlockContextMetaBase(object):
    """
    Base class for BlockContext.Meta classes. defines ordering and
    index_together for parent_type, parent_id
    """
    # Contexts render in their explicit 'order' field.
    ordering = ('order',)
    # Composite index speeds up parent lookups via the generic relation.
    index_together = (('parent_type','parent_id'),)
class BlockContext(models.Model):
    """Abstract join model placing a block on a parent object (e.g. a page).

    Both ends are generic relations: ``block_object`` points at the block
    instance, ``parent_object`` at whatever hosts it.
    """
    block_type = models.ForeignKey(ContentType, null=True, blank=True,
        on_delete=models.CASCADE,
        related_name='%(app_label)s_%(class)s_block')
    block_id = models.PositiveIntegerField(null=True, blank=True)
    block_object = GenericForeignKey('block_type', 'block_id')
    parent_type = models.ForeignKey(ContentType, null=True, blank=True,
        on_delete=models.SET_NULL, related_name='%(app_label)s_%(class)s_parent')
    parent_id = models.PositiveIntegerField(null=True, blank=True)
    parent_object = GenericForeignKey('parent_type', 'parent_id')
    order = models.PositiveIntegerField(default=0)
    additional_classes = models.CharField(max_length=255, null=True, blank=True,
        help_text='Space separated list of additional css classes')

    objects = PageBlockManager()

    # Optional explicit list of block models for this context; merged with
    # models that declare this context in their own `block_contexts`.
    block_models = tuple()

    class Meta(BlockContextMetaBase):
        abstract = True

    @classmethod
    def get_block_models(cls):
        """
        Default method of getting the block models for this type.
        Can override this method with other methods, but must return an
        iterable of model classes.
        """
        yielded = []
        # First, models explicitly listed on the context class.
        for block in cls.block_models:
            try:
                block = ensure_model(block)
                info = "%s.%s" % (block._meta.app_label, block._meta.model_name)
                if info in yielded:
                    continue
                yield block
                yielded.append(info)
            except ValueError:
                if settings.DEBUG:
                    # Help figure out why blocks aren't displayed
                    raise
                pass
        # Then, any installed BlockBase model that declares this context.
        for model in apps.get_models():
            try:
                contexts = []
                for c in model.block_contexts:
                    try:
                        contexts.append(ensure_model(c))
                    except ValueError:
                        if settings.DEBUG:
                            # Help figure out why blocks aren't displayed
                            raise
                        pass
                if issubclass(model, BlockBase) and cls in contexts:
                    info = "%s.%s" % (model._meta.app_label,
                                      model._meta.model_name)
                    if info in yielded:
                        continue
                    yield model
                    yielded.append(info)
            except AttributeError:
                # Model without a block_contexts attribute: not a block.
                pass

    def delete(self, *args, **kwargs):
        """
        Remove the block object along with the relationship.
        """
        try:
            self.block_object.delete()
        except Exception:
            # Was a bare `except:`, which also trapped KeyboardInterrupt and
            # SystemExit.  Best effort: the block may already be gone or the
            # generic relation may be dangling; the context row is removed
            # regardless.
            pass
        return super(BlockContext, self).delete(*args, **kwargs)

    def __unicode__(self):
        return unicode("%s : %s" % (self.block_type, self.block_object))

    @property
    def classes(self):
        """CSS classes: the block's model name plus any extra classes."""
        return "%s %s" % (
            self.block_object._meta.model_name,
            self.additional_classes,
        )

    @classmethod
    def get_template(cls):
        return '%s.html' % cls._meta.model_name
class ReusableBlock(BlockBase):
    """Block that can optionally be shared (reused) across parents."""
    can_reuse = models.BooleanField(default=False)

    # Default manager plus a manager restricted to reusable blocks.
    objects = models.Manager()
    reusable = ReusableManager()

    class Meta:
        abstract = True
class AbstractModularComponent(models.Model):
    """Mixin for models that own ordered collections of block contexts."""

    class Meta:
        abstract = True

    def get_blocks_by_context(self, context):
        """Return this object's contexts of the given type, in display order."""
        content_type = ContentType.objects.get_for_model(type(self))
        contexts = context.objects.filter(parent_type=content_type,
                                          parent_id=self.pk)
        return contexts.order_by('order')
class AbstractModularPage(Page, AbstractModularComponent):
    """Mezzanine Page that can host ordered block contexts."""
    class Meta:
        abstract = True
kohnle-lernmodule/exe201based | twisted/pb/referenceable.py | 14 | 26828 | # -*- test-case-name: twisted.pb.test.test_sturdyref -*-
# this module is responsible for sending and receiving OnlyReferenceable and
# Referenceable (callable) objects. All details of actually invoking methods
# live in call.py
import weakref
from zope.interface import interface
from zope.interface import implements, providedBy
from twisted.python.components import registerAdapter
Interface = interface.Interface
from twisted.internet import defer, error
from twisted.python import failure, log
from twisted.pb import ipb, schema, slicer, tokens, call
BananaError = tokens.BananaError
Violation = tokens.Violation
from twisted.pb.remoteinterface import getRemoteInterface, getRemoteInterfaceByName
from twisted.pb.copyable import Copyable, RemoteCopy
class OnlyReferenceable(object):
    """Object sendable by reference but with no remotely-invokable methods."""
    implements(ipb.IReferenceable)

    def processUniqueID(self):
        # Identity-based PUID: each live instance is its own unique reference.
        return id(self)
class Referenceable(OnlyReferenceable):
    """Referenceable object whose remote_* methods can be invoked remotely."""
    implements(ipb.IReferenceable, ipb.IRemotelyCallable)

    # Per-instance cache filled lazily by getInterface().
    _interface = None
    _interfaceName = None

    # TODO: this code wants to be in an adapter, not a base class. Also, it
    # would be nice to cache this across the class: if every instance has the
    # same interfaces, they will have the same values of _interface and
    # _interfaceName, and it feels silly to store this data separately for
    # each instance. Perhaps we could compare the instance's interface list
    # with that of the class and only recompute this stuff if they differ.

    def getInterface(self):
        """Return (and cache) the RemoteInterface this object provides, or None."""
        if not self._interface:
            self._interface = getRemoteInterface(self)
            if self._interface:
                self._interfaceName = self._interface.__remote_name__
            else:
                self._interfaceName = None
        return self._interface

    def getInterfaceName(self):
        """Return the remote name of our RemoteInterface, or None."""
        # Side effect: populates the _interface/_interfaceName cache.
        self.getInterface()
        return self._interfaceName

    def doRemoteCall(self, methodname, kwargs):
        """Dispatch an inbound remote call to the matching remote_* method."""
        meth = getattr(self, "remote_%s" % methodname)
        res = meth(**kwargs)
        return res
class ReferenceableTracker:
    """Book-keeping for a local Referenceable in use by a remote Broker.

    @ivar obj: the tracked object itself
    @ivar refcount: number of sends minus acknowledged DECREFs; when it
                    reaches zero the remote end has forgotten the
                    RemoteReference and the clid can be retired
    @ivar clid: the connection-local ID naming this object on the wire
    """

    def __init__(self, tub, obj, puid, clid):
        self.tub = tub
        self.obj = obj
        self.puid = puid
        self.clid = clid
        self.refcount = 0

    def send(self):
        """Count one transmission of the reference.

        @return: True only for the very first transmission.
        """
        self.refcount = self.refcount + 1
        if self.refcount == 1:
            return True

    def getURL(self):
        """Return the object's URL via our Tub, or None when there is no Tub."""
        if not self.tub:
            return None
        return self.tub.getURLForReference(self.obj)

    def decref(self, count):
        """Apply a DECREF message from the far end.

        @return: True when the refcount reaches zero and this clid should
                 be retired.
        """
        assert self.refcount >= count, "decref(%d) but refcount was %d" % (count, self.refcount)
        self.refcount -= count
        return self.refcount == 0
# TODO: rather than subclassing Referenceable, ReferenceableSlicer should be
# registered to use for anything which provides any RemoteInterface
class ReferenceableSlicer(slicer.BaseSlicer):
    """I handle pb.Referenceable objects (things with remotely invokable
    methods, which are copied by reference).
    """
    opentype = ('my-reference',)

    def sliceBody(self, streamable, broker):
        # Wire format: the clid is always sent first; on the very first
        # transmission over this connection, the interface name (empty
        # string when none) and, if known, the object's URL follow.
        puid = ipb.IReferenceable(self.obj).processUniqueID()
        tracker = broker.getTrackerForMyReference(puid, self.obj)
        yield tracker.clid
        firstTime = tracker.send()
        if firstTime:
            # this is the first time the Referenceable has crossed this wire.
            # In addition to the clid, send the interface name (if any), and
            # any URL this reference might be known by
            iname = ipb.IRemotelyCallable(self.obj).getInterfaceName()
            if iname:
                yield iname
            else:
                yield ""
            url = tracker.getURL()
            if url:
                yield url

registerAdapter(ReferenceableSlicer, Referenceable, ipb.ISlicer)
class CallableSlicer(slicer.BaseSlicer):
    """Bound methods are serialized as my-reference sequences with negative
    clid values."""
    opentype = ('my-reference',)

    def sliceBody(self, streamable, broker):
        # TODO: consider this requirement, maybe based upon a Tub flag
        # assert ipb.ISlicer(self.obj.im_self)
        # or maybe even isinstance(self.obj.im_self, Referenceable)
        puid = id(self.obj)
        tracker = broker.getTrackerForMyCall(puid, self.obj)
        yield tracker.clid
        firstTime = tracker.send()
        if firstTime:
            # First crossing of this wire: also send the schema name
            # (empty string when none) and, if known, a URL.
            schema = self.getSchema()
            if schema:
                yield schema
            else:
                yield ""
            url = tracker.getURL()
            if url:
                yield url

    def getSchema(self):
        # NOTE: the early return below deliberately disables schema lookup;
        # everything after it is unreachable until the TODO is resolved.
        return None # TODO: not quite ready yet
        # callables which are actually bound methods of a pb.Referenceable
        # can use the schema from that
        s = ipb.IReferenceable(self.obj.im_self, None)
        if s:
            return s.getSchemaForMethodNamed(self.obj.im_func.__name__)
        # both bound methods and raw callables can also use a .schema
        # attribute
        return getattr(self.obj, "schema", None)
# The CallableSlicer is activated through PBRootSlicer.slicerTable, because a
# StorageBanana might want to stick with the old MethodSlicer/FunctionSlicer
# for these types
#registerAdapter(CallableSlicer, types.MethodType, ipb.ISlicer)
class ReferenceUnslicer(slicer.BaseUnslicer):
    """I turn an incoming 'my-reference' sequence into a RemoteReference or a
    RemoteMethodReference."""
    # Parser state: 0 = expecting clid, 1 = expecting interface name,
    # 2 = expecting URL, 3 = done (mirrors ReferenceableSlicer's wire order).
    state = 0
    clid = None
    interfaceName = None
    url = None
    inameConstraint = schema.StringConstraint(200) # TODO: only known RI names?
    urlConstraint = schema.StringConstraint(200)

    def checkToken(self, typebyte, size):
        """Validate the next token's type/size against the current state."""
        if self.state == 0:
            # clid may be negative (method references use negative clids).
            if typebyte not in (tokens.INT, tokens.NEG):
                raise BananaError("reference ID must be an INT or NEG")
        elif self.state == 1:
            self.inameConstraint.checkToken(typebyte, size)
        elif self.state == 2:
            self.urlConstraint.checkToken(typebyte, size)
        else:
            raise Violation("too many parameters in my-reference")

    def receiveChild(self, obj, ready_deferred=None):
        """Accept the next decoded value and advance the state machine."""
        assert not isinstance(obj, defer.Deferred)
        assert ready_deferred is None
        if self.state == 0:
            self.clid = obj
            self.state = 1
        elif self.state == 1:
            # must be the interface name; empty string means "none"
            self.interfaceName = obj
            if obj == "":
                self.interfaceName = None
            self.state = 2
        elif self.state == 2:
            # URL
            self.url = obj
            self.state = 3
        else:
            raise BananaError("Too many my-reference parameters")

    def receiveClose(self):
        """Sequence complete: resolve the clid to a RemoteReference."""
        if self.clid is None:
            raise BananaError("sequence ended too early")
        tracker = self.broker.getTrackerForYourReference(self.clid,
                                                         self.interfaceName,
                                                         self.url)
        return tracker.getRef(), None

    def describe(self):
        """Return a short label for error messages."""
        if self.clid is None:
            return "<ref-?>"
        return "<ref-%s>" % self.clid
class RemoteReferenceTracker:
    """I hold the data necessary to locate (or create) a RemoteReference.

    @ivar url: the target Referenceable's global URL
    @ivar broker: the Broker which holds this RemoteReference
    @ivar clid: for that Broker, the your-reference CLID for the
                RemoteReference
    @ivar interfaceName: the name of a RemoteInterface object that the
                         RemoteReference claims to implement
    @ivar interface: our version of a RemoteInterface object that corresponds
                     to interfaceName
    @ivar received_count: the number of times the remote end has sent us this
                          object. We must send back decref() calls to match.
    @ivar ref: a weakref to the RemoteReference itself
    """

    def __init__(self, parent, clid, url, interfaceName):
        self.broker = parent
        self.clid = clid
        # TODO: the remote end sends us a global URL, when really it should
        # probably send us a per-Tub name, which we can then concatenate to
        # their TubID if/when we pass it on to others. By accepting a full
        # URL, we give them the ability to sort-of spoof others. We could
        # check that url.startswith(broker.remoteTub.baseURL), but the Right
        # Way is to just not have them send the base part in the first place.
        # I haven't yet made this change because I'm not yet positive it
        # would work.. how exactly does the base url get sent, anyway? What
        # about Tubs visible through multiple names?
        self.url = url
        self.interfaceName = interfaceName
        self.interface = getRemoteInterfaceByName(interfaceName)
        self.received_count = 0
        self.ref = None

    def __repr__(self):
        s = "<RemoteReferenceTracker(clid=%d,url=%s)>" % (self.clid, self.url)
        return s

    def getRef(self):
        """Return the actual RemoteReference that we hold, creating it if
        necessary."""
        if self.ref is None:
            # Hold only a weakref so the RemoteReference can be collected;
            # _refLost then tells the broker to release the remote side.
            ref = RemoteReference(self)
            self.ref = weakref.ref(ref, self._refLost)
        self.received_count += 1
        return self.ref()

    def _refLost(self, wref):
        # Weakref callback: the RemoteReference was garbage-collected, so
        # send the accumulated DECREFs back to the far end.
        count, self.received_count = self.received_count, 0
        self.broker.freeYourReference(self, count)
class RemoteReferenceOnly(object):
    """Client-side handle for a remote object (callRemote lives in subclasses)."""

    def __init__(self, tracker):
        """@param tracker: the RemoteReferenceTracker which points to us"""
        self.tracker = tracker

    def getSturdyRef(self):
        return self.tracker.sturdy

    def notifyOnDisconnect(self, callback):
        self.tracker.broker.notifyOnDisconnect(callback)

    def dontNotifyOnDisconnect(self, callback):
        self.tracker.broker.dontNotifyOnDisconnect(callback)

    def __repr__(self):
        pieces = ["<%s at 0x%x" % (self.__class__.__name__, abs(id(self)))]
        if self.tracker.url:
            pieces.append(" [%s]" % self.tracker.url)
        pieces.append(">")
        return "".join(pieces)
class RemoteReference(RemoteReferenceOnly):
    """A live reference to a remote Referenceable; callRemote() invokes
    methods on the far end and returns a Deferred for the answer."""

    def callRemote(self, _name, *args, **kwargs):
        """Invoke the remote method named _name.

        Recognized control kwargs (removed before sending):
        _methodConstraint, _resultConstraint (use "none" to mean "not set
        by the caller"), and _useSchema (default True).

        @return: a Deferred that fires with the remote result, or errbacks
                 with a local serialization/validation failure or the
                 remote error.
        """
        # Note: for consistency, *all* failures are reported asynchronously.
        req = None
        broker = self.tracker.broker

        # remember that "none" is not a valid constraint, so we use it to
        # mean "not set by the caller", which means we fall back to whatever
        # the RemoteInterface says. Using None would mean an AnyConstraint,
        # which is not the same thing.
        methodConstraintOverride = kwargs.get("_methodConstraint", "none")
        resultConstraint = kwargs.get("_resultConstraint", "none")
        useSchema = kwargs.get("_useSchema", True)
        if "_methodConstraint" in kwargs:
            del kwargs["_methodConstraint"]
        if "_resultConstraint" in kwargs:
            del kwargs["_resultConstraint"]
        if "_useSchema" in kwargs:
            del kwargs["_useSchema"]

        try:
            # newRequestID() could fail with a DeadReferenceError
            reqID = broker.newRequestID()
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt etc.
            return defer.fail()

        try:
            # in this clause, we validate the outbound arguments against our
            # notion of what the other end will accept (the RemoteInterface)
            # NOTE(review): if PendingRequest() itself raises, req is still
            # None and the except clause below would hit AttributeError.
            req = call.PendingRequest(reqID, self)
            # first, figure out which method they want to invoke
            (methodName, methodSchema) = self._getMethodInfo(_name)
            req.methodName = methodName # for debugging
            if methodConstraintOverride != "none":
                methodSchema = methodConstraintOverride
            if useSchema and methodSchema:
                # turn positional arguments into kwargs. mapArguments() could
                # fail for bad argument names or missing required parameters
                argsdict = methodSchema.mapArguments(args, kwargs)
                # check args against the arg constraint. This could fail if
                # any arguments are of the wrong type
                # NOTE(review): this checks the original kwargs, not the
                # merged argsdict, so positional args bypass the per-arg
                # type check here -- confirm whether that is intended.
                methodSchema.checkAllArgs(kwargs)
                # the Interface gets to constraint the return value too, so
                # make a note of it to use later
                req.setConstraint(methodSchema.getResponseConstraint())
            else:
                if args:
                    why = "positional arguments require a RemoteInterface"
                    why += " for %s.%s()" % (self, methodName)
                    raise tokens.BananaError(why)
                argsdict = kwargs
            # if the caller specified a _resultConstraint, that overrides
            # the schema's one
            if resultConstraint != "none":
                # overrides schema
                req.setConstraint(schema.makeConstraint(resultConstraint))

        except: # TODO: merge this with the next try/except clause
            # we have not yet sent anything to the far end. A failure here
            # is entirely local: stale broker, bad method name, bad
            # arguments. We abandon the PendingRequest, but errback the
            # Deferred it was going to use
            req.fail(failure.Failure())
            return req.deferred

        try:
            # once we start sending the CallSlicer, we could get either a
            # local or a remote failure, so we must be prepared to accept an
            # answer. After this point, we assign all responsibility to the
            # PendingRequest structure.
            self.tracker.broker.addRequest(req)
            # TODO: there is a decidability problem here: if the reqID made
            # it through, the other end will send us an answer (possibly an
            # error if the remaining slices were aborted). If not, we will
            # not get an answer. To decide whether we should remove our
            # broker.waitingForAnswers[] entry, we need to know how far the
            # slicing process made it.
            slicer = call.CallSlicer(reqID, self.tracker.clid,
                                     methodName, argsdict)
            # this could fail if any of the arguments (or their children)
            # are unsliceable
            d = broker.send(slicer)
            # d will fire when the last argument has been serialized. It
            # will errback if the arguments could not be serialized. We need
            # to catch this case and errback the caller.
        except:
            req.fail(failure.Failure())
            return req.deferred

        # if we got here, we have been able to start serializing the
        # arguments. If serialization fails, the PendingRequest needs to be
        # flunked (because we aren't guaranteed that the far end will do it).
        d.addErrback(req.fail)
        # the remote end could send back an error response for many reasons:
        #  bad method name
        #  bad argument types (violated their schema)
        #  exception during method execution
        #  method result violated the results schema
        # something else could occur to cause an errback:
        #  connection lost before response completely received
        #  exception during deserialization of the response
        #   [but only if it occurs after the reqID is received]
        #  method result violated our results schema
        # if none of those occurred, the callback will be run
        return req.deferred

    def _getMethodInfo(self, name):
        """Resolve name against our RemoteInterface (if any).

        @return: (methodName, methodSchema); methodSchema is None when no
                 interface is known.
        @raises Violation: if the interface does not offer the method.
        """
        assert type(name) is str
        methodName = name
        methodSchema = None
        iface = self.tracker.interface
        if iface:
            interfaceName = iface.__remote_name__
            try:
                methodSchema = iface[name]
            except KeyError:
                raise Violation("%s(%s) does not offer %s" % \
                                (interfaceName, self, name))
        return methodName, methodSchema
class RemoteMethodReferenceTracker(RemoteReferenceTracker):
    """Tracker variant whose getRef() builds RemoteMethodReference
    instances instead of plain RemoteReferences."""

    def getRef(self):
        """Return (creating on first use) the live RemoteMethodReference;
        each call counts as one more received incref."""
        self.received_count += 1
        if self.ref is not None:
            return self.ref()
        live = RemoteMethodReference(self)
        self.ref = weakref.ref(live, self._refLost)
        return live
class RemoteMethodReference(RemoteReference):
    """A reference to a single bound remote method rather than to a whole
    Referenceable; it is invoked without a method name."""

    def callRemote(self, *args, **kwargs):
        # TODO: I suspect it would safer to use something other than
        # 'callRemote' here.
        # TODO: this probably needs a very different implementation
        # There is no schema support yet, so positional arguments cannot
        # be converted into keyword arguments.
        assert args == ()
        return RemoteReference.callRemote(self, "", *args, **kwargs)

    def _getMethodInfo(self, name):
        # method-references carry no name and no schema
        return "", None
class YourReferenceSlicer(slicer.BaseSlicer):
    """I handle pb.RemoteReference objects (being sent back home to the
    original pb.Referenceable-holder)
    """

    def slice(self, streamable, broker):
        """Yield the banana tokens for this reference.

        When sending back to the broker the reference came from, a compact
        'your-reference' sequence with just the clid suffices. Otherwise we
        are forwarding it to a third party: register a gift with our broker
        and emit a 'their-reference' sequence with the gift ID and URL.
        """
        self.streamable = streamable
        tracker = self.obj.tracker
        if tracker.broker == broker:
            # sending back to home broker
            yield 'your-reference'
            yield tracker.clid
        else:
            # sending somewhere else
            giftID = broker.makeGift(self.obj)
            yield 'their-reference'
            yield giftID
            yield tracker.url

    def describe(self):
        # NOTE(review): reports "your-ref" even when the object was sliced
        # down the their-reference branch above.
        return "<your-ref-%s>" % self.obj.tracker.clid

# make Brokers able to serialize RemoteReferences via the ISlicer adapter
registerAdapter(YourReferenceSlicer, RemoteReference, ipb.ISlicer)
class YourReferenceUnslicer(slicer.LeafUnslicer):
    """I accept incoming (integer) your-reference sequences and try to turn
    them back into the original Referenceable. I also accept (string)
    your-reference sequences and try to turn them into a published
    Referenceable that they did not have access to before."""
    clid = None

    def checkToken(self, typebyte, size):
        # the sequence carries exactly one INT: the connection-local clid
        if typebyte != tokens.INT:
            raise BananaError("your-reference ID must be an INT")

    def receiveChild(self, obj, ready_deferred=None):
        assert not isinstance(obj, defer.Deferred)
        assert ready_deferred is None
        self.clid = obj

    def receiveClose(self):
        """Translate the clid back into our local Referenceable.

        @raises BananaError: if the sequence ended before a clid arrived
        @raises Violation: if the clid does not name one of our objects
        """
        if self.clid is None:
            raise BananaError("sequence ended too early")
        obj = self.broker.getMyReferenceByCLID(self.clid)
        if not obj:
            raise Violation("unknown clid '%s'" % self.clid)
        return obj, None

    def describe(self):
        # BUGFIX: this previously read self.obj.refID, but unslicers have
        # no .obj attribute (that is slicer-side state), so describe()
        # raised AttributeError. Report the clid we collected instead.
        return "<your-ref-%s>" % self.clid
class TheirReferenceUnslicer(slicer.LeafUnslicer):
    """I accept gifts of third-party references. This is turned into a live
    reference upon receipt."""
    # (their-reference, giftID, URL)
    # state machine: 0 = expecting giftID, 1 = expecting URL, 2 = done
    state = 0
    giftID = None
    url = None
    # cap the URL at 200 bytes to bound what a peer can make us buffer
    urlConstraint = schema.StringConstraint(200)

    def checkToken(self, typebyte, size):
        """Validate each incoming token against the current state."""
        if self.state == 0:
            if typebyte != tokens.INT:
                raise BananaError("their-reference giftID must be an INT")
        elif self.state == 1:
            self.urlConstraint.checkToken(typebyte, size)
        else:
            raise Violation("too many parameters in their-reference")

    def receiveChild(self, obj, ready_deferred=None):
        """Record giftID then URL, advancing the state machine."""
        assert not isinstance(obj, defer.Deferred)
        assert ready_deferred is None
        if self.state == 0:
            self.giftID = obj
            self.state = 1
        elif self.state == 1:
            # URL
            self.url = obj
            self.state = 2
        else:
            raise BananaError("Too many their-reference parameters")

    def receiveClose(self):
        """Resolve the URL into a live reference via our Tub.

        Returns the same Deferred twice: once as the unsliced object and
        once as the ready_deferred, so delivery waits for resolution.
        """
        if self.giftID is None or self.url is None:
            raise BananaError("sequence ended too early")
        d = self.broker.tub.getReference(self.url)
        d.addBoth(self.ackGift)
        return d,d

    def ackGift(self, rref):
        """Tell the giver we have the reference, releasing their gift."""
        d = self.broker.remote_broker.callRemote("decgift",
                                                 giftID=self.giftID, count=1)
        # if we lose the connection, they'll decref the gift anyway
        d.addErrback(lambda f: f.trap(ipb.DeadReferenceError))
        d.addErrback(lambda f: f.trap(error.ConnectionLost))
        d.addErrback(lambda f: f.trap(error.ConnectionDone))
        return rref

    def describe(self):
        if self.giftID is None:
            return "<gift-?>"
        return "<gift-%s>" % self.giftID
class SturdyRef(Copyable, RemoteCopy):
    """I am a pointer to a Referenceable that lives in some (probably remote)
    Tub. This pointer is long-lived, however you cannot send messages with it
    directly. To use it, you must ask your Tub to turn it into a
    RemoteReference with tub.getReference(sturdyref).

    The SturdyRef is associated with a URL: you can create a SturdyRef out of
    a URL that you obtain from some other source, and you can ask the
    SturdyRef for its URL.

    SturdyRefs are serialized by copying their URL, and create an identical
    SturdyRef on the receiving side."""
    # class-level defaults, overridden per-instance by __init__
    encrypted = False
    tubID = None
    location = None
    # NOTE(review): mutable class-level default; it is only ever reassigned
    # (never mutated in place) in this class, but sharing it across
    # instances is fragile -- consider setting it in __init__.
    locationHints = []
    name = None

    def __init__(self, url=None):
        """@param url: optional pb:// or pbu:// URL to parse"""
        if url:
            # pb://key@{ip:port,host:port,[ipv6]:port}[/unix]/swissnumber
            # i.e. pb://tubID@{locationHints..}/name
            #
            # it can live at any one of a variety of network-accessible
            # locations, or at a single UNIX-domain socket.
            #
            # there is also an unencrypted form, which is indexed by the
            # single locationHint, because it does not have a TubID
            if url.startswith("pb://"):
                self.encrypted = True
                url = url[len("pb://"):]
                slash = url.rfind("/")
                self.name = url[slash+1:]
                at = url.find("@")
                if at != -1:
                    self.tubID = url[:at]
                # NOTE(review): when '@' is absent, tubID stays None but
                # getURL() concatenates it -- confirm pb:// URLs always
                # carry a tubID.
                self.locationHints = url[at+1:slash].split(",")
            elif url.startswith("pbu://"):
                self.encrypted = False
                url = url[len("pbu://"):]
                slash = url.rfind("/")
                self.name = url[slash+1:]
                self.tubID = None
                self.location = url[:slash]
            else:
                raise ValueError("unknown PB-URL prefix in '%s'" % url)

    def getTubRef(self):
        """Return a comparable TubRef (or NoAuthTubRef) for our Tub."""
        if self.encrypted:
            return TubRef(self.tubID, self.locationHints)
        return NoAuthTubRef(self.location)

    def getURL(self):
        """Reconstruct the pb:// or pbu:// URL for this reference."""
        if self.encrypted:
            return ("pb://" + self.tubID + "@" +
                    ",".join(self.locationHints) +
                    "/" + self.name)
        return "pbu://" + self.location + "/" + self.name

    def __str__(self):
        return self.getURL()

    def _distinguishers(self):
        """Two SturdyRefs are equivalent if they point to the same object.
        SturdyRefs to encrypted Tubs only pay attention to the TubID and the
        reference name. SturdyRefs to unencrypted Tubs must use the location
        hint instead of the (missing) TubID. This method makes it easier to
        compare a pair of SturdyRefs."""
        if self.encrypted:
            return (True, self.tubID, self.name)
        return (False, self.location, self.name)

    def __hash__(self):
        return hash(self._distinguishers())

    def __cmp__(self, them):
        # Python 2 style rich comparison; ignored by Python 3.
        return (cmp(type(self), type(them)) or
                cmp(self.__class__, them.__class__) or
                cmp(self._distinguishers(), them._distinguishers()))

    def asLiveRef(self):
        """Return an object that can be sent over the wire and unserialized
        as a live RemoteReference on the far end. Use this when you have a
        SturdyRef and want to give someone a reference to its target, but
        when you haven't bothered to acquire your own live reference to it."""
        return _AsLiveRef(self)
class _AsLiveRef:
    # Wrapper returned by SturdyRef.asLiveRef(): serializes as a
    # their-reference sequence so the recipient ends up with a live
    # reference to the SturdyRef's target.
    implements(ipb.ISlicer)

    def __init__(self, sturdy):
        """@param sturdy: the SturdyRef whose target we point at"""
        self.target = sturdy

    def slice(self, streamable, banana):
        yield 'their-reference'
        # NOTE(review): 'giftID' is not defined anywhere in this scope, so
        # slicing will raise NameError. Presumably a gift ID should be
        # obtained from the banana/broker here (compare
        # YourReferenceSlicer.slice) -- confirm and fix.
        yield giftID
        yield self.target.getURL()
        yield [] # interfacenames
class TubRef:
    """This is a little helper class which provides a comparable identifier
    for Tubs. TubRefs can be used as keys in dictionaries that track
    connections to remote Tubs."""
    encrypted = True

    def __init__(self, tubID, locationHints=None):
        self.tubID = tubID
        self.locationHints = locationHints

    def getLocations(self):
        """Return the location hints (may be None)."""
        return self.locationHints

    def __str__(self):
        return "pb://%s" % self.tubID

    def _distinguishers(self):
        """Comparison key; serves the same purpose as
        SturdyRef._distinguishers."""
        return (self.tubID,)

    def __hash__(self):
        return hash(self._distinguishers())

    def __cmp__(self, other):
        # Python 2 style comparison (ignored by Python 3): order first by
        # type, then by class, then by the distinguishing key.
        by_type = cmp(type(self), type(other))
        if by_type:
            return by_type
        by_class = cmp(self.__class__, other.__class__)
        if by_class:
            return by_class
        return cmp(self._distinguishers(), other._distinguishers())
class NoAuthTubRef(TubRef):
    """TubRef for an unencrypted Tub, identified by a single location
    string instead of a TubID. Only used on outbound connections."""
    encrypted = False

    def __init__(self, location):
        self.location = location

    def getLocations(self):
        """Return the single location wrapped in a list."""
        return [self.location]

    def __str__(self):
        return "pbu://%s" % self.location

    def _distinguishers(self):
        """Comparison key; serves the same purpose as
        SturdyRef._distinguishers."""
        return (self.location,)
| gpl-2.0 |
sunlightlabs/transparencyjobs | settings.py | 2 | 2682 | # Django settings for ppp project.
import os

# Absolute path of the directory holding this settings module; used to
# build filesystem paths (e.g. TEMPLATE_DIRS) without hardcoding them.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'gatekeeper.middleware.GatekeeperMiddleware',
)

ROOT_URLCONF = 'transparencyjobs.urls'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.markup',
    'django.contrib.messages',
    'django.contrib.admin',
    'gatekeeper',
    'transparencyjobs.jobs',
)

# Optional per-deployment overrides; local_settings.py is not required.
try:
    from local_settings import *
except ImportError:
pass | bsd-3-clause |
cfavi/fsmed | FsmGraphicHandle.py | 1 | 1257 | from PyQt4 import QtCore, QtGui
class FsmGraphicHandle(QtGui.QGraphicsRectItem):
    """Small square drag handle drawn centered on the item's origin; moving
    it notifies the parent graphics item so it can update its geometry."""

    def __init__(self, parent=None, scene=None):
        # side length of the square handle, in scene units
        # NOTE(review): 'scene' is accepted but never used -- confirm
        # whether the item should be added to it.
        self.SIDE=4.
        super(FsmGraphicHandle, self).__init__(0, 0,
                                 self.SIDE, self.SIDE, parent)
        self.setFlags(QtGui.QGraphicsItem.ItemIsMovable |
                      QtGui.QGraphicsItem.ItemIsSelectable )

    def shape(self):
        """Hit-test area: the painted square inflated by 1px on each side
        so the handle is easier to grab."""
        pp = QtGui.QPainterPath()
        pp.addRect(-self.SIDE/2-1, -self.SIDE/2-1,
                   self.SIDE+2, self.SIDE+2)
        return pp

    def boundingRect(self):
        # derive from shape() so painting and hit-testing stay in sync
        return self.shape().boundingRect()

    def paint(self, painter, option, widget=None):
        """Draw the square centered at the local origin; yellow fill while
        selected, white otherwise."""
        #display shape for debug
        #painter.fillPath(self.shape(), QtCore.Qt.cyan)
        painter.setPen(QtCore.Qt.black)
        if self.isSelected():
            painter.setBrush(QtCore.Qt.yellow)
        else:
            painter.setBrush(QtCore.Qt.white)
        painter.drawRect(-self.SIDE/2, -self.SIDE/2,
                         self.SIDE, self.SIDE)

    def mouseMoveEvent(self, event):
        # NOTE(review): assumes a parent item exists; parentItem() returns
        # None for a top-level item, which would raise AttributeError here.
        self.parentItem().updatePosition()
        super(FsmGraphicHandle, self).mouseMoveEvent(event)
| mit |
alrusdi/lettuce | tests/integration/lib/Django-1.2.5/django/contrib/admin/views/decorators.py | 45 | 3276 | import base64
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django import http, template
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.utils.translation import ugettext_lazy, ugettext as _
# Generic credentials-failure message (lazy: translated at render time).
ERROR_MESSAGE = ugettext_lazy("Please enter a correct username and password. Note that both fields are case-sensitive.")
# Hidden POST field marking a submission of the admin login form itself.
LOGIN_FORM_KEY = 'this_is_the_login_form'

def _display_login_form(request, error_message=''):
    """Render the admin login page, setting the session test cookie so the
    next request can verify that the browser accepts cookies."""
    request.session.set_test_cookie()
    return render_to_response('admin/login.html', {
        'title': _('Log in'),
        'app_path': request.get_full_path(),
        'error_message': error_message
    }, context_instance=template.RequestContext(request))
def staff_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    member, displaying the login page if necessary.
    """
    def _checklogin(request, *args, **kwargs):
        if request.user.is_active and request.user.is_staff:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)

        assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."

        # If this isn't already the login page, display it.
        if LOGIN_FORM_KEY not in request.POST:
            if request.POST:
                message = _("Please log in again, because your session has expired.")
            else:
                message = ""
            return _display_login_form(request, message)

        # Check that the user accepts cookies.
        if not request.session.test_cookie_worked():
            message = _("Looks like your browser isn't configured to accept cookies. Please enable cookies, reload this page, and try again.")
            return _display_login_form(request, message)
        else:
            request.session.delete_test_cookie()

        # Check the password.
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        user = authenticate(username=username, password=password)
        if user is None:
            message = ERROR_MESSAGE
            # BUGFIX: guard against a POST without a 'username' field;
            # previously "'@' in None" raised TypeError here.
            if username is not None and '@' in username:
                # Mistakenly entered e-mail address instead of username? Look it up.
                users = list(User.objects.filter(email=username))
                if len(users) == 1 and users[0].check_password(password):
                    message = _("Your e-mail address is not your username. Try '%s' instead.") % users[0].username
            return _display_login_form(request, message)

        # The user data is correct; log in the user in and continue.
        else:
            if user.is_active and user.is_staff:
                login(request, user)
                return http.HttpResponseRedirect(request.get_full_path())
            else:
                return _display_login_form(request, ERROR_MESSAGE)
    return wraps(view_func)(_checklogin)
| gpl-3.0 |
napkindrawing/ansible | lib/ansible/plugins/filter/json_query.py | 37 | 1584 | # (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
    '''Query data using jmespath query language ( http://jmespath.org ). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"

    :param data: JSON-like structure (nested dicts/lists) to search
    :param expr: jmespath expression evaluated against data
    :raises AnsibleError: if the optional jmespath library is missing
    '''
    if not HAS_LIB:
        raise AnsibleError('You need to install "jmespath" prior to running '
                           'json_query filter')
    return jmespath.search(expr, data)
class FilterModule(object):
    """Expose the json_query filter to Ansible's template engine."""

    def filters(self):
        """Return the mapping of filter names to their callables."""
        return {'json_query': json_query}
pescobar/easybuild-framework | easybuild/toolchains/gcccuda.py | 2 | 1443 | ##
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for a GCC+CUDA compiler toolchain.
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.compiler.cuda import Cuda
from easybuild.toolchains.gcc import GccToolchain
class GccCUDA(GccToolchain, Cuda):
    """Compiler toolchain with GCC and CUDA."""
    # toolchain name as referenced by easyconfig files
    NAME = 'gcccuda'
    # environment modules that provide the compilers
    COMPILER_MODULE_NAME = ['GCC', 'CUDA']
    # parent toolchain (plain GCC) this one extends
    SUBTOOLCHAIN = GccToolchain.NAME
| gpl-2.0 |
jostep/tensorflow | tensorflow/examples/tutorials/word2vec/word2vec_basic.py | 2 | 9909 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic word2vec example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
from tempfile import gettempdir
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'


# pylint: disable=redefined-outer-name
def maybe_download(filename, expected_bytes):
  """Download a file if not present, and make sure it's the right size.

  The file is cached in the system temp directory; expected_bytes guards
  against truncated or corrupted downloads.
  """
  local_filename = os.path.join(gettempdir(), filename)
  if not os.path.exists(local_filename):
    local_filename, _ = urllib.request.urlretrieve(url + filename,
                                                   local_filename)
  statinfo = os.stat(local_filename)
  if statinfo.st_size == expected_bytes:
    print('Found and verified', filename)
  else:
    print(statinfo.st_size)
    raise Exception('Failed to verify ' + local_filename +
                    '. Can you get to it with a browser?')
  return local_filename


# NOTE: runs at import time and may hit the network.
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
  """Extract the first file enclosed in a zip file as a list of words."""
  with zipfile.ZipFile(filename) as f:
    # as_str decodes the raw bytes to text; split() tokenizes on whitespace
    data = tf.compat.as_str(f.read(f.namelist()[0])).split()
  return data

vocabulary = read_data(filename)
print('Data size', len(vocabulary))

# Step 2: Build the dictionary and replace rare words with UNK token.
# keep only the 50000 most frequent words; the rest become 'UNK'
vocabulary_size = 50000
def build_dataset(words, n_words):
  """Map raw words to integer IDs, keeping only the most frequent words.

  The (n_words - 1) most common words get IDs 1..n_words-1 in frequency
  order; every other word maps to the 'UNK' token at ID 0.

  Returns:
    data: list of word IDs, same order and length as `words`.
    count: [word, frequency] pairs; count[0] is ['UNK', <unknown total>].
    dictionary: word -> ID mapping.
    reversed_dictionary: ID -> word mapping.
  """
  count = [['UNK', -1]]
  count.extend(collections.Counter(words).most_common(n_words - 1))
  dictionary = {word: idx for idx, (word, _) in enumerate(count)}
  data = [dictionary.get(word, 0) for word in words]
  # every 0 in data is an out-of-vocabulary word
  count[0][1] = data.count(0)
  reversed_dictionary = {idx: word for word, idx in dictionary.items()}
  return data, count, dictionary, reversed_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
                                                            vocabulary_size)
del vocabulary  # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

# module-global cursor into `data`, advanced by generate_batch() below
data_index = 0


# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
  """Generate one skip-gram training batch from the global `data` list.

  Args:
    batch_size: number of (center, context) examples; must be a multiple
      of num_skips.
    num_skips: context words sampled per center word (<= 2 * skip_window).
    skip_window: words considered on each side of the center word.

  Returns:
    batch: int32 array of shape (batch_size,) holding center-word IDs.
    labels: int32 array of shape (batch_size, 1) holding context-word IDs.

  Side effect: advances the module-global cursor `data_index`.
  """
  global data_index
  assert batch_size % num_skips == 0
  assert num_skips <= 2 * skip_window
  batch = np.ndarray(shape=(batch_size), dtype=np.int32)
  labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
  span = 2 * skip_window + 1  # [ skip_window target skip_window ]
  buffer = collections.deque(maxlen=span)
  if data_index + span > len(data):
    data_index = 0
  buffer.extend(data[data_index:data_index + span])
  data_index += span
  for i in range(batch_size // num_skips):
    context_words = [w for w in range(span) if w != skip_window]
    random.shuffle(context_words)
    words_to_use = collections.deque(context_words)
    for j in range(num_skips):
      batch[i * num_skips + j] = buffer[skip_window]
      context_word = words_to_use.pop()
      labels[i * num_skips + j, 0] = buffer[context_word]
    if data_index == len(data):
      # BUGFIX: deques do not support slice assignment
      # (buffer[:] = data[:span] raises TypeError); refill the
      # maxlen-bounded deque with extend(), which evicts old entries.
      buffer.extend(data[0:span])
      data_index = span
    else:
      buffer.append(data[data_index])
      data_index += 1
  # Backtrack a little bit to avoid skipping words in the end of a batch
  data_index = (data_index + len(data) - span) % len(data)
  return batch, labels
# Smoke-test the batcher: print 8 (center -> context) pairs as words.
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
  print(batch[i], reverse_dictionary[batch[i]],
        '->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.

batch_size = 128
embedding_size = 128  # Dimension of the embedding vector.
skip_window = 1       # How many words to consider left and right.
num_skips = 2         # How many times to reuse an input to generate a label.

# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16     # Random set of words to evaluate similarity on.
valid_window = 100  # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64    # Number of negative examples to sample.

graph = tf.Graph()

with graph.as_default():

  # Input data.
  train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
  train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
  valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

  # Ops and variables pinned to the CPU because of missing GPU implementation
  with tf.device('/cpu:0'):
    # Look up embeddings for inputs.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Construct the variables for the NCE loss
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

  # Compute the average NCE loss for the batch.
  # tf.nce_loss automatically draws a new sample of the negative labels each
  # time we evaluate the loss.
  loss = tf.reduce_mean(
      tf.nn.nce_loss(weights=nce_weights,
                     biases=nce_biases,
                     labels=train_labels,
                     inputs=embed,
                     num_sampled=num_sampled,
                     num_classes=vocabulary_size))

  # Construct the SGD optimizer using a learning rate of 1.0.
  optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

  # Compute the cosine similarity between minibatch examples and all embeddings.
  # NOTE(review): keep_dims was renamed keepdims in newer TF -- this code
  # targets the TF 1.x API.
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
      normalized_embeddings, valid_dataset)
  similarity = tf.matmul(
      valid_embeddings, normalized_embeddings, transpose_b=True)

  # Add variable initializer.
  init = tf.global_variables_initializer()
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.


# pylint: disable=missing-docstring
# Function to draw visualization of distance between embeddings.
def plot_with_labels(low_dim_embs, labels, filename):
  """Scatter-plot 2-D embeddings with one text label per point and save
  to filename. Relies on the module-level `plt` imported in the try
  block below, so it must only be called after that import succeeds."""
  assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings'
  plt.figure(figsize=(18, 18))  # in inches
  for i, label in enumerate(labels):
    x, y = low_dim_embs[i, :]
    plt.scatter(x, y)
    plt.annotate(label,
                 xy=(x, y),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')

  plt.savefig(filename)


try:
  # pylint: disable=g-import-not-at-top
  from sklearn.manifold import TSNE
  import matplotlib.pyplot as plt

  # project the 500 most frequent words down to 2-D for plotting
  tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact')
  plot_only = 500
  low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
  labels = [reverse_dictionary[i] for i in xrange(plot_only)]
  plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png'))

except ImportError as ex:
  print('Please install sklearn, matplotlib, and scipy to show embeddings.')
  print(ex)
carljm/django | tests/utils_tests/test_timesince.py | 37 | 5880 | from __future__ import unicode_literals
import datetime
import unittest
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.timesince import timesince, timeuntil
class TimesinceTests(unittest.TestCase):
def setUp(self):
self.t = datetime.datetime(2007, 8, 14, 13, 46, 0)
self.onemicrosecond = datetime.timedelta(microseconds=1)
self.onesecond = datetime.timedelta(seconds=1)
self.oneminute = datetime.timedelta(minutes=1)
self.onehour = datetime.timedelta(hours=1)
self.oneday = datetime.timedelta(days=1)
self.oneweek = datetime.timedelta(days=7)
self.onemonth = datetime.timedelta(days=30)
self.oneyear = datetime.timedelta(days=365)
    def test_equal_datetimes(self):
        """Equal datetimes yield '0 minutes'."""
        # NOTE: \xa0 avoids wrapping between value and unit
        self.assertEqual(timesince(self.t, self.t), '0\xa0minutes')
    def test_ignore_microseconds_and_seconds(self):
        """Sub-minute differences are ignored and report '0 minutes'."""
        self.assertEqual(timesince(self.t, self.t + self.onemicrosecond), '0\xa0minutes')
        self.assertEqual(timesince(self.t, self.t + self.onesecond), '0\xa0minutes')
def test_other_units(self):
""" Test other units. """
self.assertEqual(timesince(self.t, self.t + self.oneminute), '1\xa0minute')
self.assertEqual(timesince(self.t, self.t + self.onehour), '1\xa0hour')
self.assertEqual(timesince(self.t, self.t + self.oneday), '1\xa0day')
self.assertEqual(timesince(self.t, self.t + self.oneweek), '1\xa0week')
self.assertEqual(timesince(self.t, self.t + self.onemonth), '1\xa0month')
self.assertEqual(timesince(self.t, self.t + self.oneyear), '1\xa0year')
def test_multiple_units(self):
""" Test multiple units. """
self.assertEqual(timesince(self.t, self.t + 2 * self.oneday + 6 * self.onehour), '2\xa0days, 6\xa0hours')
self.assertEqual(timesince(self.t, self.t + 2 * self.oneweek + 2 * self.oneday), '2\xa0weeks, 2\xa0days')
def test_display_first_unit(self):
"""
If the two differing units aren't adjacent, only the first unit is
displayed.
"""
self.assertEqual(
timesince(self.t, self.t + 2 * self.oneweek + 3 * self.onehour + 4 * self.oneminute),
'2\xa0weeks'
)
self.assertEqual(timesince(self.t, self.t + 4 * self.oneday + 5 * self.oneminute), '4\xa0days')
def test_display_second_before_first(self):
"""
When the second date occurs before the first, we should always
get 0 minutes.
"""
self.assertEqual(timesince(self.t, self.t - self.onemicrosecond), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onesecond), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneminute), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onehour), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneday), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneweek), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onemonth), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneyear), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - 2 * self.oneday - 6 * self.onehour), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - 2 * self.oneweek - 2 * self.oneday), '0\xa0minutes')
self.assertEqual(
timesince(self.t, self.t - 2 * self.oneweek - 3 * self.onehour - 4 * self.oneminute), '0\xa0minutes'
)
self.assertEqual(timesince(self.t, self.t - 4 * self.oneday - 5 * self.oneminute), '0\xa0minutes')
@requires_tz_support
def test_different_timezones(self):
""" When using two different timezones. """
now = datetime.datetime.now()
now_tz = timezone.make_aware(now, timezone.get_default_timezone())
now_tz_i = timezone.localtime(now_tz, timezone.get_fixed_timezone(195))
self.assertEqual(timesince(now), '0\xa0minutes')
self.assertEqual(timesince(now_tz), '0\xa0minutes')
self.assertEqual(timesince(now_tz_i), '0\xa0minutes')
self.assertEqual(timesince(now_tz, now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz, now_tz_i), '0\xa0minutes')
def test_date_objects(self):
""" Both timesince and timeuntil should work on date objects (#17937). """
today = datetime.date.today()
self.assertEqual(timesince(today + self.oneday), '0\xa0minutes')
self.assertEqual(timeuntil(today - self.oneday), '0\xa0minutes')
def test_both_date_objects(self):
""" Timesince should work with both date objects (#9672) """
today = datetime.date.today()
self.assertEqual(timeuntil(today + self.oneday, today), '1\xa0day')
self.assertEqual(timeuntil(today - self.oneday, today), '0\xa0minutes')
self.assertEqual(timeuntil(today + self.oneweek, today), '1\xa0week')
def test_naive_datetime_with_tzinfo_attribute(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
future = datetime.datetime(2080, 1, 1, tzinfo=naive())
self.assertEqual(timesince(future), '0\xa0minutes')
past = datetime.datetime(1980, 1, 1, tzinfo=naive())
self.assertEqual(timeuntil(past), '0\xa0minutes')
def test_thousand_years_ago(self):
t = datetime.datetime(1007, 8, 14, 13, 46, 0)
self.assertEqual(timesince(t, self.t), '1000\xa0years')
| bsd-3-clause |
40223150/w16b_test | ref/wsgi.py | 64 | 41503 | # coding=utf-8
# 上面的程式內容編碼必須在程式的第一或者第二行才會有作用
################# (1) 模組導入區
# 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝
import cherrypy
# 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝
import os
# 導入 random 模組
import random
# 導入 gear 模組
import gear
################# (2) Global variable setup
# Directory containing this program file (on Windows the join keeps a trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Pick data storage directories depending on cloud vs. local execution.
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on the OpenShift cloud platform.
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # Running locally.
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
# The following no-op string literal is commented-out sample code kept by the
# original author (a local input()/for-loop demo); left byte-identical.
'''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印
# 利用 input() 取得的資料型別為字串
toprint = input("要印甚麼內容?")
# 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換
repeat_no = int(input("重複列印幾次?"))
for i in range(repeat_no):
    print(toprint)
'''
################# (3) 程式類別定義區
# 以下改用 CherryPy 網際框架程式架構
# 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計
class Hello(object):
# Hello 類別的啟動設定
_cp_config = {
'tools.encode.encoding': 'utf-8',
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
#'tools.sessions.locking' : 'explicit',
# session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄
'tools.sessions.storage_path' : data_dir+'/tmp',
# session 有效時間設為 60 分鐘
'tools.sessions.timeout' : 60
}
def __init__(self):
# 配合透過案例啟始建立所需的目錄
if not os.path.isdir(data_dir+'/tmp'):
os.mkdir(data_dir+'/tmp')
if not os.path.isdir(data_dir+"/downloads"):
os.mkdir(data_dir+"/downloads")
if not os.path.isdir(data_dir+"/images"):
os.mkdir(data_dir+"/images")
    # The @cherrypy.expose decorator marks the following method as directly
    # reachable via a URL.
    @cherrypy.expose
    # index is CherryPy's built-in default method: it runs when the user's URL
    # names no method.  Methods taking self are instance methods; Python passes
    # object state between them via self.
    def index_orig(self, toprint="Hello World!"):
        """Echo *toprint* back to the client (legacy demo handler)."""
        return toprint
    @cherrypy.expose
    def index(self, guess=None):
        """Serve the guessing-game start page and reset the session state."""
        # Draw a fresh answer for the guessing game.
        theanswer = random.randint(1, 100)
        thecount = 0
        # Store the answer and the attempt counter in the session.
        cherrypy.session['answer'] = theanswer
        cherrypy.session['count'] = thecount
        # Emit the HTML page with the guess form and two Brython demos.
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>
<hr>
<!-- 以下在網頁內嵌 Brython 程式 -->
<script type="text/python">
from browser import document, alert
def echo(ev):
    alert(document["zone"].value)
# 將文件中名稱為 mybutton 的物件, 透過 click 事件與 echo 函式 bind 在一起
document['mybutton'].bind('click',echo)
</script>
<input id="zone"><button id="mybutton">click !</button>
<hr>
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
# 以下使用中文變數名稱
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def twoDgear(self, N=20, M=5, P=15):
        """Serve the input form for the 2D gear drawing (posts to do2Dgear)."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do2Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def threeDgear(self, N=20, M=5, P=15):
        """Serve the input form for the 3D gear demo (posts to do3Dgear)."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<form method=POST action=do3Dgear>
齒數:<input type=text name=N><br />
模數:<input type=text name=M><br />
壓力角:<input type=text name=P><br />
<input type=submit value=send>
</form>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def do2Dgear(self, N=20, M=5, P=15):
        """Render a canvas page whose first line starts at the submitted N, M.

        NOTE(review): N and M are interpolated as the moveTo() x,y
        coordinates of the red line — this looks like placeholder code
        rather than a real gear drawing; confirm the intent.
        """
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
import math
# 畫布指定在名稱為 plotarea 的 canvas 上
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 用紅色畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
'''
        # Splice the user-supplied values into the embedded script.
        outstring += '''
ctx.moveTo('''+str(N)+","+str(M)+")"
        outstring += '''
ctx.lineTo(0, 500)
ctx.strokeStyle = "red"
ctx.stroke()
# 用藍色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 0)
ctx.strokeStyle = "blue"
ctx.stroke()
# 用綠色再畫一條直線
ctx.beginPath()
ctx.lineWidth = 3
ctx.moveTo(0, 0)
ctx.lineTo(500, 500)
ctx.strokeStyle = "green"
ctx.stroke()
# 用黑色畫一個圓
ctx.beginPath()
ctx.lineWidth = 3
ctx.strokeStyle = "black"
ctx.arc(250,250,50,0,2*math.pi)
ctx.stroke()
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
        return outstring
@cherrypy.expose
# N 為齒數, M 為模數, P 為壓力角
def do3Dgear(self, N=20, M=5, P=15):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document, window
# 從 javascript 導入 JSConstructor
from javascript import JSConstructor
import math
cango = JSConstructor(window.Cango2D)
if (!JSConstructor(window.pfcIsWindows())):
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
session = JSConstructor(window.pfcGetProESession())
# 設定 config option
session.SetConfigOption("comp_placement_assumptions","no")
# 建立擺放零件的位置矩陣
identityMatrix = JSConstructor(window.pfcCreate ("pfcMatrix3D"))
for x in range(4):
for y in range(4):
if (x == y):
JSConstructor(window.identityMatrix.Set (x, y, 1.0))
else:
JSConstructor(window.identityMatrix.Set (x, y, 0.0))
transf = JSConstructor(window.pfcCreate ("pfcTransform3D").Create (identityMatrix))
# 取得目前的工作目錄
currentDir = session.getCurrentDirectory()
# 以目前已開檔, 作為 model
model = session.CurrentModel
# 查驗有無 model, 或 model 類別是否為組立件
if (model == None or model.Type != JSConstructor(window.pfcCreate("pfcModelType").MDL_ASSEMBLY)):
raise ValueError("Current model is not an assembly.")
assembly = model
'''----------------------------------------------- link0 -------------------------------------------------------------'''
# 檔案目錄,建議將圖檔放置工作目錄下較方便使用
descr = rJSConstructor(window.pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link0.prt"))
#若 link1.prt 在 session 則直接取用
componentModel = session.GetModelFromDescr (descr)
# 若 link1.prt 不在 session 則從工作目錄中載入 session
componentModel = session.RetrieveModel(descr)
# 若 link1.prt 已經在 session 則放入組立檔中
if (componentModel != None):
# 注意這個 asmcomp 即為設定約束條件的本體
# asmcomp 為特徵物件,直接將零件, 以 transf 座標轉換放入組立檔案中
asmcomp = assembly.AssembleComponent (componentModel, transf)
# 建立約束條件變數
constrs = JSConstructor(window.pfcCreate ("pfcComponentConstraints"))
# 設定組立檔中的三個定位面, 注意內定名稱與 Pro/E WF 中的 ASM_D_FRONT 不同, 而是 ASM_FRONT
asmDatums = ["ASM_FRONT", "ASM_TOP", "ASM_RIGHT"]
# 設定零件檔中的三個定位面, 名稱與 Pro/E WF 中相同
compDatums = ["FRONT", "TOP", "RIGHT"]
# 建立 ids 變數, intseq 為 sequence of integers 為資料類別, 使用者可以經由整數索引擷取此資料類別的元件, 第一個索引為 0
ids = JSConstructor(window.pfcCreate ("intseq"))
# 建立路徑變數
path = JSConstructor(window.pfcCreate ("MpfcAssembly").CreateComponentPath (assembly, ids))
# 採用互動式設定相關的變數
MpfcSelect = JSConstructor(window.pfcCreate ("MpfcSelect"))
# 利用迴圈分別約束組立與零件檔中的三個定位平面
for i in range(3):
# 設定組立參考面
asmItem = assembly.GetItemByName (JSConstructor(window.pfcCreate ("pfcModelItemType").ITEM_SURFACE, asmDatums [i]))
# 若無對應的組立參考面, 則啟用互動式平面選擇表單 flag
if (asmItem == None):
interactFlag = true
continue
# 設定零件參考面
compItem = componentModel.GetItemByName (JSConstructor(window.pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums [i])
# 若無對應的零件參考面, 則啟用互動式平面選擇表單 flag
if (compItem == None):
interactFlag = true
continue;
asmSel = JSConstructor(window.MpfcSelect.CreateModelItemSelection (asmItem, path))
compSel = JSConstructor(window.MpfcSelect.CreateModelItemSelection (compItem, None))
constr = JSConstructor(window.pfcCreate ("pfcComponentConstraint").Create (JSConstructor(window.pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN))
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = JSConstructor(window.pfcCreate ("pfcConstraintAttributes")).Create (false, false)
# 將互動選擇相關資料, 附加在程式約束變數之後
constrs.Append (constr)
# 設定組立約束條件
asmcomp.SetConstraints (constrs, None)
'''-------------------------------------------------------------------------------------------------------------------'''
'''----------------------------------------------- link1 -------------------------------------------------------------'''
descr = JSConstructor(window.pfcCreate ("pfcModelDescriptor")).CreateFromFileName ("v:/home/fourbar/link1.prt")
componentModel = session.GetModelFromDescr (descr)
componentModel = session.RetrieveModel(descr)
if (componentModel != None):
asmcomp = JSConstructor(window.assembly.AssembleComponent (componentModel, transf)
components = assembly.ListFeaturesByType(true, JSConstructor(window.pfcCreate ("pfcFeatureType")).FEATTYPE_COMPONENT);
featID = components.Item(0).Id
ids.append(featID)
subPath = JSConstructor(window.pfcCreate ("MpfcAssembly")).CreateComponentPath( assembly, ids )
subassembly = subPath.Leaf
asmDatums = ["A_1", "TOP", "ASM_TOP"]
compDatums = ["A_1", "TOP", "TOP"]
relation = (JSConstructor(window.pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN), JSConstructor(window.pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
relationItem = JSConstructor(window.pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE))
constrs = JSConstructor(window.pfcCreate ("pfcComponentConstraints"))
for i in range(2):
asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i])
if (asmItem == None):
interactFlag = True
continue
JSConstructor(window.compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == None):
interactFlag = true
continue
MpfcSelect = JSConstructor(window.pfcCreate ("MpfcSelect"))
asmSel = JSConstructor(window.MpfcSelect.CreateModelItemSelection (asmItem, subPath))
compSel = JSConstructor(window.MpfcSelect.CreateModelItemSelection (compItem, None))
constr = JSConstructor(window.pfcCreate("pfcComponentConstraint").Create (relation[i]))
constr.AssemblyReference = asmSel
constr.ComponentReference = compSel
constr.Attributes = JSConstructor(window.pfcCreate ("pfcConstraintAttributes").Create (true, false))
constrs.append (constr):
asmcomp.SetConstraints (constrs, None)
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link2 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link2.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID+1);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link3 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link3.prt");
var componentModel = session.GetModelFromDescr (descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
var ids = pfcCreate ("intseq");
ids.Append(featID+2);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2");
var compDatums = new Array ("A_1");
for (var i = 0; i < 1; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP");
var compDatums = new Array ("A_2", "BOTTON");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
var session = pfcGetProESession ();
var solid = session.CurrentModel;
properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
<canvas id="plotarea" width="800" height="600"></canvas>
</body>
</html>
'''
return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def mygeartest(self, N=20, M=5, P=15):
        """Serve a page whose embedded Brython script draws one involute spur gear."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
    ctx.beginPath()
    ctx.lineWidth = width
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = fill
    ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def 齒輪(midx, midy, rp, n, 顏色):
    # 將角度轉換因子設為全域變數
    global deg
    # 齒輪漸開線分成 15 線段繪製
    imax = 15
    # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
    create_line(midx, midy, midx, midy-rp)
    # 畫出 rp 圓, 畫圓函式尚未定義
    #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
    # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
    # 模數也就是齒冠大小
    a=2*rp/n
    # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
    d=2.5*rp/n
    # ra 為齒輪的外圍半徑
    ra=rp+a
    print("ra:", ra)
    # 畫出 ra 圓, 畫圓函式尚未定義
    #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
    # rb 則為齒輪的基圓半徑
    # 基圓為漸開線長齒之基準圓
    rb=rp*cos(20*deg)
    print("rp:", rp)
    print("rb:", rb)
    # 畫出 rb 圓 (基圓), 畫圓函式尚未定義
    #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
    # rd 為齒根圓半徑
    rd=rp-d
    # 當 rd 大於 rb 時
    print("rd:", rd)
    # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
    #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
    # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
    # 將圓弧分成 imax 段來繪製漸開線
    dr=(ra-rb)/imax
    # tan(20*deg)-20*deg 為漸開線函數
    sigma=pi/(2*n)+tan(20*deg)-20*deg
    for j in range(n):
        ang=-2.*j*pi/n+sigma
        ang2=2.*j*pi/n+sigma
        lxd=midx+rd*sin(ang2-2.*pi/n)
        lyd=midy-rd*cos(ang2-2.*pi/n)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(alpha-ang)
            ypt=r*cos(alpha-ang)
            xd=rd*sin(-ang)
            yd=rd*cos(-ang)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                lfx=midx+xpt
                lfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # the line from last end of dedendum point to the recent
        # end of dedendum point
        # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
        # 下列為齒根圓上用來近似圓弧的直線
        create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(ang2-alpha)
            ypt=r*cos(ang2-alpha)
            xd=rd*sin(ang2)
            yd=rd*cos(ang2)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                rfx=midx+xpt
                rfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
        # 下列為齒頂圓上用來近似圓弧的直線
        create_line(lfx,lfy,rfx,rfy,fill=顏色)
齒輪(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def mygeartest2(self, N=20, M=5, P=15):
        """Serve a page whose embedded Brython script draws a meshing 3-gear train via spur.py."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案
import spur
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
# 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖
# 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組
# midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色
# Gear(midx, midy, rp, n=20, pa=20, color="black"):
# 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角
# 壓力角 pa 單位為角度
pa = 20
# m 為模數
m = 20
# 第1齒輪齒數
n_g1 = 17
# 第2齒輪齒數
n_g2 = 99
# 第3齒輪齒數
n_g3 = 17
# 計算兩齒輪的節圓半徑
rp_g1 = m*n_g1/2
rp_g2 = m*n_g2/2
rp_g3 = m*n_g3/2
# 繪圖第1齒輪的圓心座標
x_g1 = 280
y_g1 = 400
# 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同
x_g2 = x_g1 + rp_g1 + rp_g2
y_g2 = y_g1
# 第3齒輪的圓心座標
x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3
y_g3 = y_g1
# 將第1齒輪順時鐘轉 90 度
# 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g1, y_g1)
# rotate to engage
ctx.rotate(pi/2)
# put it back
ctx.translate(-x_g1, -y_g1)
spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue")
ctx.restore()
# 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g2, y_g2)
# rotate to engage
ctx.rotate(-pi/2-pi/n_g2)
# put it back
ctx.translate(-x_g2, -y_g2)
spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black")
ctx.restore()
# 將第3齒輪逆時鐘轉 90 度之後, 再往回轉第2齒輪定位帶動轉角, 然後再逆時鐘多轉一齒, 以便與第2齒輪進行囓合
ctx.save()
# translate to the origin of second gear
ctx.translate(x_g3, y_g3)
# rotate to engage
# pi+pi/n_g2 為第2齒輪從順時鐘轉 90 度之後, 必須配合目前的標記線所作的齒輪 2 轉動角度, 要轉換到齒輪3 的轉動角度
# 必須乘上兩齒輪齒數的比例, 若齒輪2 大, 則齒輪3 會轉動較快
# 第1個 -pi/2 為將原先垂直的第3齒輪定位線逆時鐘旋轉 90 度
# -pi/n_g3 則是第3齒與第2齒定位線重合後, 必須再逆時鐘多轉一齒的轉角, 以便進行囓合
# (pi+pi/n_g2)*n_g2/n_g3 則是第2齒原定位線為順時鐘轉動 90 度,
# 但是第2齒輪為了與第1齒輪囓合, 已經距離定位線, 多轉了 180 度, 再加上第2齒輪的一齒角度, 因為要帶動第3齒輪定位,
# 這個修正角度必須要再配合第2齒與第3齒的轉速比加以轉換成第3齒輪的轉角, 因此乘上 n_g2/n_g3
ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3)
# put it back
ctx.translate(-x_g3, -y_g3)
spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red")
ctx.restore()
# 按照上面三個正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推, 完成6個齒輪的囓合繪圖
</script>
<canvas id="plotarea" width="1200" height="1200"></canvas>
</body>
</html>
'''
        return outstring
    @cherrypy.expose
    # N = number of teeth, M = module, P = pressure angle
    def my3Dgeartest(self, N=20, M=5, P=15):
        """Serve a page whose embedded Brython script draws one involute spur gear (ASCII-named variant of mygeartest)."""
        outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<!-- 載入 brython.js -->
<script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script>
<script src="/static/Cango2D.js" type="text/javascript"></script>
<script src="/static/gearUtils-04.js" type="text/javascript"></script>
</head>
<!-- 啟動 brython() -->
<body onload="brython()">
<!-- 以下為 canvas 畫圖程式 -->
<script type="text/python">
# 從 browser 導入 document
from browser import document
from math import *
# 準備在 id="plotarea" 的 canvas 中繪圖
canvas = document["plotarea"]
ctx = canvas.getContext("2d")
def create_line(x1, y1, x2, y2, width=3, fill="red"):
    ctx.beginPath()
    ctx.lineWidth = width
    ctx.moveTo(x1, y1)
    ctx.lineTo(x2, y2)
    ctx.strokeStyle = fill
    ctx.stroke()
# 導入數學函式後, 圓周率為 pi
# deg 為角度轉為徑度的轉換因子
deg = pi/180.
#
# 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖
#
# 定義一個繪正齒輪的繪圖函式
# midx 為齒輪圓心 x 座標
# midy 為齒輪圓心 y 座標
# rp 為節圓半徑, n 為齒數
def gear(midx, midy, rp, n, 顏色):
    # 將角度轉換因子設為全域變數
    global deg
    # 齒輪漸開線分成 15 線段繪製
    imax = 15
    # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線
    create_line(midx, midy, midx, midy-rp)
    # 畫出 rp 圓, 畫圓函式尚未定義
    #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
    # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數
    # 模數也就是齒冠大小
    a=2*rp/n
    # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍
    d=2.5*rp/n
    # ra 為齒輪的外圍半徑
    ra=rp+a
    print("ra:", ra)
    # 畫出 ra 圓, 畫圓函式尚未定義
    #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
    # rb 則為齒輪的基圓半徑
    # 基圓為漸開線長齒之基準圓
    rb=rp*cos(20*deg)
    print("rp:", rp)
    print("rb:", rb)
    # 畫出 rb 圓 (基圓), 畫圓函式尚未定義
    #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
    # rd 為齒根圓半徑
    rd=rp-d
    # 當 rd 大於 rb 時
    print("rd:", rd)
    # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義
    #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
    # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小
    # 將圓弧分成 imax 段來繪製漸開線
    dr=(ra-rb)/imax
    # tan(20*deg)-20*deg 為漸開線函數
    sigma=pi/(2*n)+tan(20*deg)-20*deg
    for j in range(n):
        ang=-2.*j*pi/n+sigma
        ang2=2.*j*pi/n+sigma
        lxd=midx+rd*sin(ang2-2.*pi/n)
        lyd=midy-rd*cos(ang2-2.*pi/n)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(alpha-ang)
            ypt=r*cos(alpha-ang)
            xd=rd*sin(-ang)
            yd=rd*cos(-ang)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                lfx=midx+xpt
                lfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # the line from last end of dedendum point to the recent
        # end of dedendum point
        # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標
        # 下列為齒根圓上用來近似圓弧的直線
        create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色)
        #for(i=0;i<=imax;i++):
        for i in range(imax+1):
            r=rb+i*dr
            theta=sqrt((r*r)/(rb*rb)-1.)
            alpha=theta-atan(theta)
            xpt=r*sin(ang2-alpha)
            ypt=r*cos(ang2-alpha)
            xd=rd*sin(ang2)
            yd=rd*cos(ang2)
            # i=0 時, 繪線起點由齒根圓上的點, 作為起點
            if(i==0):
                last_x = midx+xd
                last_y = midy-yd
            # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點
            create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色)
            # 最後一點, 則為齒頂圓
            if(i==imax):
                rfx=midx+xpt
                rfy=midy-ypt
            last_x = midx+xpt
            last_y = midy-ypt
        # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標
        # 下列為齒頂圓上用來近似圓弧的直線
        create_line(lfx,lfy,rfx,rfy,fill=顏色)
gear(400,400,300,41,"blue")
</script>
<canvas id="plotarea" width="800" height="800"></canvas>
</body>
</html>
'''
        return outstring
@cherrypy.expose
def doCheck(self, guess=None):
    """Check the user's guess against the answer stored in the session.

    Returns a hint string ("big"/"small"/"error") followed by a fresh
    guess form, or the success message when the guess is exact.
    """
    # If the user requests doCheck directly without a guess, redirect
    # back to the root method.
    if guess is None:
        raise cherrypy.HTTPRedirect("/")
    # Fetch 'answer' from the session.  If the session is missing or
    # stale (doCheck executed directly), session.get returns None and
    # int(None) raises TypeError -- redirect back to the root.
    # Narrowed from a bare except: only conversion failures are expected.
    try:
        theanswer = int(cherrypy.session.get('answer'))
    except (TypeError, ValueError):
        raise cherrypy.HTTPRedirect("/")
    # The 'guess' value obtained from the form is always a string.
    try:
        theguess = int(guess)
    except (TypeError, ValueError):
        return "error " + self.guessform()
    # Every doCheck execution increments the attempt counter.
    cherrypy.session['count'] += 1
    # Compare the answer with the guess.
    if theanswer < theguess:
        return "big " + self.guessform()
    elif theanswer > theguess:
        return "small " + self.guessform()
    else:
        # Correct guess.  (The original also read the accumulated count
        # from the session here, but never used it -- removed.)
        return "exact: <a href=''>再猜</a>"
def guessform(self):
    """Return the hypertext form that lets the user enter a guess.

    The leading "answer/count" prefix exposes the current session state
    (debug output) before the form itself.
    """
    # Print the hypertext form for the user to fill in (translated from
    # the original Chinese comment).
    outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck>
請輸入您所猜的整數:<input type=text name=guess><br />
<input type=submit value=send>
</form>'''
    return outstring
################# (4) Application start-up section
# Configure static directories/files relative to the program's directory.
application_conf = {'/static':{
    'tools.staticdir.on': True,
    # A 'static' directory must be created manually under the run directory.
    'tools.staticdir.dir': _curdir+"/static"},
    '/downloads':{
    'tools.staticdir.on': True,
    'tools.staticdir.dir': data_dir+"/downloads"},
    '/images':{
    'tools.staticdir.on': True,
    'tools.staticdir.dir': data_dir+"/images"}
}
root = Midterm()
root.gear = gear.Gear()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on OpenShift.
    application = cherrypy.Application(root, config=application_conf)
else:
    # Running locally.
    cherrypy.quickstart(root, config=application_conf)
| agpl-3.0 |
kernelci/kernelci-backend | app/utils/report/test.py | 1 | 11006 | # Copyright (C) Collabora Limited 2018,2019
# Author: Guillaume Tucker <guillaume.tucker@collabora.com>
# Author: Michal Galka <michal.galka@collabora.com>
# Author: Ana Guerrero Lopez <ana.guerrero@collabora.com>
#
# Copyright (C) Baylibre 2019
# Author: Khouloud Touil <ktouil@baylibre.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Create the tests email report."""
import models
import os
import pymongo
import urllib
import utils
import utils.db
import utils.report.common as rcommon
import yaml
TEST_REPORT_FIELDS = [
models.ARCHITECTURE_KEY,
models.BOARD_INSTANCE_KEY,
models.BOOT_LOG_HTML_KEY,
models.BOOT_LOG_KEY,
models.BUILD_ID_KEY,
models.BUILD_ENVIRONMENT_KEY,
models.COMPILER_VERSION_FULL_KEY,
models.CREATED_KEY,
models.DEFCONFIG_FULL_KEY,
models.DEFCONFIG_KEY,
models.DEVICE_TYPE_KEY,
models.FILE_SERVER_RESOURCE_KEY,
models.GIT_BRANCH_KEY,
models.GIT_COMMIT_KEY,
models.GIT_DESCRIBE_KEY,
models.GIT_URL_KEY,
models.ID_KEY,
models.INITRD_KEY,
models.INITRD_INFO_KEY,
models.JOB_ID_KEY,
models.JOB_KEY,
models.KERNEL_KEY,
models.LAB_NAME_KEY,
models.NAME_KEY,
models.STATUS_KEY,
models.TEST_CASES_KEY,
models.SUB_GROUPS_KEY,
models.TIME_KEY,
models.VERSION_KEY,
]
TEMPLATES_YAML = os.path.join(rcommon.TEMPLATES_DIR, "templates.yaml")
with open(TEMPLATES_YAML) as templates_file:
TEMPLATES = yaml.safe_load(templates_file)["templates"]
def _regression_message(data):
    """Summarise a regression history as a human-readable string.

    *data* is the ordered regression list: the first entry is the last
    passing run, the second the first failing run and the last entry the
    most recent failure.
    """
    last_pass = data[0]
    first_fail = data[1]
    last_fail = data[-1]
    if len(data) == 2:
        # One pass followed by a single fail: a brand new failure.
        return "new failure (last pass: {})".format(
            last_pass[models.KERNEL_KEY])
    age = last_fail[models.CREATED_KEY] - first_fail[models.CREATED_KEY]
    suffix = 's' if age.days > 1 else ''
    return "failing since {} day{} (last pass: {}, first fail: {})".format(
        age.days, suffix, last_pass[models.KERNEL_KEY],
        first_fail[models.KERNEL_KEY])
def _add_test_group_data(group, db, spec, hierarchy=None, regressions=None):
    """Recursively enrich *group* with test case, result and regression data.

    :param group: The test group document to annotate (modified in place).
    :param db: The mongodb database handle.
    :param spec: Base query spec used to look up regression documents.
    :param hierarchy: Path of group names from the root down to the parent
        of *group*; ``None`` at the top level.
    :param regressions: Accumulator shared across the whole group tree; the
        top-level call creates it and stores it on the root group document.
    """
    # Fix: avoid the mutable-default-argument anti-pattern (hierarchy=[]).
    # The original never mutated the default, so `(hierarchy or [])` is
    # behaviorally identical.
    hierarchy = (hierarchy or []) + [group[models.NAME_KEY]]
    case_collection = db[models.TEST_CASE_COLLECTION]
    regr_collection = db[models.TEST_REGRESSION_COLLECTION]
    group_collection = db[models.TEST_GROUP_COLLECTION]
    regr_spec = dict(spec)
    regr_count = 0
    if regressions is None:
        # Top-level group: the regression list lives on the root document.
        regressions = group.setdefault("regressions", list())
    test_cases = []
    for test_case_id in group[models.TEST_CASES_KEY]:
        test_case = utils.db.find_one2(case_collection, test_case_id)
        # Render whole-valued float measurements as plain integers.
        measurements = test_case[models.MEASUREMENTS_KEY]
        for measurement in measurements:
            value = measurement['value']
            if (value % 1.0) == 0:
                measurement['value'] = int(value)
        if test_case[models.STATUS_KEY] == "FAIL":
            # Look for a matching regression document for this exact
            # hierarchy path; failures without one have never passed.
            regr_spec[models.HIERARCHY_KEY] = (
                hierarchy + [test_case[models.NAME_KEY]])
            regr = utils.db.find_one2(regr_collection, regr_spec)
            test_case["failure_message"] = (
                _regression_message(regr[models.REGRESSIONS_KEY])
                if regr else "never passed")
            if regr:
                regr_count += 1
                test_case["regression"] = regr
            # Keep only the first 10 log lines for the report body and
            # record how many were elided.
            test_case["log_lines_short"] = test_case[
                models.LOG_LINES_KEY][:10]
            log_lines_count = (len(test_case[models.LOG_LINES_KEY]) - len(
                test_case["log_lines_short"]))
            test_case["log_lines_removed"] = log_lines_count
            regressions.append(test_case)
        test_cases.append(test_case)
    test_cases.sort(key=lambda tc: tc[models.INDEX_KEY])
    sub_groups = []
    for sub_group_id in group[models.SUB_GROUPS_KEY]:
        sub_group = utils.db.find_one2(group_collection, sub_group_id)
        _add_test_group_data(sub_group, db, spec, hierarchy, regressions)
        sub_groups.append(sub_group)
    # Per-status counts for this group's own test cases...
    results = {
        st: len(list(t for t in test_cases if t[models.STATUS_KEY] == st))
        for st in ["PASS", "FAIL", "SKIP"]
    }
    # ...and totals aggregated over all sub groups.
    total_results = dict(results)
    for sub_group_sums in (sg["total_results"] for sg in sub_groups):
        # Fix: .items() instead of the Python-2-only .iteritems(); the
        # iteration semantics are identical on both interpreters.
        for status, count in sub_group_sums.items():
            total_results[status] += count
    total_regr = regr_count + sum(sg["regr_count"] for sg in sub_groups)
    group.update({
        "test_cases": test_cases,
        "results": results,
        "regr_count": regr_count,
        "sub_groups": sub_groups,
        "total_regr": total_regr,
        "total_tests": sum(total_results.values()),
        "total_results": total_results,
    })
def _create_summaries(groups):
def squash(item, max_length):
if len(item) > max_length:
n = (max_length - 3) / 2
m = max_length - 3 - n
item = item[:m] + '...' + item[-n:]
return item
columns = [
"platform", "arch", "lab", "compiler", "defconfig", "regressions"
]
rows = [
(
g['device_type'],
g['arch'],
g['lab_name'],
g['build_environment'],
g['defconfig_full'],
str(g['total_regr']),
) for g in groups
]
squashed = [tuple(squash(item, 28) for item in row)for row in rows]
widths = [len(col) for col in columns]
for row in squashed:
row_widths = [len(col) for col in row]
widths = [max(col) for col in zip(widths, row_widths)]
fmt = " | ".join("{{:{w}}}".format(w=w) for w in widths)
sep = '-+-'.join('-' * w for w in widths)
headers = '\n'.join([fmt.format(*tuple(columns)), sep])
summaries = [fmt.format(*row) for row in squashed]
return headers, summaries
def create_test_report(db_options, data, email_format, email_template=None,
                       base_path=utils.BASE_PATH):
    """Create the tests report email to be sent.

    :param db_options: The mongodb database connection parameters.
    :type db_options: dict
    :param data: The meta-data for the test job.
    :type data: dictionary
    :param email_format: The email format to send.
    :type email_format: list
    :param email_template: A specific email template to use.
    :type email_template: str
    :param base_path: Path to the top-level storage directory.
    :type base_path: string
    :return A tuple with the email body, the email subject and the headers as
    dictionary. If an error occured, None.
    """
    # NOTE(review): email_format and base_path are never used in this body
    # -- presumably kept for interface compatibility; confirm.
    database = utils.db.get_db_connection(db_options)
    job, branch, kernel, plan = (data[k] for k in [
        models.JOB_KEY,
        models.GIT_BRANCH_KEY,
        models.KERNEL_KEY,
        models.PLAN_KEY,
    ])
    # Pick the per-plan template configuration, if any.
    template = TEMPLATES.get(email_template or plan, {})
    # The plan name is not part of the group lookup spec.
    spec = {x: y for x, y in data.iteritems() if x != models.PLAN_KEY}
    group_spec = dict(spec)
    group_spec.update({
        models.NAME_KEY: plan,
        models.PARENT_ID_KEY: None,
    })
    # Top-level test groups for this job/branch/kernel/plan combination.
    groups = list(utils.db.find(
        database[models.TEST_GROUP_COLLECTION],
        spec=group_spec,
        fields=TEST_REPORT_FIELDS,
        sort=[
            (models.DEVICE_TYPE_KEY, pymongo.ASCENDING),
            (models.BUILD_ENVIRONMENT_KEY, pymongo.ASCENDING),
            (models.DEFCONFIG_KEY, pymongo.ASCENDING),
            (models.LAB_NAME_KEY, pymongo.ASCENDING),
            (models.ARCHITECTURE_KEY, pymongo.ASCENDING),
        ])
    )
    if not groups:
        utils.LOG.warning("Failed to find test group documents")
        # NOTE(review): this path returns a bare None while the path below
        # returns a (None, None, None) triple -- callers unpacking three
        # values would fail here; confirm which contract is intended.
        return None
    # Annotate every group with its cases, sub groups and regressions.
    for group in groups:
        group_spec = dict(spec)
        group_spec.update({
            k: group[k] for k in [
                models.DEVICE_TYPE_KEY,
                models.ARCHITECTURE_KEY,
                models.BUILD_ENVIRONMENT_KEY,
                models.DEFCONFIG_FULL_KEY,
            ]
        })
        _add_test_group_data(group, database, group_spec)
    # NOTE(review): tests_total is computed but never used below.
    tests_total = sum(group["total_tests"] for group in groups)
    regr_total = sum(group["total_regr"] for group in groups)
    if regr_total == 0:
        utils.LOG.info("No regressions, not sending any report.")
        return None, None, None
    plan_subject = template.get("subject", plan)
    subject_str = "{}/{} {}: {} runs, {} regressions ({})".format(
        job, branch, plan_subject, len(groups), regr_total, kernel)
    git_url, git_commit = (groups[0][k] for k in [
        models.GIT_URL_KEY, models.GIT_COMMIT_KEY])
    # Add test suites info if it's the same for all the groups (typical case)
    keys = set()
    last_test_suites = None
    for g in groups:
        info = g[models.INITRD_INFO_KEY]
        if not info:
            continue
        last_test_suites = info.get('tests_suites')
        if not last_test_suites:
            continue
        keys.add(tuple(
            (ts['name'], ts['git_commit']) for ts in last_test_suites)
        )
    # Only expose the suites when every group agreed on the same set.
    test_suites = list(last_test_suites) if len(keys) == 1 else None
    totals = {
        status: sum(g['total_results'][status] for g in groups)
        for status in ["PASS", "FAIL", "SKIP"]
    }
    # Only groups that actually have regressions make it into the body.
    groups = [gr for gr in groups if gr['total_regr']]
    headers = {
        rcommon.X_REPORT: rcommon.TEST_REPORT_TYPE,
        rcommon.X_BRANCH: branch,
        rcommon.X_TREE: job,
        rcommon.X_KERNEL: kernel,
    }
    summary_headers, summaries = _create_summaries(groups)
    for group, summary in zip(groups, summaries):
        group['summary'] = summary
    template_data = {
        "subject_str": subject_str,
        "summary_headers": summary_headers,
        "tree": job,
        "branch": branch,
        "branch_uri": urllib.quote_plus(branch),
        "git_url": git_url,
        "kernel": kernel,
        "git_commit": git_commit,
        "plan": plan,
        "boot_log": models.BOOT_LOG_KEY,
        "boot_log_html": models.BOOT_LOG_HTML_KEY,
        "storage_url": rcommon.DEFAULT_STORAGE_URL,
        "base_url": rcommon.DEFAULT_BASE_URL,
        "test_groups": groups,
        "test_suites": test_suites,
        "totals": totals,
    }
    template_file = template.get("file", "test.txt")
    template_data.update(template.get("params", {}))
    body = rcommon.create_txt_email(template_file, **template_data)
    return body, subject_str, headers
| lgpl-2.1 |
anryko/ansible | lib/ansible/modules/network/check_point/cp_mgmt_dns_domain.py | 20 | 4802 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_dns_domain
short_description: Manages dns-domain objects on Check Point over Web Services API
description:
- Manages dns-domain objects on Check Point devices including creating, updating and removing objects.
- All operations are performed over Web Services API.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
type: str
required: True
is_sub_domain:
description:
- Whether to match sub-domains in addition to the domain itself.
type: bool
tags:
description:
- Collection of tag identifiers.
type: list
color:
description:
- Color of the object. Should be one of existing colors.
type: str
choices: ['aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan', 'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
'pink', 'turquoise', 'dark blue', 'firebrick', 'brown', 'forest green', 'gold', 'dark gold', 'gray', 'dark gray', 'light green', 'lemon chiffon',
'coral', 'sea green', 'sky blue', 'magenta', 'purple', 'slate blue', 'violet red', 'navy blue', 'olive', 'orange', 'red', 'sienna', 'yellow']
comments:
description:
- Comments string.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
ignore_warnings:
description:
- Apply changes ignoring warnings.
type: bool
ignore_errors:
description:
- Apply changes ignoring errors. You won't be able to publish such a changes. If ignore-warnings flag was omitted - warnings will also be ignored.
type: bool
extends_documentation_fragment: checkpoint_objects
"""
EXAMPLES = """
- name: add-dns-domain
cp_mgmt_dns_domain:
is_sub_domain: false
name: .www.example.com
state: present
- name: set-dns-domain
cp_mgmt_dns_domain:
is_sub_domain: true
name: .www.example.com
state: present
- name: delete-dns-domain
cp_mgmt_dns_domain:
name: .example.com
state: absent
"""
RETURN = """
cp_mgmt_dns_domain:
description: The checkpoint object created or updated.
returned: always, except when deleting the object.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_objects, api_call
def main():
    """Build the module argument spec, run the Check Point API call and exit."""
    color_choices = [
        'aquamarine', 'black', 'blue', 'crete blue', 'burlywood', 'cyan',
        'dark green', 'khaki', 'orchid', 'dark orange', 'dark sea green',
        'pink', 'turquoise', 'dark blue', 'firebrick', 'brown',
        'forest green', 'gold', 'dark gold', 'gray', 'dark gray',
        'light green', 'lemon chiffon', 'coral', 'sea green', 'sky blue',
        'magenta', 'purple', 'slate blue', 'violet red', 'navy blue',
        'olive', 'orange', 'red', 'sienna', 'yellow',
    ]
    # Object-specific parameters; the shared checkpoint parameters
    # (state, auto_publish_session, ...) are merged in afterwards.
    argument_spec = dict(
        name=dict(type='str', required=True),
        is_sub_domain=dict(type='bool'),
        tags=dict(type='list'),
        color=dict(type='str', choices=color_choices),
        comments=dict(type='str'),
        details_level=dict(type='str', choices=['uid', 'standard', 'full']),
        ignore_warnings=dict(type='bool'),
        ignore_errors=dict(type='bool'),
    )
    argument_spec.update(checkpoint_argument_spec_for_objects)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    result = api_call(module, 'dns-domain')
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
SmartArduino/Arduino-1 | arduino-core/src/processing/app/i18n/python/requests/packages/charade/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
    """Charset prober that runs input bytes through a UTF-8 state machine."""

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        """Reset the state machine and the multi-byte character counter."""
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Feed a buffer of bytes; update state and return the result."""
        for c in aBuf:
            state = self._mCodingSM.next_state(c)
            if state == constants.eError:
                # Invalid UTF-8 sequence: definitely not this charset.
                self._mState = constants.eNotMe
                break
            if state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if state == constants.eStart:
                # A full character was consumed; count multi-byte ones.
                if self._mCodingSM.get_current_charlen() >= 2:
                    self._mNumOfMBChar += 1
        if self.get_state() == constants.eDetecting:
            # Enough confidence gathered: shortcut to a positive answer.
            if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Confidence grows with each multi-byte character seen, capped at 0.99."""
        unlike = 0.99
        if self._mNumOfMBChar >= 6:
            return unlike
        # Halving is an exact float operation, so this loop yields the same
        # value as the original repeated multiplication.
        for _ in range(self._mNumOfMBChar):
            unlike *= ONE_CHAR_PROB
        return 1.0 - unlike
| lgpl-2.1 |
jamespacileo/django-france | django/conf/urls/defaults.py | 320 | 1656 | from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.core.exceptions import ImproperlyConfigured
__all__ = ['handler404', 'handler500', 'include', 'patterns', 'url']
handler404 = 'django.views.defaults.page_not_found'
handler500 = 'django.views.defaults.server_error'
def include(arg, namespace=None, app_name=None):
    """Return an (urlconf_module, app_name, namespace) triple for url()."""
    if not isinstance(arg, tuple):
        # Plain module path/object: use the caller-provided namespace hints.
        return (arg, app_name, namespace)
    # The tuple form already carries its own namespace hint, which must
    # not be overridden by an explicit namespace argument.
    if namespace:
        raise ImproperlyConfigured('Cannot override the namespace for a dynamic module that provides a namespace')
    urlconf_module, app_name, namespace = arg
    return (urlconf_module, app_name, namespace)
def patterns(prefix, *args):
    """Attach *prefix* to every entry and return the flat pattern list."""
    resolved = []
    for entry in args:
        if isinstance(entry, (list, tuple)):
            # Raw (regex, view, ...) tuple: build the pattern ourselves.
            entry = url(prefix=prefix, *entry)
        elif isinstance(entry, RegexURLPattern):
            # Already a pattern object: just prepend the prefix in place.
            entry.add_prefix(prefix)
        resolved.append(entry)
    return resolved
def url(regex, view, kwargs=None, name=None, prefix=''):
    """Build a RegexURLPattern, or a RegexURLResolver for include() results."""
    if isinstance(view, (list, tuple)):
        # Result of include(...): unpack the namespace hint into a resolver.
        urlconf_module, app_name, namespace = view
        return RegexURLResolver(regex, urlconf_module, kwargs,
                                app_name=app_name, namespace=namespace)
    if isinstance(view, basestring):
        # Dotted-path view name; an empty string is a configuration error.
        if not view:
            raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
        if prefix:
            view = prefix + '.' + view
    return RegexURLPattern(regex, view, kwargs, name)
| bsd-3-clause |
apihackers/wapps | wapps/templatetags/images.py | 1 | 2247 | import jinja2
from django.conf import settings
from django.utils.safestring import mark_safe
from django.forms.utils import flatatt
from django.utils.translation import ugettext as _
from django_jinja import library
from wapps.models import IdentitySettings
PLACEHOLDIT_URL = 'https://placehold.it/{width}x{height}/{bg}/{fg}?text={text}'
DEFAULT_BACKGROUND = '#ccc'
DEFAULT_FOREGROUND = '#969696'
@library.filter
@jinja2.contextfilter
def placeholder(ctx, value, width, height, bg=None, fg=None, text=None, site=True, **kwargs):
    '''
    A placehold.it fallback filter or global function.

    Try ``IdentitySettings.name`` then fallback on ``Site.name`` for text.

    You can disable this behavior with the ``site=False`` parameter,
    then the dimensions will be used as text.

    You can override the text with the ``text='My text'`` parameter or
    configuration.

    All parameters default can be set in your settings::

        WAPPS_PLACEHOLDER = {
            'fg': '#0aa',
            'bg': '#aaa',
            'text': 'Placeholder',
        }
    '''
    if value:
        # A real image value was provided: nothing to substitute.
        return value
    placeholder_settings = getattr(settings, 'WAPPS_PLACEHOLDER', {})
    params = {
        'width': width,
        'height': height,
        'fg': fg or placeholder_settings.get('fg', DEFAULT_FOREGROUND),
        'bg': bg or placeholder_settings.get('bg', DEFAULT_BACKGROUND),
    }
    # Bug fix: honour the documented ``site=False`` switch.  The original
    # shadowed the parameter with ``site = request.site`` so the site name
    # was always looked up regardless of the flag.
    if not text and site:
        request = ctx['request']
        current_site = request.site
        if current_site:
            identity = IdentitySettings.for_site(current_site)
            text = getattr(identity, 'name') or current_site.site_name
    if not text:
        # Last resort: use the dimensions as the displayed text.
        text = '{width}x{height}'.format(**params)
    params['text'] = text.replace(' ', '+')
    # placehold.it expects colors without the leading '#'.
    for key in 'fg', 'bg':
        params[key] = params[key].replace('#', '')
    url = PLACEHOLDIT_URL.format(**params)
    if value == '':
        # Empty image() call, expect an <img/> tag
        attrs = {
            'src': url,
            'title': text,
            'alt': _('Placeholder'),
            'width': params['width'],
            'height': params['height'],
        }
        attrs.update(**kwargs)
        return mark_safe('<img {0}/>'.format(flatatt(attrs)))
    else:
        # This is an undefined url
        return url
| mit |
muffinresearch/olympia | sites/stage/settings_addons.py | 13 | 2339 | """private_addons will be populated from puppet and placed in this directory"""
# Stage ("allizom") deployment overrides for the addons site, layered on top
# of the shared base settings.  Credentials and other secrets are read from
# the puppet-provisioned private_addons module.
from lib.settings_base import * # noqa
from settings_base import * # noqa
import private_addons
DOMAIN = 'addons.allizom.org'
SERVER_EMAIL = 'zstage@addons.mozilla.org'
SITE_URL = 'https://addons.allizom.org'
SERVICES_URL = SITE_URL
STATIC_URL = getattr(private_addons, 'STATIC_URL',
                     'https://addons-stage-cdn.allizom.org/static/')
MEDIA_URL = getattr(private_addons, 'MEDIA_URL',
                    'https://addons-stage-cdn.allizom.org/user-media/')
# CSP: allow scripts from the CDN (without the trailing slash) and frames
# from the PayPal sandbox only.
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL[:-1],)
CSP_FRAME_SRC = ("'self'", "https://sandbox.paypal.com",)
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
# Cache and metrics key prefixes are namespaced per environment.
CACHE_PREFIX = 'stage.%s' % CACHE_PREFIX
KEY_PREFIX = CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
STATSD_PREFIX = 'addons-stage'
GRAPHITE_PREFIX = STATSD_PREFIX
CEF_PRODUCT = STATSD_PREFIX
SYSLOG_TAG = "http_app_addons_stage"
SYSLOG_TAG2 = "http_app_addons_stage_timer"
SYSLOG_CSP = "http_app_addons_stage_csp"
# Signing
SIGNING_SERVER = private_addons.SIGNING_SERVER
PRELIMINARY_SIGNING_SERVER = private_addons.PRELIMINARY_SIGNING_SERVER
# sandbox -- all PayPal endpoints point at the sandbox, never production.
PAYPAL_PAY_URL = 'https://svcs.sandbox.paypal.com/AdaptivePayments/'
PAYPAL_FLOW_URL = 'https://sandbox.paypal.com/webapps/adaptivepayment/flow/pay'
PAYPAL_API_URL = 'https://api-3t.sandbox.paypal.com/nvp'
PAYPAL_EMAIL = private_addons.PAYPAL_EMAIL
PAYPAL_APP_ID = private_addons.PAYPAL_APP_ID
PAYPAL_PERMISSIONS_URL = 'https://svcs.sandbox.paypal.com/Permissions/'
PAYPAL_CGI_URL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
PAYPAL_EMBEDDED_AUTH = {
    'USER': private_addons.PAYPAL_EMBEDDED_AUTH_USER,
    'PASSWORD': private_addons.PAYPAL_EMBEDDED_AUTH_PASSWORD,
    'SIGNATURE': private_addons.PAYPAL_EMBEDDED_AUTH_SIGNATURE,
}
PAYPAL_CGI_AUTH = {'USER': private_addons.PAYPAL_CGI_AUTH_USER,
                   'PASSWORD': private_addons.PAYPAL_CGI_AUTH_PASSWORD,
                   'SIGNATURE': private_addons.PAYPAL_CGI_AUTH_SIGNATURE}
PAYPAL_CHAINS = (
    (30, private_addons.PAYPAL_CHAINS_EMAIL),
)
TMP_PATH = os.path.join(NETAPP_STORAGE, 'tmp')
PACKAGER_PATH = os.path.join(TMP_PATH, 'packager')
GOOGLE_ANALYTICS_DOMAIN = 'addons.mozilla.org'
NEWRELIC_INI = '/etc/newrelic.d/addons.allizom.org.ini'
SENTRY_DSN = private_addons.SENTRY_DSN
| bsd-3-clause |
rackerlabs/rore | src/rore/shell.py | 1 | 22416 | # Copyright (c) 2013 Jesse Keating <jesse.keating@rackspace.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import ConfigParser
import logging
import sys
import os
import redmine
import tempfile
from redmine import exceptions as rm_exc
from subprocess import call
# Setup the basic logging objects
LOG = logging.getLogger('rore')
def get_user(rmine, userdata):
    """Resolve *userdata* (a numeric id or a user name) to a redmine user id."""
    # Numeric input (or numeric string) is already an id.
    try:
        return int(userdata)
    except ValueError:
        pass
    matches = rmine.user.filter(name=userdata)
    if not matches:
        raise RuntimeError('Unknown user %s' % userdata)
    if len(matches) > 1:
        raise RuntimeError('Multiple users for %s found' % userdata)
    return matches[0].id
def print_issue(rmine, issue, verbose=False, oneline=False):
    """Print a redmine issue to stdout.

    :param rmine: authenticated Redmine handle, used to resolve related issues.
    :param issue: the redmine issue resource to display.
    :param verbose: also print dates, parent, description, relations, journals.
    :param oneline: print a single compact summary line instead.
    """
    # handle unauth issues -- github #20
    if issue.id == 0:
        print('Unauthorized to view this issue')
        return
    # handle oneline printing
    if oneline:
        print('%s %s %s %s %s %s' % (issue.id, issue.project.name,
                                     issue.tracker.name, issue.priority.name,
                                     issue.status.name, issue.subject))
        return
    print('%s ( %s )' % (issue.id, issue.url))
    print('subject: %s' % issue.subject)
    print('type: %s' % issue.tracker.name)
    try:
        print('assigned to: %s' % issue.assigned_to.name)
    except rm_exc.ResourceAttrError:
        print('assigned to: UNASSIGNED')
    print('project: %s' % issue.project.name)
    print('status: %s' % issue.status.name)
    print('priority: %s' % issue.priority.name)
    print('completion: %s' % issue.done_ratio)
    if verbose:
        # Optional fields may be missing on the resource, in which case
        # redmine raises ResourceAttrError.  (The duplicate 'priority'
        # line the original printed here was removed; it is already
        # printed above.)
        print('start date: %s' % issue.start_date)
        try:
            print('due date: %s' % issue.due_date)
        except rm_exc.ResourceAttrError:
            pass
        try:
            print('parent: %s' % issue.parent['id'])
        except rm_exc.ResourceAttrError:
            pass
        print('updated_on: %s' % issue.updated_on)
        if hasattr(issue, 'description'):
            print('description:\n')
            print(issue.description)
            print('----')
        for relation in issue.relations:
            # Fetch the issue at the other end of the relation.  (The
            # original also computed a partial 'reltype' here; that value
            # was dead code, always overwritten below -- removed.)
            if relation.issue_id != issue.id:
                relish = rmine.issue.get(relation.issue_id)
            else:
                relish = rmine.issue.get(relation.issue_to_id)
            # Check for unauth -- wtf? See github #20
            if relish.id == 0:
                continue
            # Get the verb right for the relationship direction.
            reltype = relation.relation_type
            if relation.issue_id != issue.id:
                if relation.relation_type == 'blocks':
                    reltype = 'blocked by'
                if relation.relation_type == 'blocked':
                    reltype = 'blocks'
                if relation.relation_type == 'duplicates':
                    reltype = 'duplicated by'
                if relation.relation_type == 'precedes':
                    reltype = 'preceded by'
                if relation.relation_type == 'follows':
                    reltype = 'followed by'
            # Fixed the Python-2-only `print('fmt') % (args)` statement
            # form: format first, then print.
            print('%s %s - %s #%s: %s (%s)' % (reltype,
                                               relish.project.name,
                                               relish.tracker.name,
                                               relish.id,
                                               relish.subject,
                                               relation.id))
        for journ in issue.journals:
            print('\n####')
            print('Updated by %s on %s:' % (journ.user.name,
                                            journ.created_on))
            if hasattr(journ, 'notes'):
                print(journ.notes)
            print('\n')
def print_project(rmine, proj, verbose=False):
    """Print out a redmine project object.

    :param rmine: the Redmine handle (its url is used to build the link).
    :param proj: the redmine project resource to display.
    :param verbose: also print description, identifier and parent.
    """
    print('%s ( %s )' % (proj.name, '%s/projects/%s' % (rmine.url,
                                                        proj.identifier)))
    if verbose:
        # Here is where we should enumerate all the possible fields
        print('description: %s' % proj.description)
        print('identifier: %s' % proj.identifier)
        try:
            print('parent: %s' % proj.parent['name'])
        except rm_exc.ResourceAttrError:
            # Top-level projects have no parent attribute.
            pass
    # Blank separator between projects when listing several.
    # NOTE(review): the original indentation is ambiguous; this assumes the
    # separator prints for every project, not only in verbose mode -- confirm.
    print('\n')
def editor_text(initial_description=""):
    """Open $EDITOR on a temp file seeded with *initial_description*.

    Returns the edited text, or the seed unchanged when $EDITOR is unset.
    """
    editor = os.environ.get('EDITOR')
    if not editor:
        return initial_description
    # delete=False so the file survives the editor subprocess; removed
    # manually afterwards.
    with tempfile.NamedTemporaryFile(suffix=".tmp", delete=False,
                                     dir='/tmp/') as tmp:
        tmp.write(initial_description)
        tmp.flush()
        call([editor, tmp.name])
        with open(tmp.name, 'r') as fh:
            text = fh.read()
    os.remove(tmp.name)
    return text
def create_relation(rmine, issue, relissue, reltype):
    """Creates a new issue relationship between two issues.

    :param rmine: the Redmine API handle.
    :param issue: id of the source issue.
    :param relissue: id of the issue to relate it to.
    :param reltype: relation type name understood by redmine
        (e.g. 'blocks', 'duplicates', 'precedes').
    """
    rmine.issue_relation.create(issue_id=issue,
                                issue_to_id=relissue,
                                relation_type=reltype)
def get_priority(rmine, priority):
    """Return the issue-priority enumeration whose name matches *priority*.

    The comparison is case-insensitive.  Raises RuntimeError when no such
    priority exists.
    """
    priorities = rmine.enumeration.filter(resource='issue_priorities')
    matches = [p for p in priorities if p.name.lower() == priority.lower()]
    if not matches:
        # Bug fix: the original message contained a literal '%s' because
        # the priority name was never interpolated.
        raise RuntimeError("Priority '%s' is not a priority." % priority)
    return matches[0]
def print_user(user):
    """Print a short formatted summary of a redmine user to stdout."""
    banner = "###################"
    details = [
        '\n',
        banner,
        "Id: %s" % user.id,
        "Name: %s %s" % (user.firstname, user.lastname),
        "Email: %s" % user.mail,
        banner,
        '\n',
    ]
    for line in details:
        print(line)
def users(args, rmine):
    """Handle the users subcommand; only --me is currently supported."""
    if not args.me:
        # Any other flag combination falls back to showing the caller.
        print("Currently only argument available for users is --me")
        print("Going to assume you want to see your information")
    current = rmine.user.get('current')
    print_user(current)
def issues(args, rmine):
    """Handle issues.

    Dispatches on the flags parsed by create_parser(); exactly one action
    runs per invocation: bare IDs print issue details, and --query,
    --create, --update, --close, --list-types, --list-statuses,
    --list-queries and --delete_relation each handle their branch and
    return.

    :param args: parsed argparse namespace from the 'issues' subparser
    :param rmine: connected Redmine API handle
    """
    # Just print issue details
    if args.ID and not (args.update or args.close):
        ishs = [rmine.issue.get(ID) for ID in args.ID]
        for ish in ishs:
            print_issue(rmine, ish, args.verbose, args.oneline)
        return
    # query
    if args.query:
        qdict = {}
        if args.project:
            qdict['project_id'] = args.project
        if args.nosubs:
            # '!*' excludes all subprojects server-side.
            qdict['subproject_id'] = '!*'
        if args.assigned_to:
            qdict['assigned_to_id'] = get_user(rmine, args.assigned_to)
        if args.mine:
            my_id = rmine.user.get('current').id
            qdict['assigned_to_id'] = my_id
        if args.status:
            qdict['status_id'] = args.status
        if args.query_id:
            if not args.project:
                raise RuntimeError("query_id argument requires '--project "
                                   "[projectid]' argument also")
            qdict['query_id'] = args.query_id
        # Get the issues
        ishes = rmine.issue.filter(**qdict)
        if args.priority:
            # Priority is filtered client-side after the server query.
            priority = get_priority(rmine, args.priority)
            ishes = [i for i in ishes
                     if i.priority.name.lower() == priority.name.lower()]
        # This output is kinda lame, but functional for now
        for issue in ishes:
            print_issue(rmine, issue, args.verbose, args.oneline)
            if not args.oneline:
                print('##############')
        return
    # create
    if args.create:
        idict = {}
        # We have to have these items to continue
        if not args.project or not args.subject:
            raise RuntimeError('project and subject must be defined')
        idict['project_id'] = args.project
        idict['subject'] = args.subject
        # Get tracker by type
        itype = [
            tracker.id for tracker in rmine.tracker.all() if
            tracker.name == args.type]
        try:
            idict['tracker_id'] = itype[0]
        except IndexError:
            raise RuntimeError('Unknown issue type %s' % args.type)
        if args.assigned_to and args.assigned_to != 'UNASSIGNED':
            idict['assigned_to_id'] = get_user(rmine, args.assigned_to)
        # Would be rad to do a git commit like editor pop up here
        if args.description:
            idict['description'] = args.description
        else:
            idict['description'] = editor_text()
        # figure out the status
        if args.status:
            stat = [status for status in rmine.issue_status.all() if
                    status.name == args.status]
            try:
                idict['status_id'] = stat[0].id
            except IndexError:
                # BUG FIX: this branch previously reported
                # "Unknown issue type %s" % args.type -- a copy-paste from
                # the tracker lookup above; the failed lookup here is the
                # *status* (cf. the matching message in the update branch).
                raise RuntimeError('Unknown issue status %s' % args.status)
        # set priority
        if args.priority:
            p = get_priority(rmine, args.priority)
            idict['priority_id'] = p.id
        # Create the issue
        issue = rmine.issue.create(**idict)
        # Create a relationship if one was asked for
        if args.relate_to:
            create_relation(rmine, issue.id, args.relate_to,
                            args.relation_type)
            issue = issue.refresh()
        # Print it out
        print_issue(rmine, issue, args.verbose, args.oneline)
        return
    # update the ticket(s)
    if args.update:
        ishs = [rmine.issue.get(ID) for ID in args.ID]
        udict = {}
        # Discover status ID
        if args.status:
            stat = [status for status in rmine.issue_status.all() if
                    status.name == args.status]
            try:
                udict['status_id'] = stat[0].id
            except IndexError:
                raise RuntimeError('Unknown issue status %s' % args.status)
        if args.type:
            itype = [
                tracker for tracker in rmine.tracker.all()
                if tracker.name == args.type]
            try:
                udict['tracker_id'] = itype[0].id
            except IndexError:
                raise RuntimeError('Unknown issue type %s' % args.type)
        if args.assigned_to:
            udict['assigned_to_id'] = get_user(rmine, args.assigned_to)
        if args.project:
            udict['project_id'] = args.project
        if args.subject:
            udict['subject'] = args.subject
        if args.description:
            udict['description'] = args.description
        if args.priority:
            udict['priority_id'] = get_priority(rmine, args.priority).id
        if args.notes:
            udict['notes'] = args.notes
        for ish in ishs:
            if udict:
                rmine.issue.update(ish.id, **udict)
            else:
                rmine.issue.get(ish.id)
            if args.relate_to:
                create_relation(rmine, ish.id, args.relate_to,
                                args.relation_type)
            ish = ish.refresh()
            print_issue(rmine, ish, args.verbose, args.oneline)
        return
    # close the ticket(s)
    if args.close:
        ishs = [rmine.issue.get(ID) for ID in args.ID]
        closestatus = [status for status in rmine.issue_status.all() if
                       status.name == 'Closed']
        for ish in ishs:
            rmine.issue.update(ish.id, status_id=closestatus[0].id,
                               notes=args.notes)
            ish = ish.refresh()
            print_issue(rmine, ish, args.verbose, args.oneline)
        return
    # issue types
    if args.list_types:
        if args.project:
            # Get trackers via the project entry point
            proj = rmine.project.get(args.project, include='trackers')
            print('Available issue types for %s :' % proj.url)
            print('\n'.join(itype.name for itype in proj.trackers))
        else:
            print('Available issue types for %s :' % rmine.url)
            print('\n'.join(itype.name for itype in rmine.tracker.all()))
        return
    # issue statuses
    if args.list_statuses:
        print('Available issue statuses for %s :' % rmine.url)
        print('\n'.join(status.name for status in rmine.issue_status.all()))
        return
    # issue queries
    if args.list_queries:
        print('Available issue queries for %s :' % rmine.url)
        print('\n'.join("%s %s" % (q.id, q.name) for q in
                        sorted(rmine.query.all(), key=lambda k: k['id'])))
        return
    # delete a relation
    if args.delete_relation:
        relation = rmine.issue_relation.get(args.delete_relation)
        rmine.issue_relation.delete(args.delete_relation)
        print('Deleted relation %s: %s %s %s' % (relation.id,
                                                 relation.issue_id,
                                                 relation.relation_type,
                                                 relation.issue_to_id))
def projects(args, rmine):
    """Handle projects"""
    if not args.list:
        return
    for proj in rmine.project.all():
        print_project(rmine, proj, args.verbose)
def create_parser():
    """Build the top-level argparse parser.

    Three subcommands are defined -- issues, users and projects -- and each
    subparser binds its handler function via set_defaults(command=...), so
    cmd() can simply invoke args.command(args, rmine).
    """
    parser = argparse.ArgumentParser(prog='rore')
    # config
    parser.add_argument('--config', '-C', default=None,
                        help='Specify a config file to use '
                             '(defaults to ~/.rore)')
    parser.add_argument('--site', '-S', default='default',
                        help='Specify which site to use '
                             '(defaults to default)')
    # verbosity
    parser.add_argument('-v', action='store_true',
                        help='Run with verbose debug output')
    parser.add_argument('-q', action='store_true',
                        help='Run quietly only displaying errors')
    # subparsers
    subparsers = parser.add_subparsers(
        title='Subcommands',
        description='Valid Redmine interaction targets',
        help='Each target has its own --help.')
    # Issues
    issues_parser = subparsers.add_parser('issues',
                                          help='Interact with issues')
    # verbs
    issues_parser.add_argument('--query', action='store_true',
                               help='Query for tickets')
    issues_parser.add_argument('--create', action='store_true',
                               help='Create a new ticket')
    issues_parser.add_argument('--close', action='store_true',
                               help='Close a ticket')
    issues_parser.add_argument('--update', action='store_true',
                               help='Update an existing ticket')
    issues_parser.add_argument('--list-types', action='store_true',
                               help='List available issue types. Specify a '
                                    'project ID to get specific types for that '
                                    'project')
    issues_parser.add_argument('--list-queries', action='store_true',
                               help='List available issue queries')
    issues_parser.add_argument('--list-statuses', action='store_true',
                               help='List available statuses.')
    issues_parser.add_argument('--delete_relation',
                               help='Delete a relationship',
                               type=int, metavar='RELATION_ID')
    # details
    issues_parser.add_argument('--project', help='Filter by or assign to '
                                                 'project')
    issues_parser.add_argument('--type', help='Filter by or create issue '
                                              'type. Defaults to Bug.')
    # I don't like the asterisk here, change it to something else soon
    issues_parser.add_argument('--nosubs', help='Filter out issues from sub '
                                                'projects', action='store_true')
    # --assigned_to and --mine both set assigned_to_id, so they exclude
    # each other.
    group = issues_parser.add_mutually_exclusive_group()
    # Need a way to filter all assigned issues, just show unassigned
    group.add_argument('--assigned_to', help='Filter by or assign to '
                                             'user. Defaults to UNASSIGNED when creating.')
    group.add_argument('--mine', action='store_true', help='Only your issues')
    issues_parser.add_argument('--priority', help='Filter by or create '
                                                  'priority. Defaults to Normal')
    issues_parser.add_argument('--status',
                               help='Only deal with issues with this status '
                                    'or set an issue to this status.')
    issues_parser.add_argument('--subject', help='Set subject when creating '
                                                 'a new issue')
    issues_parser.add_argument('--description', help='Set description when '
                                                     'creating a new issue')
    issues_parser.add_argument('--notes', help='Notes to use when resolving '
                                               'or closing an issue')
    issues_parser.add_argument('--query_id', help='Filter by query ID. '
                                                  ' Requires --project [project] and '
                                                  '--query arguments.')
    issues_parser.add_argument('--relate_to', help='Create a relationship',
                               type=int)
    issues_parser.add_argument('--relation_type', help='Type of relationship '
                                                       'to create',
                               choices=['relates', 'duplicates',
                                        'blocks', 'blocked',
                                        'precedes', 'follows'])
    # More options when showing issues
    issues_parser.add_argument('--verbose', action='store_true',
                               help='Show more of the ticket details',
                               default=False)
    issues_parser.add_argument('--oneline', action='store_true',
                               help='Show each ticket on one line',
                               default=False)
    # Lastly just feed specific issue numbers in
    issues_parser.add_argument('ID', help='Issue IDs to find', nargs='*')
    # assign the function
    issues_parser.set_defaults(command=issues)
    # Users
    user_parser = subparsers.add_parser('users',
                                        help='Interact with users')
    user_parser.add_argument('--me', action='store_true',
                             help='Get your user information')
    # assign the function
    user_parser.set_defaults(command=users)
    # Projects
    project_parser = subparsers.add_parser('projects',
                                           help='Interact with projects')
    # verbs
    project_parser.add_argument('--list', action='store_true',
                                help='List out all the projects')
    # details
    project_parser.add_argument('--verbose', action='store_true',
                                help='Show more of the project details',
                                default=False)
    # assign the function
    project_parser.set_defaults(command=projects)
    return parser
def setup_logging(args):
    """Wire up the module LOG: INFO and below go to stdout, WARNING and
    above to stderr, with a bare-message format; -v/-q pick the level."""

    class OnlyUpToInfo(logging.Filter):
        def filter(self, record):
            # If the record is 20 (INFO) or lower, let it through
            return record.levelno <= logging.INFO

    plain = logging.Formatter('%(message)s')

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.addFilter(OnlyUpToInfo())
    out_handler.setFormatter(plain)

    err_handler = logging.StreamHandler()  # Defaults to stderr
    err_handler.setLevel(logging.WARNING)
    err_handler.setFormatter(plain)

    LOG.addHandler(out_handler)
    LOG.addHandler(err_handler)

    if args.v:
        level = logging.DEBUG
    elif args.q:
        level = logging.WARNING
    else:
        level = logging.INFO
    LOG.setLevel(level)
def load_config(args):
    """Read site credentials and issue defaults from the rore config file.

    :param args: parsed argparse namespace; args.config/args.site select
                 the file and section, and issue defaults may be written
                 back onto args.type / args.project
    :returns: (siteurl, key, verify) tuple for connect_to_redmine()

    Exits the process with status 1 when the config file cannot be read.
    """
    # load the credentials
    if not args.config:
        args.config = '~/.rore'
    configfile = os.path.expanduser(args.config)
    cparser = ConfigParser.SafeConfigParser()
    try:
        cparser.readfp(open(configfile, 'r'))
    except IOError:
        LOG.error("Couldn't find config file: %s" % configfile)
        exit(1)
    siteurl = cparser.get(args.site, 'url')
    key = cparser.get(args.site, 'key')
    try:
        verify = cparser.getboolean(args.site, 'verify')
    except ConfigParser.NoOptionError:
        # SSL verification defaults to off when the option is absent.
        verify = False
    if args.command == issues:
        if not args.type:
            try:
                args.type = cparser.get(args.site, 'default issue tracker')
            except ConfigParser.NoOptionError:
                args.type = 'Bug'
        if not args.project:
            try:
                # BUG FIX: this previously assigned to args.type, silently
                # clobbering the tracker chosen above; the 'default issue
                # project' option belongs in args.project.
                args.project = cparser.get(args.site,
                                           'default issue project')
            except ConfigParser.NoOptionError:
                pass
    return siteurl, key, verify
def connect_to_redmine(siteurl, key, verify):
    """Return an authenticated Redmine API handle for *siteurl*."""
    # Figure out a way to make this a config option in .rore
    return redmine.Redmine(siteurl, key=key, requests={'verify': verify})
def cmd():
    """This is the entry point for the shell command"""
    args = create_parser().parse_args()
    setup_logging(args)
    rmine = connect_to_redmine(*load_config(args))
    # Run the required command -- pass args into it for reference
    args.command(args, rmine)
| apache-2.0 |
quamilek/django | tests/template_tests/filter_tests/test_length_is.py | 360 | 3204 | from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
    """Template-level tests for the ``length_is`` filter, rendered through
    the engine supplied by the ``@setup`` decorator."""

    @setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
    def test_length_is01(self):
        output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
        self.assertEqual(output, 'Four')

    @setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is02(self):
        output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
        self.assertEqual(output, 'Not Four')

    @setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
    def test_length_is03(self):
        output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
        self.assertEqual(output, 'Four')

    @setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is04(self):
        output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
        self.assertEqual(output, 'Not Four')

    @setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is05(self):
        output = self.engine.render_to_string('length_is05', {'mystring': ''})
        self.assertEqual(output, 'Not Four')

    # NOTE(review): despite its name, this template exercises the plain
    # ``length`` filter inside {% with %}, not ``length_is``.
    @setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
    def test_length_is06(self):
        output = self.engine.render_to_string('length_is06', {'var': 'django'})
        self.assertEqual(output, '6')

    # Boolean return value from length_is should not be coerced to a string
    @setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
    def test_length_is07(self):
        output = self.engine.render_to_string('length_is07', {})
        self.assertEqual(output, 'Length not 0')

    @setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
    def test_length_is08(self):
        output = self.engine.render_to_string('length_is08', {})
        self.assertEqual(output, 'Length is 1')

    # Invalid uses that should fail silently.
    @setup({'length_is09': '{{ var|length_is:"fish" }}'})
    def test_length_is09(self):
        output = self.engine.render_to_string('length_is09', {'var': 'django'})
        self.assertEqual(output, '')

    @setup({'length_is10': '{{ int|length_is:"1" }}'})
    def test_length_is10(self):
        output = self.engine.render_to_string('length_is10', {'int': 7})
        self.assertEqual(output, '')

    @setup({'length_is11': '{{ none|length_is:"1" }}'})
    def test_length_is11(self):
        output = self.engine.render_to_string('length_is11', {'none': None})
        self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the length_is() filter function; assertEqual
    against True/False (rather than assertTrue) pins the exact boolean
    return value."""

    def test_empty_list(self):
        self.assertEqual(length_is([], 0), True)
        self.assertEqual(length_is([], 1), False)

    def test_string(self):
        self.assertEqual(length_is('a', 1), True)
        self.assertEqual(length_is('a', 10), False)
| bsd-3-clause |
SSJohns/osf.io | scripts/migrate_logs_fix.py | 57 | 1427 | import sys
import logging
from website.app import init_app
from website.models import NodeLog
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
    """Un-hide each log and detach it from nodes it does not belong to.

    For every log: clear should_hide, then remove the log from each
    logged node other than its owning node, remembering successfully
    detached nodes in log.was_connected_to.  With dry=True only the
    per-log messages are emitted and nothing is modified.
    """
    for log in records:
        logger.info(
            'Migrating log - {}, '.format(log._id)
        )
        count = 0
        if not dry:
            log.should_hide = False
            for node in log.node__logged:
                if node != log.node:
                    node.logs.remove(log)
                    count += 1
                    try:
                        node.save()
                    except Exception as err:  # Allow script to continue if error occurs
                        logger.exception(err)
                    else:
                        # Only record the detach once the node persisted.
                        log.was_connected_to.append(node)
            log.save()
        logger.info(
            'Removed {} nodes from log - {}'.format(
                count, log._id)
        )
    logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', len(records)))
def get_targets():
    """Return all NodeLog records currently flagged as hidden."""
    return NodeLog.find(Q('should_hide', 'eq', True))
def main():
    """Script entry point: run the migration, honoring a 'dry' argv flag."""
    init_app(routes=False)  # Sets the storage backends on all models
    dry_run = 'dry' in sys.argv
    if not dry_run:
        # Real runs also get a persistent file log for auditing.
        script_utils.add_file_logger(logger, __file__)
    do_migration(get_targets(), dry_run)
# Allow running as a standalone script.
if __name__ == '__main__':
    main()
| apache-2.0 |
DavidDzl/online_platform | test_app/api_urls.py | 1 | 1190 | """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from rest_framework import routers
from .views import POIViewSetInfra, POIViewSetOrg, POIViewSetSafe, POIViewSetComm, POIViewSetOth
# DRF router: one POI viewset registered per category prefix.  Explicit
# base_name values keep reverse-URL names stable regardless of each
# viewset's queryset.
router = routers.DefaultRouter()
router.register(r'infrastructure', POIViewSetInfra, base_name='infra')
router.register(r'organization', POIViewSetOrg, base_name='org')
router.register(r'community', POIViewSetComm, base_name='comm')
router.register(r'safety', POIViewSetSafe, base_name='safe')
router.register(r'other', POIViewSetOth, base_name='oth')
urlpatterns = router.urls | lgpl-3.0 |
GeoCat/QGIS | python/qsci_apis/generate_console_pap.py | 12 | 3340 | # -*- coding:utf-8 -*-
"""
/***************************************************************************
Module to generate prepared APIs for calltips and auto-completion.
-------------------
begin : 2013-08-29
copyright : (C) 2013 Larry Shaffer
email : larrys (at) dakotacarto (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Portions of this file contain code from Eric4 APIsManager module.
"""
import sys
import os
from qgis.PyQt.Qsci import QsciLexerPython, QsciAPIs
from qgis.PyQt.QtWidgets import QApplication
from qgis.PyQt.QtCore import QObject
class PrepareAPIs(QObject):
    """Prepare QScintilla .api files into a single compiled .pap file.

    Preparation runs asynchronously inside the Qt event loop; the
    apiPreparationFinished signal triggers saving, and the process exits
    with status 0 on success or 1 on any failure.
    """

    def __init__(self, api_lexer, api_files, pap_file):
        QObject.__init__(self)
        self._api = None
        self._api_files = api_files
        self._api_lexer = api_lexer
        self._pap_file = pap_file

    def _clearLexer(self):
        self.qlexer = None

    def _stopPreparation(self):
        # Abort an in-flight preparation and exit with an error code.
        if self._api is not None:
            self._api.cancelPreparation()
        self._api = None
        sys.exit(1)

    def _preparationFinished(self):
        # Signal handler: persist the prepared data and exit the process.
        self._clearLexer()
        try:
            if os.path.exists(self._pap_file):
                os.remove(self._pap_file)
            prepd = self._api.savePrepared(unicode(self._pap_file))
            self._api = None
            sys.exit(0 if prepd else 1)
        except Exception as err:
            # NOTE(review): 'err' is swallowed silently; consider logging it.
            self._api = None
            sys.exit(1)

    def prepareAPI(self):
        # Kick off asynchronous preparation; completion arrives via the
        # apiPreparationFinished signal connected above.
        try:
            self._api = QsciAPIs(self._api_lexer)
            self._api.apiPreparationFinished.connect(self._preparationFinished)
            for api_file in self._api_files:
                self._api.load(unicode(api_file))
            self._api.prepare()
        except Exception as err:
            self._api = None
            sys.exit(1)
if __name__ == '__main__':
    # Expects: output .pap path, directory of source .api files, directory
    # holding the generated PyQGIS.api.
    if len(sys.argv) != 4:
        print 'Usage: python <script> <pap_file> <apis_src_dir> <api_bin_dir>'
        sys.exit(1)
    pap_file = sys.argv[1]
    api_src_dir = sys.argv[2]
    api_bin_dir = sys.argv[3]
    api_files = [
        os.path.join(api_bin_dir, 'PyQGIS.api'),
        os.path.join(api_src_dir, 'Python-2.7.api'),
        os.path.join(api_src_dir, 'PyQt4-4.7.4.api'),
        os.path.join(api_src_dir, 'OSGeo_GEOS-3.4.2.api'),
        os.path.join(api_src_dir, 'OSGeo_GDAL-OGR-1.10.0.api')
    ]
    # print api_files.__repr__()
    # print pap_file.__repr__()
    app = QApplication(sys.argv, False)  # just start a non-gui console app
    api_lexer = QsciLexerPython()
    prepap = PrepareAPIs(api_lexer, api_files, pap_file)
    prepap.prepareAPI()
    # Enter the Qt event loop so the preparation-finished signal can fire;
    # PrepareAPIs calls sys.exit() itself on completion.
    sys.exit(app.exec_())
| gpl-2.0 |
craneworks/python-pyroute2 | pyroute2/ndb/interface.py | 1 | 3117 | import weakref
from pyroute2.common import basestring
from pyroute2.netlink.rtnl.ifinfmsg import ifinfmsg
class Interface(dict):
    """Dict-like view of one interface row in the NDB SQL cache.

    Field values are loaded from the SQL cache and kept in sync with
    incoming ifinfmsg netlink events.  Keys set through __setitem__ are
    remembered in ``self.changed`` and are thereafter protected from
    being overwritten by event/SQL loads.
    """

    table = 'interfaces'

    def __init__(self, db, key):
        self.db = db
        # Maps netlink message class -> name of the loader method below.
        self.event_map = {ifinfmsg: "load_ifinfmsg"}
        # Fields that uniquely identify a row ('target' + table indices).
        self.kspec = ('target', ) + db.indices[self.table]
        self.schema = ('target', ) + \
            tuple(db.schema[self.table].keys())
        self.names = tuple((ifinfmsg.nla2name(x) for x in self.schema))
        self.key = self.complete_key(key)
        self.changed = set()
        self.load_sql()

    def __setitem__(self, key, value):
        # Track local modifications so loads won't clobber them.
        self.changed.add(key)
        dict.__setitem__(self, key, value)

    def snapshot(self):
        """Return a fresh copy loaded from the same key and register it
        with the DB as a dependency snapshot (held via weakref)."""
        snp = type(self)(self.db, self.key)
        self.db.save_deps(self.table, id(snp), weakref.ref(snp))
        return snp

    def complete_key(self, key):
        """Normalize *key* (interface name, index or partial dict) into a
        full key dict, fetching any missing index fields from the DB."""
        if isinstance(key, dict):
            ret_key = key
        else:
            ret_key = {'target': 'localhost'}

        if isinstance(key, basestring):
            ret_key['IFLA_IFNAME'] = key
        elif isinstance(key, int):
            ret_key['index'] = key

        # Any key-spec fields not supplied must be looked up.
        fetch = []
        for name in self.kspec:
            if name not in ret_key:
                fetch.append('f_%s' % name)

        if fetch:
            keys = []
            values = []
            for name, value in ret_key.items():
                keys.append('f_%s = ?' % name)
                values.append(value)
            spec = (self
                    .db
                    .execute('SELECT %s FROM interfaces WHERE %s' %
                             (' , '.join(fetch), ' AND '.join(keys)),
                             values)
                    .fetchone())
            # Strip the 'f_' column prefix when filling in the key.
            for name, value in zip(fetch, spec):
                ret_key[name[2:]] = value

        return ret_key

    def update(self, data):
        for key, value in data.items():
            self.load_value(key, value)

    def load_value(self, key, value):
        # Locally-changed keys win over values arriving from loads.
        if key not in self.changed:
            dict.__setitem__(self, key, value)

    def load_ifinfmsg(self, target, event):
        """Apply an ifinfmsg netlink event if it matches this record's key."""
        # TODO: partial match (object rename / restore)
        # ...
        # full match
        for name, value in self.key.items():
            if name == 'target':
                if value != target:
                    return
            elif value != (event.get_attr(name) or event.get(name)):
                return
        #
        # load the event
        for name in self.schema:
            value = event.get_attr(name) or event.get(name)
            if value is not None:
                self.load_value(ifinfmsg.nla2name(name), value)

    def load_sql(self):
        """(Re)load all fields for this record from the SQL cache."""
        keys = []
        values = []
        for name, value in self.key.items():
            keys.append('f_%s = ?' % name)
            values.append(value)
        spec = (self
                .db
                .execute('SELECT * FROM interfaces WHERE %s' %
                         ' AND '.join(keys), values)
                .fetchone())
        self.update(dict(zip(self.names, spec)))
        return self
| apache-2.0 |
xiaoyanit/kivy | examples/miscellaneous/two_panes.py | 41 | 1126 | '''
Demonstrates using kv language to create some simple buttons and a
label, with each button modifying the label text.
'''
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
Builder.load_string('''
<MainWidget>:
BoxLayout:
orientation: 'vertical'
Button:
text: 'some string '
on_press: the_right_pane.text += self.text
Button:
text: 'one two three four '
on_press: the_right_pane.text += self.text
Button:
text: 'follow the yellow brick road '
on_press: the_right_pane.text += self.text
Button:
text: 'five six seven eight '
on_press: the_right_pane.text += self.text
Button:
text: 'CLEAR LABEL'
on_press: the_right_pane.text = ''
Label:
id: the_right_pane
text: ''
text_size: self.size
halign: 'center'
valign: 'middle'
''')
class MainWidget(BoxLayout):
    """Root widget; its layout and behavior come from the kv rules."""
    pass
class ExampleApp(App):
    """Minimal app whose root is a single MainWidget."""
    def build(self):
        return MainWidget()

# Run immediately on import/execution (no __main__ guard in this example).
ExampleApp().run()
| mit |
hamiltont/CouchPotatoServer | libs/sqlalchemy/orm/util.py | 18 | 27030 | # orm/util.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import sql, util, event, exc as sa_exc
from sqlalchemy.sql import expression, util as sql_util, operators
from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE,\
PropComparator, MapperProperty
from sqlalchemy.orm import attributes, exc
import operator
import re
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
# Full set of cascade keywords accepted by relationship(cascade=...).
all_cascades = frozenset(("delete", "delete-orphan", "all", "merge",
                          "expunge", "save-update", "refresh-expire",
                          "none"))

# NOTE(review): appears to be the attribute key pair used for mapper
# instrumentation lookups -- confirm against the rest of the module.
_INSTRUMENTOR = ('mapper', 'instrumentor')
class CascadeOptions(frozenset):
    """Keeps track of the options sent to relationship().cascade"""

    # 'all' expands to every cascade except these pseudo-options.
    _add_w_all_cascades = all_cascades.difference([
                            'all', 'none', 'delete-orphan'])
    _allowed_cascades = all_cascades

    def __new__(cls, arg):
        # Parse the comma-separated cascade string into a set of options.
        values = set([
                    c for c
                    in re.split('\s*,\s*', arg or "")
                    if c
                ])

        if values.difference(cls._allowed_cascades):
            raise sa_exc.ArgumentError(
                    "Invalid cascade option(s): %s" %
                    ", ".join([repr(x) for x in
                        sorted(
                            values.difference(cls._allowed_cascades)
                    )])
            )

        if "all" in values:
            values.update(cls._add_w_all_cascades)
        if "none" in values:
            values.clear()
        values.discard('all')

        self = frozenset.__new__(CascadeOptions, values)
        # Cache each option as a boolean attribute for fast access.
        self.save_update = 'save-update' in values
        self.delete = 'delete' in values
        self.refresh_expire = 'refresh-expire' in values
        self.merge = 'merge' in values
        self.expunge = 'expunge' in values
        self.delete_orphan = "delete-orphan" in values

        if self.delete_orphan and not self.delete:
            util.warn("The 'delete-orphan' cascade "
                        "option requires 'delete'.")
        return self

    def __repr__(self):
        return "CascadeOptions(%r)" % (
            ",".join([x for x in sorted(self)])
        )
def _validator_events(desc, key, validator, include_removes):
    """Runs a validation method on an attribute value to be set or appended.

    Wires the user's validator into the attribute event system; with
    include_removes=True the validator also receives an is_remove flag as
    a fourth argument.
    """

    if include_removes:
        def append(state, value, initiator):
            return validator(state.obj(), key, value, False)

        def set_(state, value, oldvalue, initiator):
            return validator(state.obj(), key, value, False)

        def remove(state, value, initiator):
            # Remove events have no return value to propagate.
            validator(state.obj(), key, value, True)
    else:
        def append(state, value, initiator):
            return validator(state.obj(), key, value)

        def set_(state, value, oldvalue, initiator):
            return validator(state.obj(), key, value)

    # retval=True lets the validator replace the incoming value.
    event.listen(desc, 'append', append, raw=True, retval=True)
    event.listen(desc, 'set', set_, raw=True, retval=True)
    if include_removes:
        event.listen(desc, "remove", remove, raw=True, retval=True)
def polymorphic_union(table_map, typecolname, aliasname='p_union', cast_nulls=True):
    """Create a ``UNION`` statement used by a polymorphic mapper.

    See :ref:`concrete_inheritance` for an example of how
    this is used.

    :param table_map: mapping of polymorphic identities to
     :class:`.Table` objects.
    :param typecolname: string name of a "discriminator" column, which will be
      derived from the query, producing the polymorphic identity for each row.  If
      ``None``, no polymorphic discriminator is generated.
    :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()`
      construct generated.
    :param cast_nulls: if True, non-existent columns, which are represented as labeled
      NULLs, will be passed into CAST.   This is a legacy behavior that is problematic
      on some backends such as Oracle - in which case it can be set to False.

    """

    colnames = util.OrderedSet()
    colnamemaps = {}
    types = {}
    for key in table_map.keys():
        table = table_map[key]

        # mysql doesn't like selecting from a select;
        # make it an alias of the select
        if isinstance(table, sql.Select):
            table = table.alias()
            table_map[key] = table

        m = {}
        for c in table.c:
            colnames.add(c.key)
            m[c.key] = c
            types[c.key] = c.type
        colnamemaps[table] = m

    def col(name, table):
        # Resolve a column by name for this table, padding missing
        # columns with a labeled (optionally CAST) NULL.
        try:
            return colnamemaps[table][name]
        except KeyError:
            if cast_nulls:
                return sql.cast(sql.null(), types[name]).label(name)
            else:
                return sql.type_coerce(sql.null(), types[name]).label(name)

    result = []
    for type, table in table_map.iteritems():
        if typecolname is not None:
            result.append(
                    sql.select([col(name, table) for name in colnames] +
                    [sql.literal_column(sql_util._quote_ddl_expr(type)).
                            label(typecolname)],
                             from_obj=[table]))
        else:
            result.append(sql.select([col(name, table) for name in colnames],
                                     from_obj=[table]))
    return sql.union_all(*result).alias(aliasname)
def identity_key(*args, **kwargs):
    """Get an identity key.

    Valid call signatures:

    * ``identity_key(class, ident)``

      class
          mapped class (must be a positional argument)

      ident
          primary key, if the key is composite this is a tuple


    * ``identity_key(instance=instance)``

      instance
          object instance (must be given as a keyword arg)

    * ``identity_key(class, row=row)``

      class
          mapped class (must be a positional argument)

      row
          result proxy row (must be given as a keyword arg)

    """
    if args:
        if len(args) == 1:
            class_ = args[0]
            try:
                row = kwargs.pop("row")
            except KeyError:
                ident = kwargs.pop("ident")
        elif len(args) == 2:
            class_, ident = args
        elif len(args) == 3:
            # NOTE(review): unpacking three positional arguments into two
            # names raises ValueError here, despite the "up to three"
            # error text below -- looks like a latent bug; confirm the
            # intended third argument before relying on this form.
            class_, ident = args
        else:
            raise sa_exc.ArgumentError("expected up to three "
                "positional arguments, got %s" % len(args))
        if kwargs:
            raise sa_exc.ArgumentError("unknown keyword arguments: %s"
                % ", ".join(kwargs.keys()))
        mapper = class_mapper(class_)
        if "ident" in locals():
            return mapper.identity_key_from_primary_key(util.to_list(ident))
        return mapper.identity_key_from_row(row)
    instance = kwargs.pop("instance")
    if kwargs:
        raise sa_exc.ArgumentError("unknown keyword arguments: %s"
            % ", ".join(kwargs.keys()))
    mapper = object_mapper(instance)
    return mapper.identity_key_from_instance(instance)
class ORMAdapter(sql_util.ColumnAdapter):
    """Extends ColumnAdapter to accept ORM entities.

    The selectable is extracted from the given entity,
    and the AliasedClass if any is referenced.

    """
    def __init__(self, entity, equivalents=None,
                            chain_to=None, adapt_required=False):
        self.mapper, selectable, is_aliased_class = _entity_info(entity)
        if is_aliased_class:
            self.aliased_class = entity
        else:
            self.aliased_class = None
        sql_util.ColumnAdapter.__init__(self, selectable,
                                        equivalents, chain_to,
                                        adapt_required=adapt_required)

    def replace(self, elem):
        # Only adapt columns that belong to this adapter's mapper
        # hierarchy; leave other entities' columns untouched.
        entity = elem._annotations.get('parentmapper', None)
        if not entity or entity.isa(self.mapper):
            return sql_util.ColumnAdapter.replace(self, elem)
        else:
            return None
class AliasedClass(object):
"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
__getattr__ scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
Usage is via the :class:`~sqlalchemy.orm.aliased()` synonym::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).\\
join((user_alias, User.id > user_alias.id)).\\
filter(User.name==user_alias.name)
The resulting object is an instance of :class:`.AliasedClass`, however
it implements a ``__getattribute__()`` scheme which will proxy attribute
access to that of the ORM class being aliased. All classmethods
on the mapped entity should also be available here, including
hybrids created with the :ref:`hybrids_toplevel` extension,
which will receive the :class:`.AliasedClass` as the "class" argument
when classmethods are called.
:param cls: ORM mapped entity which will be "wrapped" around an alias.
:param alias: a selectable, such as an :func:`.alias` or :func:`.select`
construct, which will be rendered in place of the mapped table of the
ORM entity. If left as ``None``, an ordinary :class:`.Alias` of the
ORM entity's mapped table will be generated.
:param name: A name which will be applied both to the :class:`.Alias`
if one is generated, as well as the name present in the "named tuple"
returned by the :class:`.Query` object when results are returned.
:param adapt_on_names: if True, more liberal "matching" will be used when
mapping the mapped columns of the ORM entity to those of the given selectable -
a name-based match will be performed if the given selectable doesn't
otherwise have a column that corresponds to one on the entity. The
use case for this is when associating an entity with some derived
selectable such as one that uses aggregate functions::
class UnitPrice(Base):
__tablename__ = 'unit_price'
...
unit_id = Column(Integer)
price = Column(Numeric)
aggregated_unit_price = Session.query(
func.sum(UnitPrice.price).label('price')
).group_by(UnitPrice.unit_id).subquery()
aggregated_unit_price = aliased(UnitPrice, alias=aggregated_unit_price, adapt_on_names=True)
Above, functions on ``aggregated_unit_price`` which
refer to ``.price`` will return the
``fund.sum(UnitPrice.price).label('price')`` column,
as it is matched on the name "price". Ordinarily, the "price" function wouldn't
have any "column correspondence" to the actual ``UnitPrice.price`` column
as it is not a proxy of the original.
.. versionadded:: 0.7.3
"""
    def __init__(self, cls, alias=None, name=None, adapt_on_names=False):
        """Wrap mapped class *cls* around *alias*.

        If *alias* is None, an ordinary Alias of the mapper's
        (polymorphic) selectable is generated, optionally named *name*.
        """
        self.__mapper = _class_to_mapper(cls)
        self.__target = self.__mapper.class_
        self.__adapt_on_names = adapt_on_names
        if alias is None:
            # default: alias the mapper's full with_polymorphic selectable
            alias = self.__mapper._with_polymorphic_selectable.alias(name=name)
        # adapter translates column expressions of the mapped table into
        # the corresponding columns of the alias
        self.__adapter = sql_util.ClauseAdapter(alias,
            equivalents=self.__mapper._equivalent_columns,
            adapt_on_names=self.__adapt_on_names)
        self.__alias = alias
        # used to assign a name to the RowTuple object
        # returned by Query.
        self._sa_label_name = name
        self.__name__ = 'AliasedClass_' + str(self.__target)
    def __getstate__(self):
        """Pickle support: serialize only the raw ingredients; the
        ClauseAdapter is rebuilt in __setstate__."""
        return {
            'mapper':self.__mapper,
            'alias':self.__alias,
            'name':self._sa_label_name,
            'adapt_on_names':self.__adapt_on_names,
        }

    def __setstate__(self, state):
        """Unpickle support: restore attributes and reconstruct the
        ClauseAdapter from the stored mapper/alias pair."""
        self.__mapper = state['mapper']
        self.__target = self.__mapper.class_
        self.__adapt_on_names = state['adapt_on_names']
        alias = state['alias']
        self.__adapter = sql_util.ClauseAdapter(alias,
            equivalents=self.__mapper._equivalent_columns,
            adapt_on_names=self.__adapt_on_names)
        self.__alias = alias
        name = state['name']
        self._sa_label_name = name
        self.__name__ = 'AliasedClass_' + str(self.__target)
    def __adapt_element(self, elem):
        """Adapt a clause element to the alias, annotating it so the ORM
        can trace it back to this AliasedClass and its mapper."""
        return self.__adapter.traverse(elem).\
                    _annotate({
                        'parententity': self,
                        'parentmapper':self.__mapper}
                    )

    def __adapt_prop(self, existing, key):
        """Build an alias-adapted QueryableAttribute for *key* and cache it
        on the instance (so __getattr__ is only hit once per attribute)."""
        comparator = existing.comparator.adapted(self.__adapt_element)
        queryattr = attributes.QueryableAttribute(self, key,
            impl=existing.impl, parententity=self, comparator=comparator)
        # cache: subsequent access finds the attribute directly
        setattr(self, key, queryattr)
        return queryattr
    def __getattr__(self, key):
        """Proxy attribute access to the aliased class, adapting mapped
        attributes and descriptors to the alias on the way out."""
        # Walk the MRO manually, using object.__getattribute__ so that
        # descriptors are *not* invoked during the search.
        for base in self.__target.__mro__:
            try:
                attr = object.__getattribute__(base, key)
            except AttributeError:
                continue
            else:
                break
        else:
            raise AttributeError(key)

        if isinstance(attr, attributes.QueryableAttribute):
            # mapped attribute: adapt to the alias (and cache via setattr)
            return self.__adapt_prop(attr, key)
        elif hasattr(attr, 'func_code'):
            # NOTE(review): 'func_code' / 'im_self' / 'im_func' only exist
            # on Python 2 -- this branch assumes a Python 2 runtime.
            is_method = getattr(self.__target, key, None)
            if is_method and is_method.im_self is not None:
                # classmethod: rebind so the AliasedClass is received as
                # the "class" argument
                return util.types.MethodType(attr.im_func, self, self)
            else:
                return None
        elif hasattr(attr, '__get__'):
            # generic descriptor (e.g. hybrid property): invoke with this
            # AliasedClass standing in for the owning class
            ret = attr.__get__(None, self)
            if isinstance(ret, PropComparator):
                return ret.adapted(self.__adapt_element)
            return ret
        else:
            return attr
    def __repr__(self):
        """Debug representation: id of the alias plus the target class name."""
        return '<AliasedClass at 0x%x; %s>' % (
            id(self), self.__target.__name__)
def aliased(element, alias=None, name=None, adapt_on_names=False):
    """Produce an alias of the given ORM entity or selectable.

    Plain Core selectables get an ordinary ``.alias()``; mapped entities
    are wrapped in an :class:`.AliasedClass`.  ``adapt_on_names`` is only
    meaningful for the ORM case and is rejected otherwise.
    """
    if isinstance(element, expression.FromClause):
        if adapt_on_names:
            raise sa_exc.ArgumentError("adapt_on_names only applies to ORM elements")
        return element.alias(name)
    else:
        return AliasedClass(element, alias=alias, name=name, adapt_on_names=adapt_on_names)
def _orm_annotate(element, exclude=None):
    """Deep copy the given ClauseElement, annotating each element with the
    "_orm_adapt" flag.

    Elements within the exclude collection will be cloned but not annotated.
    """
    return sql_util._deep_annotate(element, {'_orm_adapt':True}, exclude)

# inverse operation: strip annotations from a clause tree
_orm_deannotate = sql_util._deep_deannotate
class _ORMJoin(expression.Join):
    """Extend Join to support ORM constructs as input."""

    __visit_name__ = expression.Join.__visit_name__

    def __init__(self, left, right, onclause=None,
                            isouter=False, join_to_left=True):
        # adapt_from: selectable to which the ON clause should be aliased
        # (left side); adapt_to: aliased right side, if any.
        adapt_from = None

        if hasattr(left, '_orm_mappers'):
            # left side is itself an _ORMJoin: continue from its right mapper
            left_mapper = left._orm_mappers[1]
            if join_to_left:
                adapt_from = left.right
        else:
            left_mapper, left, left_is_aliased = _entity_info(left)
            if join_to_left and (left_is_aliased or not left_mapper):
                adapt_from = left

        right_mapper, right, right_is_aliased = _entity_info(right)
        if right_is_aliased:
            adapt_to = right
        else:
            adapt_to = None

        if left_mapper or right_mapper:
            self._orm_mappers = (left_mapper, right_mapper)

            # resolve the onclause into a MapperProperty where possible
            if isinstance(onclause, basestring):
                # NOTE(review): basestring implies a Python 2 runtime
                prop = left_mapper.get_property(onclause)
            elif isinstance(onclause, attributes.QueryableAttribute):
                if adapt_from is None:
                    adapt_from = onclause.__clause_element__()
                prop = onclause.property
            elif isinstance(onclause, MapperProperty):
                prop = onclause
            else:
                prop = None

            if prop:
                # let the relationship build the actual join criteria,
                # including any secondary (many-to-many) table
                pj, sj, source, dest, \
                    secondary, target_adapter = prop._create_joins(
                                source_selectable=adapt_from,
                                dest_selectable=adapt_to,
                                source_polymorphic=True,
                                dest_polymorphic=True,
                                of_type=right_mapper)

                if sj is not None:
                    # many-to-many: join to the secondary table first
                    left = sql.join(left, secondary, pj, isouter)
                    onclause = sj
                else:
                    onclause = pj
                self._target_adapter = target_adapter

        expression.Join.__init__(self, left, right, onclause, isouter)

    def join(self, right, onclause=None, isouter=False, join_to_left=True):
        """Chain another ORM-aware join onto this one."""
        return _ORMJoin(self, right, onclause, isouter, join_to_left)

    def outerjoin(self, right, onclause=None, join_to_left=True):
        """Chain an ORM-aware LEFT OUTER join onto this one."""
        return _ORMJoin(self, right, onclause, True, join_to_left)
def join(left, right, onclause=None, isouter=False, join_to_left=True):
    """Produce an inner join between left and right clauses.

    :func:`.orm.join` is an extension to the core join interface
    provided by :func:`.sql.expression.join()`, where the
    left and right selectables may be not only core selectable
    objects such as :class:`.Table`, but also mapped classes or
    :class:`.AliasedClass` instances.   The "on" clause can
    be a SQL expression, or an attribute or string name
    referencing a configured :func:`.relationship`.

    ``join_to_left`` indicates to attempt aliasing the ON clause,
    in whatever form it is passed, to the selectable
    passed as the left side.  If False, the onclause
    is used as is.

    :func:`.orm.join` is not commonly needed in modern usage,
    as its functionality is encapsulated within that of the
    :meth:`.Query.join` method, which features a
    significant amount of automation beyond :func:`.orm.join`
    by itself.  Explicit usage of :func:`.orm.join`
    with :class:`.Query` involves usage of the
    :meth:`.Query.select_from` method, as in::

        from sqlalchemy.orm import join
        session.query(User).\\
            select_from(join(User, Address, User.addresses)).\\
            filter(Address.email_address=='foo@bar.com')

    In modern SQLAlchemy the above join can be written more
    succinctly as::

        session.query(User).\\
                join(User.addresses).\\
                filter(Address.email_address=='foo@bar.com')

    See :meth:`.Query.join` for information on modern usage
    of ORM level joins.

    """
    # all of the heavy lifting lives in _ORMJoin
    return _ORMJoin(left, right, onclause, isouter, join_to_left)
def outerjoin(left, right, onclause=None, join_to_left=True):
    """Produce a left outer join between left and right clauses.

    This is the "outer join" version of the :func:`.orm.join` function,
    featuring the same behavior except that an OUTER JOIN is generated.
    See that function's documentation for other usage details.

    """
    return _ORMJoin(left, right, onclause, True, join_to_left)
def with_parent(instance, prop):
    """Create filtering criterion that relates this query's primary entity
    to the given related instance, using established :func:`.relationship()`
    configuration.

    The SQL rendered is the same as that rendered when a lazy loader
    would fire off from the given parent on that attribute, meaning
    that the appropriate state is taken from the parent object in
    Python without the need to render joins to the parent table
    in the rendered statement.

    .. versionchanged:: 0.6.4
        This method accepts parent instances in all
        persistence states, including transient, persistent, and detached.
        Only the requisite primary key/foreign key attributes need to
        be populated.  Previous versions didn't work with transient
        instances.

    :param instance:
      An instance which has some :func:`.relationship`.

    :param property:
      String property name, or class-bound attribute, which indicates
      what relationship from the instance  should be used to reconcile the
      parent/child relationship.

    """
    # normalize prop to a MapperProperty (accepts a string name or a
    # class-bound attribute)
    if isinstance(prop, basestring):
        # NOTE(review): basestring implies a Python 2 runtime
        mapper = object_mapper(instance)
        prop = getattr(mapper.class_, prop).property
    elif isinstance(prop, attributes.QueryableAttribute):
        prop = prop.property

    return prop.compare(operators.eq,
                        instance,
                        value_is_parent=True)
def _entity_info(entity, compile=True):
    """Return mapping information given a class, mapper, or AliasedClass.

    Returns 3-tuple of: mapper, mapped selectable, boolean indicating if this
    is an aliased() construct.

    If the given entity is not a mapper, mapped class, or aliased construct,
    returns None, the entity, False.  This is typically used to allow
    unmapped selectables through.

    """
    if isinstance(entity, AliasedClass):
        # reach into the AliasedClass's name-mangled privates
        return entity._AliasedClass__mapper, entity._AliasedClass__alias, True

    if isinstance(entity, mapperlib.Mapper):
        mapper = entity

    elif isinstance(entity, type):
        class_manager = attributes.manager_of_class(entity)

        if class_manager is None:
            # plain unmapped class: pass through
            return None, entity, False

        mapper = class_manager.mapper
    else:
        # arbitrary selectable or other object: pass through
        return None, entity, False

    if compile and mapperlib.module._new_mappers:
        # lazily configure mappers that were declared since last use
        mapperlib.configure_mappers()
    return mapper, mapper._with_polymorphic_selectable, False
def _entity_descriptor(entity, key):
    """Return a class attribute given an entity and string name.

    May return :class:`.InstrumentedAttribute` or user-defined
    attribute.

    """
    if isinstance(entity, expression.FromClause):
        # Core selectable: look the name up in its column collection
        description = entity
        entity = entity.c
    elif not isinstance(entity, (AliasedClass, type)):
        # assume a Mapper or instrumented object: use its class
        description = entity = entity.class_
    else:
        description = entity

    try:
        return getattr(entity, key)
    except AttributeError:
        raise sa_exc.InvalidRequestError(
                    "Entity '%s' has no property '%s'" %
                    (description, key)
                )
def _orm_columns(entity):
    """Return the list of columns of the entity's mapped selectable,
    or the entity itself (in a one-element list) if it is not
    column-bearing."""
    mapper, selectable, is_aliased_class = _entity_info(entity)
    if isinstance(selectable, expression.Selectable):
        return [c for c in selectable.c]
    else:
        return [selectable]

def _orm_selectable(entity):
    """Return only the mapped selectable for the given entity."""
    mapper, selectable, is_aliased_class = _entity_info(entity)
    return selectable
def _attr_as_key(attr):
if hasattr(attr, 'key'):
return attr.key
else:
return expression._column_as_key(attr)
def _is_aliased_class(entity):
    """Return True if *entity* is an :class:`.AliasedClass` construct."""
    return isinstance(entity, AliasedClass)

# shortcut: fetch the Mapper from an InstanceState
_state_mapper = util.dottedgetter('manager.mapper')
def object_mapper(instance):
    """Given an object, return the primary Mapper associated with the object
    instance.

    Raises UnmappedInstanceError if no mapping is configured.

    """
    try:
        state = attributes.instance_state(instance)
        return state.manager.mapper
    except exc.UnmappedClassError:
        # re-raise as the instance-flavored error for a clearer message
        raise exc.UnmappedInstanceError(instance)
    except exc.NO_STATE:
        raise exc.UnmappedInstanceError(instance)
def class_mapper(class_, compile=True):
    """Given a class, return the primary :class:`.Mapper` associated
    with the given class.

    Raises :class:`.UnmappedClassError` if no mapping is configured
    on the given class, or :class:`.ArgumentError` if a non-class
    object is passed.

    """
    try:
        class_manager = attributes.manager_of_class(class_)
        mapper = class_manager.mapper
    except exc.NO_STATE:
        # manager_of_class returned None (or similar): distinguish a
        # non-class argument from an unmapped class
        if not isinstance(class_, type):
            raise sa_exc.ArgumentError("Class object expected, got '%r'." % class_)
        raise exc.UnmappedClassError(class_)

    if compile and mapperlib.module._new_mappers:
        # lazily configure mappers that were declared since last use
        mapperlib.configure_mappers()
    return mapper
def _class_to_mapper(class_or_mapper, compile=True):
    """Coerce a class, Mapper, or AliasedClass to its Mapper.

    Raises :class:`.UnmappedClassError` for anything else.
    """
    if _is_aliased_class(class_or_mapper):
        # reach into the AliasedClass's name-mangled private
        return class_or_mapper._AliasedClass__mapper
    elif isinstance(class_or_mapper, type):
        try:
            class_manager = attributes.manager_of_class(class_or_mapper)
            mapper = class_manager.mapper
        except exc.NO_STATE:
            raise exc.UnmappedClassError(class_or_mapper)
    elif isinstance(class_or_mapper, mapperlib.Mapper):
        mapper = class_or_mapper
    else:
        raise exc.UnmappedClassError(class_or_mapper)

    if compile and mapperlib.module._new_mappers:
        # lazily configure mappers that were declared since last use
        mapperlib.configure_mappers()
    return mapper
def has_identity(object):
    """Return True if the given instance has a database identity
    (i.e. is persistent or detached)."""
    state = attributes.instance_state(object)
    return state.has_identity
def _is_mapped_class(cls):
    """Return True if the given object is a mapped class,
    :class:`.Mapper`, or :class:`.AliasedClass`."""

    if isinstance(cls, (AliasedClass, mapperlib.Mapper)):
        return True
    if isinstance(cls, expression.ClauseElement):
        # Core constructs are never "mapped classes"
        return False
    if isinstance(cls, type):
        manager = attributes.manager_of_class(cls)
        # mapped iff instrumentation is present and fully set up
        return manager and _INSTRUMENTOR in manager.info
    return False
def _mapper_or_none(cls):
    """Return the :class:`.Mapper` for the given class or None if the
    class is not mapped."""
    manager = attributes.manager_of_class(cls)
    if manager is not None and _INSTRUMENTOR in manager.info:
        return manager.info[_INSTRUMENTOR]
    else:
        return None
def instance_str(instance):
    """Return a string describing an instance."""
    return state_str(attributes.instance_state(instance))

def state_str(state):
    """Return a string describing an instance via its InstanceState."""
    if state is None:
        return "None"
    else:
        return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj()))

def state_class_str(state):
    """Return a string describing an instance's class via its InstanceState."""
    if state is None:
        return "None"
    else:
        return '<%s>' % (state.class_.__name__, )

def attribute_str(instance, attribute):
    """Return "<Class at 0x...>.attribute" for the given instance/name."""
    return instance_str(instance) + "." + attribute

def state_attribute_str(state, attribute):
    """Like attribute_str(), but starting from an InstanceState."""
    return state_str(state) + "." + attribute
def identity_equal(a, b):
    """Return True if instances *a* and *b* share the same database
    identity key, False otherwise."""
    if a is b:
        return True
    if a is None or b is None:
        return False
    try:
        state_a = attributes.instance_state(a)
        state_b = attributes.instance_state(b)
    except exc.NO_STATE:
        # one of the objects is not instrumented/mapped
        return False
    if state_a.key is None or state_b.key is None:
        # transient objects have no identity key
        return False
    return state_a.key == state_b.key
| gpl-3.0 |
hcarvalhoalves/SublimeHaskell | parseoutput.py | 4 | 11648 | import os
import re
import sublime
import sublime_plugin
import time
from sys import version
from threading import Thread
from collections import defaultdict
PyV3 = version[0] == "3"
if int(sublime.version()) < 3000:
from sublime_haskell_common import log, are_paths_equal, call_and_wait, get_setting_async, show_status_message_process, show_status_message
else:
from SublimeHaskell.sublime_haskell_common import log, are_paths_equal, call_and_wait, get_setting_async, show_status_message_process, show_status_message
# Name of the Sublime output panel used to display build results.
ERROR_PANEL_NAME = 'haskell_error_checker'

# This regex matches an unindented line, followed by zero or more
# indented, non-empty lines.
# It also eats whitespace before the first line.
# The first line is divided into a filename, a line number, and a column.
output_regex = re.compile(
    r'\s*^(\S*):(\d+):(\d+):(.*$(?:\n^[ \t].*$)*)',
    re.MULTILINE)

# Extract the filename, line, column, and description from an error message:
# (must stay in sync with the format emitted by OutputMessage.__unicode__)
result_file_regex = r'^(\S*?): line (\d+), column (\d+):$'

# Global list of errors. Used e.g. for jumping to the next one.
# Properly assigned being a defaultdict in clear_error_marks().
# Structure: ERRORS[filename][m.line] = OutputMessage()
ERRORS = {}
def filename_of_path(p):
    """Return the final path component (everything after the last
    slash or backslash).

    Deliberately avoids os.path: the path may use either separator
    regardless of the host platform.
    """
    match = re.match(r'(.*[/\\])?(.*)', p)
    return match.group(2)
class OutputMessage(object):
    """A single error or warning message produced by GHC."""

    def __init__(self, filename, line, column, message, level):
        self.filename = filename
        self.line = int(line)
        self.column = int(column)
        # normalize platform line endings inside the message body
        self.message = message.replace(os.linesep, "\n")
        self.level = level

    def __unicode__(self):
        # must match result_file_regex
        return u'%s: line %s, column %s:\n %s' % (
            self.filename, self.line, self.column, self.message)

    def __str__(self):
        return self.__unicode__()

    def __repr__(self):
        # short form for logs: basename plus a message prefix
        return '<OutputMessage %s:%s:%s: %s>' % (
            filename_of_path(self.filename),
            self.line,
            self.column,
            self.message[:10] + '..')

    def find_region_in_view(self, view):
        """Return the (whitespace-trimmed) Region referred to by this
        error message."""
        # Convert line count to a zero-based index; the whole line is
        # highlighted, not just the reported column.
        start = view.text_point(self.line - 1, 0)
        whole_line = view.line(start)
        return trim_region(view, whole_line)
def clear_error_marks():
    """Reset the global ERRORS store to an empty nested defaultdict
    (ERRORS[filename][line] -> list of OutputMessage)."""
    global ERRORS

    listdict = lambda: defaultdict(list)
    ERRORS = defaultdict(listdict)

def set_global_error_messages(messages):
    """Replace the global ERRORS store with the given list of
    OutputMessage objects, indexed by filename and line."""
    global ERRORS

    clear_error_marks()

    for m in messages:
        ERRORS[m.filename][m.line].append(m)
def run_build_thread(view, cabal_project_dir, msg, cmd, on_done):
    """Run a single build command on a background thread."""
    run_chain_build_thread(view, cabal_project_dir, msg, [cmd], on_done)

def run_chain_build_thread(view, cabal_project_dir, msg, cmds, on_done):
    """Run several build commands sequentially on a background thread,
    showing *msg* in the status bar while they run."""
    show_status_message_process(msg, priority = 3)
    thread = Thread(
        target=wait_for_chain_to_complete,
        args=(view, cabal_project_dir, msg, cmds, on_done))
    thread.start()

def wait_for_build_to_complete(view, cabal_project_dir, msg, cmd, on_done):
    """Run a command, wait for it to complete, then parse and display
    the resulting errors."""

    wait_for_chain_to_complete(view, cabal_project_dir, msg, [cmd], on_done)

def wait_for_chain_to_complete(view, cabal_project_dir, msg, cmds, on_done):
    """Chains several commands, wait for them to complete, then parse and display
    the resulting errors."""

    # First hide error panel to show that something is going on
    sublime.set_timeout(lambda: hide_output(view), 0)

    # run and wait commands, fail on first fail
    # NOTE(review): if cmds is empty, exit_code/stderr/stdout are never
    # assigned and the lines below raise NameError -- callers always pass
    # at least one command.
    for cmd in cmds:
        exit_code, stdout, stderr = call_and_wait(
            cmd,
            cwd=cabal_project_dir)
        if exit_code != 0:
            break

    errmsg = stderr if stderr else stdout

    # Notify UI thread that commands are done
    sublime.set_timeout(on_done, 0)

    parse_output_messages_and_show(view, msg, cabal_project_dir, exit_code, errmsg)
def format_output_messages(messages):
    """Formats list of messages"""
    # unicode() exists only on Python 2; PyV3 selects the right builtin
    if PyV3:
        return '\n'.join(str(x) for x in messages)
    else:
        return u'\n'.join(unicode(x) for x in messages)

def show_output_result_text(view, msg, text, exit_code, base_dir):
    """Shows text (formatted messages) in output with build result"""

    success = exit_code == 0

    success_message = 'SUCCEEDED' if success else 'FAILED'
    output = u'Build {0}\n\n{1}'.format(success_message, text.strip())

    show_status_message_process(msg, success)
    # Show panel if there is any text to show (without the part that we add)
    if text:
        if get_setting_async('show_output_window'):
            sublime.set_timeout(lambda: write_output(view, output, base_dir), 0)
def parse_output_messages_and_show(view, msg, base_dir, exit_code, stderr):
    """Parse errors and display resulting errors"""

    # stderr/stdout can contain unicode characters
    # already done in call_and_wait
    # stderr = stderr.decode('utf-8')

    # The process has terminated; parse and display the output:
    parsed_messages = parse_output_messages(base_dir, stderr)
    # The unparseable part (for other errors) -- everything the message
    # regex did not consume
    unparsable = output_regex.sub('', stderr).strip()

    # Set global error list
    set_global_error_messages(parsed_messages)

    # If we couldn't parse any messages, just show the stderr
    # Otherwise the parsed errors and the unparsable stderr remainder
    outputs = []

    if parsed_messages:
        outputs += [format_output_messages(parsed_messages)]
    if unparsable:
        outputs += ["\nREMAINING STDERR:\n", unparsable]

    output_text = '\n'.join(outputs)

    show_output_result_text(view, msg, output_text, exit_code, base_dir)

    # region marking must happen on the UI thread
    sublime.set_timeout(lambda: mark_messages_in_views(parsed_messages), 0)
def mark_messages_in_views(errors):
    """Mark the regions in all open views (across every window) where
    errors were found, and log how long the marking took.

    :param errors: list of OutputMessage objects.
    """
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # prefer time.perf_counter() when the runtime provides it (Sublime
    # Text 2 embeds Python 2.6, which only has time.clock).
    timer = getattr(time, 'perf_counter', None) or time.clock
    begin_time = timer()
    # Mark each diagnostic in each open view in all windows:
    for w in sublime.windows():
        for v in w.views():
            view_filename = v.file_name()
            # Unsaved files have no file name
            if view_filename is None:
                continue
            errors_in_view = list(filter(
                lambda x: are_paths_equal(view_filename, x.filename),
                errors))
            mark_messages_in_view(errors_in_view, v)
    end_time = timer()
    log('total time to mark {0} diagnostics: {1} seconds'.format(
        len(errors), end_time - begin_time))
# Region scope ("style") and gutter icon used for each message severity
# when marking views; keys match OutputMessage.level.
message_levels = {
    'hint': {
        'style': 'comment.warning',
        'icon': 'light_x_bright'
    },
    'warning': {
        'style': 'comment.warning',
        'icon': 'grey_x_light_shadow'
    },
    'error': {
        'style': 'invalid',
        'icon': 'grey_x'
    }
}
# These next and previous commands were shamelessly copied
# from the great SublimeClang plugin.
class SublimeHaskellNextError(sublime_plugin.TextCommand):
    """Jump to the next error/warning in the current file, wrapping
    around to the first one past end of file."""
    def run(self, edit):
        log("SublimeHaskellNextError")
        v = self.view
        # NOTE(review): on Python 3, .encode() yields bytes while ERRORS
        # keys come from str filenames -- verify this lookup still matches
        # under Sublime Text 3.
        fn = v.file_name().encode("utf-8")
        line, column = v.rowcol(v.sel()[0].a)
        line += 1
        gotoline = -1
        if fn in ERRORS:
            # find the first error line strictly after the cursor
            for errLine in sorted(ERRORS[fn].keys()):
                if errLine > line:
                    gotoline = errLine
                    break
            # No next line: Wrap around if possible
            if gotoline == -1 and len(ERRORS[fn]) > 0:
                gotoline = sorted(ERRORS[fn].keys())[0]
        if gotoline != -1:
            v.window().open_file("%s:%d" % (fn, gotoline), sublime.ENCODED_POSITION)
        else:
            sublime.status_message("No more errors or warnings!")

class SublimeHaskellPreviousError(sublime_plugin.TextCommand):
    """Jump to the previous error/warning in the current file, wrapping
    around to the last one before start of file."""
    def run(self, edit):
        v = self.view
        # NOTE(review): same bytes-vs-str concern as SublimeHaskellNextError
        fn = v.file_name().encode("utf-8")
        line, column = v.rowcol(v.sel()[0].a)
        line += 1
        gotoline = -1
        if fn in ERRORS:
            # scan error lines in descending order for the first one
            # strictly before the cursor
            for errLine in sorted(ERRORS[fn].keys(), key = lambda x: -x):
                if errLine < line:
                    gotoline = errLine
                    break
            # No previous line: Wrap around if possible
            if gotoline == -1 and len(ERRORS[fn]) > 0:
                gotoline = sorted(ERRORS[fn].keys())[-1]
        if gotoline != -1:
            v.window().open_file("%s:%d" % (fn, gotoline), sublime.ENCODED_POSITION)
        else:
            sublime.status_message("No more errors or warnings!")
def region_key(name):
    """Return the region-set key used for a given severity level name."""
    return 'subhs-' + name + 's'
def mark_messages_in_view(messages, view):
    """Replace this view's marked regions with the regions for the
    given messages, grouped by severity level."""
    # Regions by level
    regions = {}
    for k in message_levels.keys():
        regions[k] = []
    for m in messages:
        regions[m.level].append(m.find_region_in_view(view))
    for nm, lev in message_levels.items():
        # erase first so stale marks from a previous build disappear
        view.erase_regions(region_key(nm))
        view.add_regions(
            region_key(nm),
            regions[nm],
            lev['style'],
            lev['icon'],
            sublime.DRAW_OUTLINED)
def write_output(view, text, cabal_project_dir):
    "Write text to Sublime's output panel."
    output_view = view.window().get_output_panel(ERROR_PANEL_NAME)
    output_view.set_read_only(False)
    # Configure Sublime's error message parsing:
    output_view.settings().set("result_file_regex", result_file_regex)
    output_view.settings().set("result_base_dir", cabal_project_dir)
    # Write to the output buffer:
    output_view.run_command('sublime_haskell_output_text', {
        'text': text })
    # Set the selection to the beginning of the view so that "next result" works:
    output_view.sel().clear()
    output_view.sel().add(sublime.Region(0))
    output_view.set_read_only(True)
    # Show the results panel:
    view.window().run_command('show_panel', {'panel': 'output.' + ERROR_PANEL_NAME})

def hide_output(view):
    """Hide the build-results output panel."""
    view.window().run_command('hide_panel', {'panel': 'output.' + ERROR_PANEL_NAME})
def parse_output_messages(base_dir, text):
    "Parse text into a list of OutputMessage objects."
    matches = output_regex.finditer(text)

    def to_error(m):
        filename, line, column, messy_details = m.groups()
        return OutputMessage(
            # Record the absolute, normalized path.
            os.path.normpath(os.path.join(base_dir, filename)),
            line,
            column,
            messy_details.strip(),
            # severity is inferred from the message text itself
            'warning' if 'warning' in messy_details.lower() else 'error')

    return list(map(to_error, matches))
def trim_region(view, region):
    "Return the specified Region, but without leading or trailing whitespace."
    text = view.substr(region)
    # Regions may be selected backwards, so b could be less than a.
    a = min(region.a, region.b)
    b = max(region.a, region.b)
    # Figure out how much to move the endpoints to lose the space.
    # If the region is entirely whitespace, give up and return it unchanged.
    if text.isspace():
        return region
    else:
        text_trimmed_on_left = text.lstrip()
        text_trimmed = text_trimmed_on_left.rstrip()
        # shift start right by the leading whitespace, end left by the
        # trailing whitespace
        a += len(text) - len(text_trimmed_on_left)
        b -= len(text_trimmed_on_left) - len(text_trimmed)
        return sublime.Region(a, b)
| mit |
xdevelsistemas/taiga-back-community | taiga/projects/migrations/0011_auto_20141028_2057.py | 27 | 1109 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_pgjson.fields
class Migration(migrations.Migration):
    """Move per-project module configuration out of the Project model and
    into a dedicated one-to-one ProjectModulesConfig model."""

    dependencies = [
        ('projects', '0010_project_modules_config'),
    ]

    operations = [
        migrations.CreateModel(
            name='ProjectModulesConfig',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
                ('config', django_pgjson.fields.JsonField(null=True, verbose_name='modules config', blank=True)),
                ('project', models.OneToOneField(to='projects.Project', verbose_name='project', related_name='modules_config')),
            ],
            options={
                'verbose_name_plural': 'project modules configs',
                'verbose_name': 'project modules config',
                'ordering': ['project'],
            },
            bases=(models.Model,),
        ),
        # the old field is superseded by the OneToOne above
        migrations.RemoveField(
            model_name='project',
            name='modules_config',
        ),
    ]
| agpl-3.0 |
ncdesouza/bookworm | env/lib/python2.7/site-packages/pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=True, lowercaseAttrName=True, parser=None):
        """Wrap *stream* in an HTMLInputStream and initialize tokenizer
        state (starting in the "data" state)."""

        self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
        self.parser = parser

        # Perform case conversions?
        self.lowercaseElementName = lowercaseElementName
        self.lowercaseAttrName = lowercaseAttrName

        # Setup the initial tokenizer state
        self.escapeFlag = False
        self.lastFourChars = []
        self.state = self.dataState
        self.escape = False

        # The current token being created
        self.currentToken = None
        super(HTMLTokenizer, self).__init__()
    def __iter__(self):
        """ This is where the magic happens.

        We do our usually processing through the states and when we have a token
        to return we yield the token which pauses processing until the next token
        is requested.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            # stream-level errors are surfaced as ParseError tokens first
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """

        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16

        charStack = []

        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()

        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)

        # Certain characters get replaced with others
        if charAsInt in replacementCharacters:
            char = replacementCharacters[charAsInt]
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        elif ((0xD800 <= charAsInt <= 0xDFFF) or
              (charAsInt > 0x10FFFF)):
            # surrogates and out-of-range code points become U+FFFD
            char = "\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "illegal-codepoint-for-numeric-entity",
                                    "datavars": {"charAsInt": charAsInt}})
        else:
            # Should speed up this check somehow (e.g. move the set to a constant)
            # control characters and noncharacters: emit a parse error but
            # still produce the character
            if ((0x0001 <= charAsInt <= 0x0008) or
                (0x000E <= charAsInt <= 0x001F) or
                (0x007F <= charAsInt <= 0x009F) or
                (0xFDD0 <= charAsInt <= 0xFDEF) or
                charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                        0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                        0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                        0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                        0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                        0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                        0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                        0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                        0xFFFFF, 0x10FFFE, 0x10FFFF])):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data":
                                        "illegal-codepoint-for-numeric-entity",
                                        "datavars": {"charAsInt": charAsInt}})
            try:
                # Try/except needed as UCS-2 Python builds' unichar only works
                # within the BMP.
                char = chr(charAsInt)
            except ValueError:
                # manually compose a surrogate pair on narrow builds
                v = charAsInt - 0x10000
                char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))

        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != ";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "numeric-entity-without-semicolon"})
            self.stream.unget(c)

        return char
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume a character reference ("&...") from the stream.

        Appends the result either to the current attribute value
        (fromAttribute=True) or to the token queue as character tokens.
        """
        # Initialise to the default output for when no entity is matched
        output = "&"

        charStack = [self.stream.char()]
        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
                or (allowedChar is not None and allowedChar == charStack[0])):
            # not actually an entity: emit the bare "&"
            self.stream.unget(charStack[0])

        elif charStack[0] == "#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in ("x", "X"):
                hex = True
                charStack.append(self.stream.char())

            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
                    or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            while (charStack[-1] is not EOF):
                if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
                    break
                charStack.append(self.stream.char())

            # At this point we have a string that starts with some characters
            # that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            try:
                entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
                entityLength = len(entityName)
            except KeyError:
                entityName = None

            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "named-entity-without-semicolon"})
                # in attributes, a semicolonless entity followed by an
                # alphanumeric or "=" is treated as literal text
                if (entityName[-1] != ";" and fromAttribute and
                    (charStack[entityLength] in asciiLetters or
                     charStack[entityLength] in digits or
                     charStack[entityLength] == "=")):
                    self.stream.unget(charStack.pop())
                    output = "&" + "".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    output += "".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                        "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = "&" + "".join(charStack)

        if fromAttribute:
            self.currentToken["data"][-1][1] += output
        else:
            if output in spaceCharacters:
                tokenType = "SpaceCharacters"
            else:
                tokenType = "Characters"
            self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
    def processEntityInAttribute(self, allowedChar):
        """This method replaces the need for "entityInAttributeValueState".
        """
        self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)

    def emitCurrentToken(self):
        """This method is a generic handler for emitting the tags. It also sets
        the state to "data" because that's what's needed after a token has been
        emitted.
        """
        token = self.currentToken
        # Add token to the queue to be yielded
        if (token["type"] in tagTokenTypes):
            if self.lowercaseElementName:
                token["name"] = token["name"].translate(asciiUpper2Lower)
            if token["type"] == tokenTypes["EndTag"]:
                # end tags may carry neither attributes nor a self-closing flag
                if token["data"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "attributes-in-end-tag"})
                if token["selfClosing"]:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                            "data": "self-closing-flag-on-end-tag"})
        self.tokenQueue.append(token)
        self.state = self.dataState
# Below are the various tokenizer states worked out.
    def dataState(self):
        """Tokenizer "data" state: dispatch on the next input character.

        Returns False at EOF (ends tokenization), True otherwise.
        """
        data = self.stream.char()
        if data == "&":
            self.state = self.entityDataState
        elif data == "<":
            self.state = self.tagOpenState
        elif data == "\u0000":
            # NUL is a parse error but is emitted unchanged in this state
            # (unlike rcdata/rawtext, which substitute U+FFFD).
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\u0000"})
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def entityDataState(self):
        """Consume the character reference after "&" in the data state."""
        self.consumeEntity()
        self.state = self.dataState
        return True
    def rcdataState(self):
        """Tokenizer RCDATA state (e.g. inside <title>/<textarea> content).

        Returns False at EOF (ends tokenization), True otherwise.
        """
        data = self.stream.char()
        if data == "&":
            self.state = self.characterReferenceInRcdata
        elif data == "<":
            self.state = self.rcdataLessThanSignState
        elif data == EOF:
            # Tokenization ends.
            return False
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD here.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
                                    data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            chars = self.stream.charsUntil(("&", "<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def characterReferenceInRcdata(self):
        """Consume the character reference after "&" in the RCDATA state."""
        self.consumeEntity()
        self.state = self.rcdataState
        return True
    def rawtextState(self):
        """Tokenizer RAWTEXT state (e.g. inside <style>/<xmp> content).

        No character references are recognised; only "<" is special.
        Returns False at EOF (ends tokenization), True otherwise.
        """
        data = self.stream.char()
        if data == "<":
            self.state = self.rawtextLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def scriptDataState(self):
        """Tokenizer "script data" state (inside <script> content).

        Identical in shape to rawtextState, but "<" leads into the
        script-data escaping sub-machine.
        Returns False at EOF (ends tokenization), True otherwise.
        """
        data = self.stream.char()
        if data == "<":
            self.state = self.scriptDataLessThanSignState
        elif data == "\u0000":
            # NUL is a parse error and is replaced with U+FFFD.
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            # Tokenization ends.
            return False
        else:
            chars = self.stream.charsUntil(("<", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
    def tagOpenState(self):
        """Tokenizer "tag open" state, entered after "<" in the data state.

        Letters begin a start tag; "!", "/", ">" and "?" take special
        branches; anything else emits "<" as character data and re-processes
        the character in the data state.
        """
        data = self.stream.char()
        if data == "!":
            self.state = self.markupDeclarationOpenState
        elif data == "/":
            self.state = self.closeTagOpenState
        elif data in asciiLetters:
            self.currentToken = {"type": tokenTypes["StartTag"],
                                 "name": data, "data": [],
                                 "selfClosing": False,
                                 "selfClosingAcknowledged": False}
            self.state = self.tagNameState
        elif data == ">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
            self.state = self.dataState
        elif data == "?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-tag-name"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.dataState
        return True
    def closeTagOpenState(self):
        """Tokenizer "end tag open" state, entered after "</".

        Letters begin an EndTag token; ">"/EOF/other characters are all
        parse errors with different recoveries.
        """
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing": False}
            self.state = self.tagNameState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-right-bracket"})
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-eof"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.state = self.dataState
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-char",
                                    "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        return True
    def tagNameState(self):
        """Tokenizer "tag name" state: accumulate the current tag's name."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-tag-name"})
            self.state = self.dataState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["name"] += "\uFFFD"
        else:
            self.currentToken["name"] += data
            # (Don't use charsUntil here, because tag names are
            # very short and it's faster to not do anything fancy)
        return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
    def rcdataEndTagNameState(self):
        """Accumulate a candidate RCDATA end tag name.

        The buffered name only closes the element when it matches the
        current open tag ("appropriate" end tag); otherwise the text is
        replayed as character data.
        """
        # An end tag is "appropriate" iff its name matches the last start tag.
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
    def rawtextEndTagOpenState(self):
        """After "</" in RAWTEXT: a letter starts a candidate end tag name."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.rawtextEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
    def rawtextEndTagNameState(self):
        """Accumulate a candidate RAWTEXT end tag name.

        Mirrors rcdataEndTagNameState: only an "appropriate" end tag (name
        matching the open element) closes RAWTEXT; otherwise the buffered
        text is replayed as character data.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
    def scriptDataEndTagOpenState(self):
        """After "</" in script data: a letter starts a candidate end tag name."""
        data = self.stream.char()
        if data in asciiLetters:
            self.temporaryBuffer += data
            self.state = self.scriptDataEndTagNameState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True
    def scriptDataEndTagNameState(self):
        """Accumulate a candidate script-data end tag name.

        Same pattern as the rcdata/rawtext variants: only an "appropriate"
        end tag closes the script element.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
    def scriptDataEscapedState(self):
        """Script data escaped state (inside "<!--" ... "-->").

        Returns True; on EOF it silently switches back to the data state
        (no parse error, matching the single-escaped EOF handling below).
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            self.state = self.dataState
        else:
            chars = self.stream.charsUntil(("<", "-", "\u0000"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
    def scriptDataEscapedDashState(self):
        """Escaped script data after one "-": a second "-" may end escaping."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataEscapedDashDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True
    def scriptDataEscapedDashDashState(self):
        """Escaped script data after "--": ">" closes the escape ("-->")."""
        data = self.stream.char()
        if data == "-":
            # Stay here: any run of dashes is emitted one at a time.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataEscapedState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True
    def scriptDataEscapedLessThanSignState(self):
        """After "<" in escaped script data.

        "/" may open an end tag; a letter may begin the double-escape
        sequence ("<script" inside the escaped region).
        """
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEscapedEndTagOpenState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
            self.temporaryBuffer = data
            self.state = self.scriptDataDoubleEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
    def scriptDataEscapedEndTagNameState(self):
        """Accumulate a candidate end tag name inside escaped script data.

        Same "appropriate end tag" pattern as the other *EndTagNameState
        handlers; mismatches fall back to the escaped state.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing": False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
    def scriptDataDoubleEscapedState(self):
        """Script data double-escaped state ("<script" inside "<!--").

        Unlike the single-escaped state, EOF here is a parse error.
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        return True
    def scriptDataDoubleEscapedDashState(self):
        """Double-escaped script data after one "-"."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
            self.state = self.scriptDataDoubleEscapedDashDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
    def scriptDataDoubleEscapedDashDashState(self):
        """Double-escaped script data after "--": ">" returns to script data."""
        data = self.stream.char()
        if data == "-":
            # Stay here for runs of dashes.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
            self.state = self.scriptDataState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": "\uFFFD"})
            self.state = self.scriptDataDoubleEscapedState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
    def beforeAttributeNameState(self):
        """Between a tag name (or attribute) and the next attribute name.

        A new attribute is stored as a two-item [name, value] list appended
        to self.currentToken["data"].
        """
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data in ("'", '"', "=", "<"):
            # Parse error, but the character still starts an attribute name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-name-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
    def attributeNameState(self):
        """Accumulate the current attribute's name.

        When the name ends (leavingThisState), it is optionally lowercased
        and checked against earlier attributes for duplicates; the token is
        emitted afterwards if ">" ended the name (emitToken).
        """
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == "=":
            self.state = self.beforeAttributeValueState
        elif data in asciiLetters:
            self.currentToken["data"][-1][0] += data +\
                self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == ">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.afterAttributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][0] += "\uFFFD"
            leavingThisState = False
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-attribute-name"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            if self.lowercaseAttrName:
                self.currentToken["data"][-1][0] = (
                    self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, value in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True
    def afterAttributeNameState(self):
        """After an attribute name, before "=" / ">" / the next attribute."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "=":
            self.state = self.beforeAttributeValueState
        elif data == ">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            # A new attribute starts; the previous one keeps an empty value.
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"].append(["\uFFFD", ""])
            self.state = self.attributeNameState
        elif data in ("'", '"', "<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-end-of-tag-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
    def beforeAttributeValueState(self):
        """After "=", before an attribute value: pick the quoting mode."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == "\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == "&":
            # "&" belongs to the unquoted value; replay it there.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data)
        elif data == "'":
            self.state = self.attributeValueSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
            self.state = self.attributeValueUnQuotedState
        elif data in ("=", "<", "`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True
    def attributeValueDoubleQuotedState(self):
        """Accumulate a double-quoted attribute value."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == "&":
            # '"' is the additional allowed character for the entity.
            self.processEntityInAttribute('"')
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("\"", "&", "\u0000"))
        return True
    def attributeValueSingleQuotedState(self):
        """Accumulate a single-quoted attribute value."""
        data = self.stream.char()
        if data == "'":
            self.state = self.afterAttributeValueState
        elif data == "&":
            # "'" is the additional allowed character for the entity.
            self.processEntityInAttribute("'")
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-single-quote"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data +\
                self.stream.charsUntil(("'", "&", "\u0000"))
        return True
    def attributeValueUnQuotedState(self):
        """Accumulate an unquoted attribute value."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == "&":
            # ">" is the additional allowed character for the entity.
            self.processEntityInAttribute(">")
        elif data == ">":
            self.emitCurrentToken()
        elif data in ('"', "'", "=", "<", "`"):
            # Parse error, but the character is still part of the value.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"][-1][1] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.state = self.dataState
        else:
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
                frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
        return True
    def afterAttributeValueState(self):
        """After a quoted attribute value: expect whitespace, ">" or "/"."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == ">":
            self.emitCurrentToken()
        elif data == "/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            # Parse error; reprocess the character as a new attribute name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
    def selfClosingStartTagState(self):
        """After "/" inside a tag: only ">" makes the tag self-closing."""
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            # Parse error; reprocess the character as an attribute name.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
    def markupDeclarationOpenState(self):
        """After "<!": recognise "--" (comment), "doctype" or "[CDATA[".

        CDATA is only honoured when the current node is in a foreign
        (non-HTML) namespace. On failure every consumed character is
        ungot and the input is reprocessed as a bogus comment.
        """
        charStack = [self.stream.char()]
        if charStack[-1] == "-":
            charStack.append(self.stream.char())
            if charStack[-1] == "-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in ('d', 'D'):
            # Case-insensitive match of the remaining "octype".
            matched = True
            for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
                             ('y', 'Y'), ('p', 'P'), ('e', 'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": "",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        elif (charStack[-1] == "[" and
              self.parser is not None and
              self.parser.tree.openElements and
              self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
            # "CDATA[" is matched case-sensitively.
            matched = True
            for expected in ["C", "D", "A", "T", "A", "["]:
                charStack.append(self.stream.char())
                if charStack[-1] != expected:
                    matched = False
                    break
            if matched:
                self.state = self.cdataSectionState
                return True
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})
        while charStack:
            self.stream.unget(charStack.pop())
        self.state = self.bogusCommentState
        return True
    def commentStartState(self):
        """Inside "<!--", before any comment content."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data == ">":
            # "<!-->" is an (incorrect) empty comment.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True
    def commentStartDashState(self):
        """After "<!---": one dash is pending in the comment data."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
        elif data == ">":
            # "<!--->" is an (incorrect) empty comment.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The pending dash becomes part of the comment data.
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
    def commentState(self):
        """Accumulate comment content until "-" or EOF."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndDashState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "\uFFFD"
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data + \
                self.stream.charsUntil(("-", "\u0000"))
        return True
    def commentEndDashState(self):
        """Inside a comment after one "-": a second "-" may end it."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == "\u0000":
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data": "invalid-codepoint"})
            self.currentToken["data"] += "-\uFFFD"
            self.state = self.commentState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # The pending dash becomes part of the comment data.
            self.currentToken["data"] += "-" + data
            self.state = self.commentState
        return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
    def afterDoctypeNameState(self):
        """After-DOCTYPE-name state: look for the PUBLIC or SYSTEM keyword
        (case-insensitively), '>' to finish, or EOF; anything else makes
        the doctype bogus."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Whitespace after the name is skipped.
            pass
        elif data == ">":
            # End of a plain '<!DOCTYPE name>' — emit the token.
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            # NOTE(review): EOF is ungot here (unlike the other states),
            # presumably so subsequent reads keep returning EOF — confirm.
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # Try to match the rest of 'PUBLIC' / 'SYSTEM' letter by letter;
            # 'data' is rebound to each newly read character as we go.
            if data in ("p", "P"):
                matched = True
                for expected in (("u", "U"), ("b", "B"), ("l", "L"),
                                 ("i", "I"), ("c", "C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in ("s", "S"):
                matched = True
                for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
                                 ("e", "E"), ("m", "M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True
            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-space-or-right-bracket-in-doctype", "datavars":
                                    {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.