repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
dlongman/beegee-squeegee | prototype-parser/AccuChekMobileParser.py | 1 | 3411 | import csv
import time
from Reading import Reading as Reading
### TODO ### Add documentation to classes and methods
class AccuChekMobileParser(object):
    """Parser for the CSV diary files exported by an Accu-Chek Mobile meter.

    File layout (semicolon separated):
      * row 1: summary headers, row 2: summary data (serial number and
        download date/time),
      * row 3: result headers, row 4 onwards: one reading per row.

    NOTE(review): files are opened in 'rb' mode, which is the Python 2
    idiom for the csv module; under Python 3 they would need to be opened
    in text mode (newline='') instead -- confirm the target interpreter.
    """

    def __init__(self, file_location):
        # Path of the CSV export to parse.
        self.file_path = file_location
        # Filled in by read_header_data().
        self.serial_number = ''
        self.last_download_date = ''
        # Number of summary rows to skip before the per-reading data.
        self.HEADER_ROW_COUNT = 2
        # Reading objects collected by read_result_data().
        self.result_data = []

    def read_result_data(self, notifier):
        """Parse every reading row, store it and pass it to `notifier`.

        Parameters
        ----------
        notifier : callable
            Called once with each newly created Reading.
        """
        headers = ['Date', 'Time', 'Result', 'Unit', 'TempWarning',
                   'OutOfTargetRange', 'Other', 'BeforeMeal', 'AfterMeal',
                   'ControlTest']
        with open(self.file_path, 'rb') as data_file:
            reader = csv.DictReader(data_file, fieldnames=headers, delimiter=';')
            # Skip the two summary rows and the result-header row.
            for _ in range(self.HEADER_ROW_COUNT + 1):
                next(reader, None)
            for row in reader:
                # TODO: use a dict keyed on date/time to avoid duplicates.
                # `is not None` (identity) is the correct test for a short
                # row where DictReader left the field unset.
                if row['Result'] is not None:
                    reading = Reading(row['Date'], row['Time'],
                                      row['Result'], row['Unit'])
                    self.result_data.append(reading)
                    notifier(reading)
                # Rows without a result carry no reading, so ignore them.

    def read_header_data(self):
        """Read the serial number and download timestamp from the summary rows."""
        headers = ['Serial Number', 'Download Date', 'Download Time']
        with open(self.file_path, 'rb') as data_file:
            reader = csv.DictReader(data_file, headers, delimiter=';')
            for row_number, row in enumerate(reader, start=1):
                if row_number == 1:
                    # Header row -- reset so stale data never survives a re-read.
                    self.serial_number = ''
                elif row_number == 2:
                    # Row 2 holds the summary data.
                    self.serial_number = row['Serial Number']
                    self.last_download_date = self.convert_date_time(
                        row['Download Date'], row['Download Time'])
                else:
                    # The real reading data starts here; nothing more to do.
                    break

    def convert_date_time(self, d, t):
        """Return a time.struct_time for a 'dd.mm.yyyy' date and 'HH:MM' time."""
        return time.strptime(d + t, "%d.%m.%Y%H:%M")

    def __str__(self):
        return 'This is the result data for meter {0}. ' \
               'The data was downloaded on {1}\\{2}\\{3} at {4}:{5}.\n' \
               'It contains {6} readings' \
               .format(self.serial_number, self.last_download_date.tm_mday,
                       self.last_download_date.tm_mon,
                       self.last_download_date.tm_year,
                       self.last_download_date.tm_hour,
                       self.last_download_date.tm_min, len(self.result_data))
if __name__ == "__main__":
    def callback(data):
        """Notifier hook: report each parsed reading on stdout."""
        # Parenthesised so this runs under both Python 2 and Python 3
        # (the original `print "..."` statement is Python-2-only syntax).
        print("Processed data for {0}".format(data))
        # TODO: process data into a SQL db.

    parser = AccuChekMobileParser('./data/DiaryU101341933-14Dec2014.csv')
    parser.read_header_data()
    parser.read_result_data(callback)

    # Re-use the same parser instance for a second diary file; readings
    # accumulate in parser.result_data across both files.
    parser.file_path = './data/DiaryU101341933-12Aug2015.csv'
    parser.read_header_data()
    parser.read_result_data(callback)
    print(parser)
| mit |
simalytics/askbot-devel | askbot/migrations/0048_set_proper_revision_types.py | 20 | 27517 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
    """Stamp every existing revision with its content kind.

    Question revisions get revision_type=1 and answer revisions get
    revision_type=2 (presumably mirroring constants defined on the
    askbot revision models -- confirm against askbot.models).
    """
    orm.QuestionRevision.objects.update(revision_type=1)
    orm.AnswerRevision.objects.update(revision_type=2)
def backwards(self, orm):
    """No-op: the revision_type values set by forwards() are left in place."""
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Question']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.answer': {
'Meta': {'object_name': 'Answer', 'db_table': "u'answer'"},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_answers'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['askbot.Question']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.answerrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('answer', 'revision'),)", 'object_name': 'AnswerRevision', 'db_table': "u'answer_revision'"},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Answer']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answerrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'askbot.comment': {
'Meta': {'ordering': "('-added_at',)", 'object_name': 'Comment', 'db_table': "u'comment'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'html': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2048'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'offensive_flag_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['auth.User']"})
},
'askbot.emailfeedsetting': {
'Meta': {'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.question': {
'Meta': {'object_name': 'Question', 'db_table': "u'question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'to': "orm['auth.User']"}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'closed_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'favorite_questions'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_questions'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_active_in_questions'", 'to': "orm['auth.User']"}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_questions'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'questions'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.questionrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('question', 'revision'),)", 'object_name': 'QuestionRevision', 'db_table': "u'question_revision'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questionrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Question']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Question']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Question']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'user'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot']
| gpl-3.0 |
rcarrillocruz/ansible | lib/ansible/utils/module_docs_fragments/tower.py | 44 | 1991 | # (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
    """Shared documentation fragment for the Ansible Tower modules.

    The DOCUMENTATION string below is YAML that Ansible splices into the
    docs of every module declaring this fragment; it is data consumed by
    the doc tooling, not executed code, so its text must not be altered.
    """

    # Ansible Tower documentation fragment
    DOCUMENTATION = '''
options:
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.2"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
| gpl-3.0 |
dpshelio/scikit-image | skimage/future/graph/graph_cut.py | 19 | 9309 | try:
import networkx as nx
except ImportError:
import warnings
warnings.warn('RAGs require networkx')
import numpy as np
from . import _ncut
from . import _ncut_cy
from scipy.sparse import linalg
def cut_threshold(labels, rag, thresh, in_place=True):
    """Merge regions whose connecting edge weight is below `thresh`.

    Any two nodes of `rag` joined by an edge lighter than `thresh` end up
    with the same output label: every connected component of the
    thresholded graph collapses into a single label.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The region adjacency graph.
    thresh : float
        Edges with weight below this value keep their endpoints
        connected; heavier edges are cut.
    in_place : bool
        If set, modifies `rag` in place, removing all edges whose weight
        is `thresh` or more.  If `False`, a copy of `rag` is modified
        instead.

    Returns
    -------
    out : ndarray
        The new labelled array.

    Examples
    --------
    >>> from skimage import data, segmentation
    >>> from skimage.future import graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img)
    >>> rag = graph.rag_mean_color(img, labels)
    >>> new_labels = graph.cut_threshold(labels, rag, 10)

    References
    ----------
    .. [1] Alain Tremeau and Philippe Colantoni
           "Regions Adjacency Graph Applied To Color Image Segmentation"
           http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274

    """
    if not in_place:
        rag = rag.copy()

    # Collect the heavy edges first: removing edges while iterating over
    # them would invalidate the iterator.
    heavy_edges = [(u, v) for u, v, attrs in rag.edges_iter(data=True)
                   if attrs['weight'] >= thresh]
    rag.remove_edges_from(heavy_edges)

    # Each connected component of the thresholded graph becomes one output
    # label; map_array translates every old label to its component index.
    map_array = np.arange(labels.max() + 1, dtype=labels.dtype)
    for new_label, component in enumerate(nx.connected_components(rag)):
        for node in component:
            for old_label in rag.node[node]['labels']:
                map_array[old_label] = new_label

    return map_array[labels]
def cut_normalized(labels, rag, thresh=0.001, num_cuts=10, in_place=True,
                   max_edge=1.0):
    """Perform Normalized Graph cut on the Region Adjacency Graph.

    Recursively bi-partitions the similarity RAG with 2-way normalized
    cuts; every subgraph that cannot be divided further receives one
    unique label in the output.

    Parameters
    ----------
    labels : ndarray
        The array of labels.
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. A subgraph won't be further subdivided if the
        value of the N-cut exceeds `thresh`.
    num_cuts : int
        The number or N-cuts to perform before determining the optimal one.
    in_place : bool
        If set, modifies `rag` in place. For each node `n` the function will
        set a new attribute ``rag.node[n]['ncut label']``.
    max_edge : float, optional
        The maximum possible value of an edge in the RAG. This corresponds to
        an edge between identical regions. This is used to put self
        edges in the RAG.

    Returns
    -------
    out : ndarray
        The new labeled array.

    Examples
    --------
    >>> from skimage import data, segmentation
    >>> from skimage.future import graph
    >>> img = data.astronaut()
    >>> labels = segmentation.slic(img, compactness=30, n_segments=400)
    >>> rag = graph.rag_mean_color(img, labels, mode='similarity')
    >>> new_labels = graph.cut_normalized(labels, rag)

    References
    ----------
    .. [1] Shi, J.; Malik, J., "Normalized cuts and image segmentation",
           Pattern Analysis and Machine Intelligence,
           IEEE Transactions on, vol. 22, no. 8, pp. 888-905, August 2000.

    """
    if not in_place:
        rag = rag.copy()

    # Give every node a self edge of maximal weight (an edge between
    # identical regions) before the recursive partitioning.
    for node in rag.nodes_iter():
        rag.add_edge(node, node, weight=max_edge)

    _ncut_relabel(rag, thresh, num_cuts)

    # Translate each original label to the 'ncut label' assigned to the
    # node that contains it.
    map_array = np.zeros(labels.max() + 1, dtype=labels.dtype)
    for _, attrs in rag.nodes_iter(data=True):
        map_array[attrs['labels']] = attrs['ncut label']

    return map_array[labels]
def partition_by_cut(cut, rag):
    """Split `rag` into the two subgraphs described by a bi-partition.

    Parameters
    ----------
    cut : array
        Array of booleans; positions set to `True` form one side of the
        partition. Its ordering follows ``rag.nodes()`` — the same ordering
        used when the `D` and `W` matrices were built via
        ``nx.to_scipy_sparse_matrix``.
    rag : RAG
        The Region Adjacency Graph.

    Returns
    -------
    sub1, sub2 : RAG
        The two subgraphs induced by the node sets on either side of the
        cut.
    """
    # Example: rag.nodes() = [3, 7, 9, 13], cut = [True, False, True, False]
    # -> in_set = [3, 9], out_set = [7, 13]
    in_set = []
    out_set = []
    for belongs, node in zip(cut, rag.nodes()):
        (in_set if belongs else out_set).append(node)
    return rag.subgraph(in_set), rag.subgraph(out_set)
def get_min_ncut(ev, d, w, num_cuts):
    """Threshold an eigenvector evenly, to determine minimum ncut.

    Parameters
    ----------
    ev : array
        The eigenvector to threshold.
    d : ndarray
        The diagonal matrix of the graph.
    w : ndarray
        The weight matrix of the graph.
    num_cuts : int
        The number of evenly spaced thresholds to check for.

    Returns
    -------
    mask : array
        The array of booleans which denotes the bi-partition.
    mcut : float
        The value of the minimum ncut; ``np.inf`` when the graph cannot be
        subdivided (all eigenvector entries equal).
    """
    mcut = np.inf
    mn = ev.min()
    mx = ev.max()

    # If all values in `ev` are equal, it implies that the graph can't be
    # further sub-divided. In this case the bi-partition is the graph
    # itself and an empty set.
    # FIX: use the builtin ``bool`` — the ``np.bool`` alias was deprecated
    # in NumPy 1.20 and removed in NumPy 1.24, where it raises
    # AttributeError.
    min_mask = np.zeros_like(ev, dtype=bool)
    if np.allclose(mn, mx):
        return min_mask, mcut

    # Refer Shi & Malik 2001, Section 3.1.3, Page 892
    # Perform evenly spaced n-cuts and keep the cheapest one.
    for t in np.linspace(mn, mx, num_cuts, endpoint=False):
        mask = ev > t
        cost = _ncut.ncut_cost(mask, d, w)
        if cost < mcut:
            min_mask = mask
            mcut = cost

    return min_mask, mcut
def _label_all(rag, attr_name):
"""Assign a unique integer to the given attribute in the RAG.
This function assumes that all labels in `rag` are unique. It
picks up a random label from them and assigns it to the `attr_name`
attribute of all the nodes.
rag : RAG
The Region Adjacency Graph.
attr_name : string
The attribute to which a unique integer is assigned.
"""
node = rag.nodes()[0]
new_label = rag.node[node]['labels'][0]
for n, d in rag.nodes_iter(data=True):
d[attr_name] = new_label
def _ncut_relabel(rag, thresh, num_cuts):
    """Perform Normalized Graph cut on the Region Adjacency Graph.

    Recursively partition the graph into two, until further subdivision
    yields a cut greater than `thresh` or such a cut cannot be computed.
    Every node of a subgraph that cannot be split further receives the same
    unique value in its ``'ncut label'`` attribute.

    Parameters
    ----------
    rag : RAG
        The region adjacency graph.
    thresh : float
        The threshold. A subgraph won't be further subdivided if the
        value of the N-cut exceeds `thresh`.
    num_cuts : int
        The number or N-cuts to perform before determining the optimal one.
    """
    d, w = _ncut.DW_matrices(rag)
    m = w.shape[0]

    # Graphs with two or fewer nodes cannot be usefully bi-partitioned;
    # they fall through to the single-label assignment below.
    if m > 2:
        d2 = d.copy()
        # Since d is diagonal, we can directly operate on its data
        # the inverse of the square root
        d2.data = np.reciprocal(np.sqrt(d2.data, out=d2.data), out=d2.data)

        # Solve the generalized eigenproblem in its symmetric form.
        # Refer Shi & Malik 2001, Equation 7, Page 891
        vals, vectors = linalg.eigsh(d2 * (d - w) * d2, which='SM',
                                     k=min(100, m - 2))

        # Pick second smallest eigenvector.
        # Refer Shi & Malik 2001, Section 3.2.3, Page 893
        vals, vectors = np.real(vals), np.real(vectors)
        index2 = _ncut_cy.argmin2(vals)
        ev = vectors[:, index2]

        cut_mask, mcut = get_min_ncut(ev, d, w, num_cuts)
        if (mcut < thresh):
            # Sub divide and perform N-cut again
            # Refer Shi & Malik 2001, Section 3.2.5, Page 893
            sub1, sub2 = partition_by_cut(cut_mask, rag)

            _ncut_relabel(sub1, thresh, num_cuts)
            _ncut_relabel(sub2, thresh, num_cuts)
            return

    # The N-cut wasn't small enough, or could not be computed.
    # The remaining graph is a region.
    # Assign `ncut label` by picking any label from the existing nodes, since
    # `labels` are unique, `new_label` is also unique.
    _label_all(rag, 'ncut label')
| bsd-3-clause |
j-windsor/cs3240-f15-team21-v2 | reports/models.py | 1 | 1870 | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
import os
class Report(models.Model):
    """A user-authored report; attachments and folders reference it."""
    title = models.CharField(max_length=30)
    description = models.CharField(max_length = 200)
    create_date = models.DateTimeField('date created')
    # When False, the report is only reachable through users' folders
    # (see Attachment.has_access).
    public = models.BooleanField(default=True)
    creator = models.ForeignKey(User)
    #encrypted = models.BooleanField(default=False)

    def get_creator(self):
        """Return the string representation of the creating user."""
        return str(self.creator)

    def is_public(self):
        """Return whether the report is publicly visible."""
        return self.public
class Folder(models.Model):
    """A named, per-user collection of reports."""
    label = models.CharField(max_length=30)
    reports = models.ManyToManyField(Report)
    owner = models.ForeignKey(User)

    def __str__(self):
        return self.label
def get_upload_file_name(instance, filename):
    """Build a timestamped destination path for an uploaded file.

    Used as the ``upload_to`` callable of ``Attachment.upload``; `instance`
    is supplied by Django but is not needed to form the name. Dots in the
    timestamp are replaced so the extension of `filename` stays unambiguous.
    """
    stamp = str(timezone.now()).replace('.', '_')
    return "uploaded_files/{0}_{1}".format(stamp, filename)
class Attachment(models.Model):
    """A file attached to a Report, optionally encrypted."""
    name = models.CharField(max_length=30)
    upload = models.FileField(upload_to=get_upload_file_name)
    # Key material used when `encrypted` is set — TODO confirm format.
    key = models.CharField(max_length=100)
    encrypted = models.BooleanField(default=False)
    upload_date = models.DateTimeField('date uploaded', auto_now_add=True)
    report = models.ForeignKey(Report)

    def __str__(self):
        return self.name

    def filename(self):
        """Return the base name of the stored file."""
        return os.path.basename(self.upload.path)

    def has_access(self, user):
        """Return True if `user` may read this attachment.

        Access is granted when the owning report is public, or when that
        report is filed in one of the user's folders.
        """
        try:
            # FIX: check the public flag first. Previously it was checked
            # *after* the folder walk inside the same try-block, so an
            # anonymous user (no ``folder_set``) raised and was denied
            # access even to public reports.
            if self.report.public:
                return True
            for folder in user.folder_set.all():
                for report in folder.reports.all():
                    if self in report.attachment_set.all():
                        # Early return instead of scanning remaining folders.
                        return True
        except Exception:
            # Best effort: treat lookup failures (e.g. anonymous users) as
            # "no access" rather than crash. Was a bare ``except:``, which
            # also swallowed SystemExit and KeyboardInterrupt.
            pass
        return False
class Contributor (models.Model):
    """A named contributor record."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
| mit |
Samuel789/MediPi | MedManagementWeb/env/lib/python3.5/site-packages/django/contrib/postgres/fields/hstore.py | 42 | 3360 | import json
from django.contrib.postgres import forms, lookups
from django.contrib.postgres.fields.array import ArrayField
from django.core import exceptions
from django.db.models import Field, TextField, Transform
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
__all__ = ['HStoreField']
class HStoreField(Field):
    """Model field backed by the PostgreSQL ``hstore`` type: a mapping of
    string keys to string (or null) values."""
    empty_strings_allowed = False
    description = _('Map of strings to strings/nulls')
    default_error_messages = {
        'not_a_string': _('The value of "%(key)s" is not a string or null.'),
    }

    def db_type(self, connection):
        return 'hstore'

    def get_transform(self, name):
        """Resolve ``field__<name>``; unknown names become key lookups."""
        transform = super(HStoreField, self).get_transform(name)
        if transform:
            return transform
        # No registered transform/lookup by that name: treat it as an
        # hstore key extraction.
        return KeyTransformFactory(name)

    def validate(self, value, model_instance):
        # hstore can only store strings or NULLs — reject anything else.
        super(HStoreField, self).validate(value, model_instance)
        for key, val in value.items():
            if not isinstance(val, six.string_types) and val is not None:
                raise exceptions.ValidationError(
                    self.error_messages['not_a_string'],
                    code='not_a_string',
                    params={'key': key},
                )

    def to_python(self, value):
        # Serialized values round-trip through JSON strings.
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return value

    def value_to_string(self, obj):
        return json.dumps(self.value_from_object(obj))

    def formfield(self, **kwargs):
        defaults = {
            'form_class': forms.HStoreField,
        }
        defaults.update(kwargs)
        return super(HStoreField, self).formfield(**defaults)

    def get_prep_value(self, value):
        """Coerce keys/values to text before handing them to the adapter.

        Dicts are coerced key- and value-wise (nulls preserved); lists
        (used by key lookups) are coerced item-wise.
        """
        value = super(HStoreField, self).get_prep_value(value)

        if isinstance(value, dict):
            prep_value = {}
            for key, val in value.items():
                key = force_text(key)
                if val is not None:
                    val = force_text(val)
                prep_value[key] = val
            value = prep_value

        if isinstance(value, list):
            value = [force_text(item) for item in value]

        return value
# Register the hstore-specific lookups (``__contains``, ``__contained_by``,
# ``__has_key``, ``__has_keys``, ``__has_any_keys``) on the field class.
HStoreField.register_lookup(lookups.DataContains)
HStoreField.register_lookup(lookups.ContainedBy)
HStoreField.register_lookup(lookups.HasKey)
HStoreField.register_lookup(lookups.HasKeys)
HStoreField.register_lookup(lookups.HasAnyKeys)
class KeyTransform(Transform):
    """Transform extracting one hstore value: renders ``(col -> 'key')``."""
    output_field = TextField()

    def __init__(self, key_name, *args, **kwargs):
        super(KeyTransform, self).__init__(*args, **kwargs)
        self.key_name = key_name

    def as_sql(self, compiler, connection):
        # NOTE(review): `key_name` is interpolated directly into the SQL
        # string rather than passed as a parameter; this is safe only while
        # key names originate from trusted ORM lookup expressions — confirm
        # they can never be attacker-controlled.
        lhs, params = compiler.compile(self.lhs)
        return "(%s -> '%s')" % (lhs, self.key_name), params
class KeyTransformFactory(object):
    """Callable factory producing `KeyTransform` instances bound to one key.

    `HStoreField.get_transform` returns one of these for any lookup name it
    does not otherwise recognise.
    """

    def __init__(self, key_name):
        self.key_name = key_name

    def __call__(self, *args, **kwargs):
        transform = KeyTransform(self.key_name, *args, **kwargs)
        return transform
@HStoreField.register_lookup
class KeysTransform(Transform):
    # ``field__keys`` renders PostgreSQL ``akeys(col)``: an array of the
    # hstore's keys.
    lookup_name = 'keys'
    function = 'akeys'
    output_field = ArrayField(TextField())
@HStoreField.register_lookup
class ValuesTransform(Transform):
    # ``field__values`` renders PostgreSQL ``avals(col)``: an array of the
    # hstore's values.
    lookup_name = 'values'
    function = 'avals'
    output_field = ArrayField(TextField())
| apache-2.0 |
hassoon3/odoo | addons/mail/mail_followers.py | 26 | 12994 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import threading
from openerp.osv import osv, fields
from openerp import tools, SUPERUSER_ID
from openerp.tools.translate import _
from openerp.tools.mail import plaintext2html
class mail_followers(osv.Model):
    """ mail_followers holds the data related to the follow mechanism inside
        OpenERP. Partners can choose to follow documents (records) of any kind
        that inherits from mail.thread. Following documents allow to receive
        notifications for new messages.
        A subscription is characterized by:
        :param: res_model: model of the followed objects
        :param: res_id: ID of resource (may be 0 for every objects)
    """
    _name = 'mail.followers'
    _rec_name = 'partner_id'
    _log_access = False
    _description = 'Document Followers'
    _columns = {
        'res_model': fields.char('Related Document Model',
                                 required=True, select=1,
                                 help='Model of the followed resource'),
        'res_id': fields.integer('Related Document ID', select=1,
                                 help='Id of the followed resource'),
        'partner_id': fields.many2one('res.partner', string='Related Partner',
                                      ondelete='cascade', required=True, select=1),
        'subtype_ids': fields.many2many('mail.message.subtype', string='Subtype',
                                        help="Message subtypes followed, meaning subtypes that will be pushed onto the user's Wall."),
    }

    #
    # Modifying followers change access rights to individual documents. As the
    # cache may contain accessible/inaccessible data, one has to refresh it.
    #
    def create(self, cr, uid, vals, context=None):
        # The new follower may gain access to records whose cached ACL state
        # is now stale — flush the ORM cache.
        res = super(mail_followers, self).create(cr, uid, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def write(self, cr, uid, ids, vals, context=None):
        # See create(): any change to a subscription can alter access rights.
        res = super(mail_followers, self).write(cr, uid, ids, vals, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        # See create(): removing a follower can revoke access to records.
        res = super(mail_followers, self).unlink(cr, uid, ids, context=context)
        self.invalidate_cache(cr, uid, context=context)
        return res

    _sql_constraints = [('mail_followers_res_partner_res_model_id_uniq','unique(res_model,res_id,partner_id)','Error, a partner cannot follow twice the same object.')]
class mail_notification(osv.Model):
    """ Class holding notifications pushed to partners. Followers and partners
        added in 'contacts to notify' receive notifications. """
    _name = 'mail.notification'
    _rec_name = 'partner_id'
    _log_access = False
    _description = 'Notifications'
    _columns = {
        'partner_id': fields.many2one('res.partner', string='Contact',
                                      ondelete='cascade', required=True, select=1),
        'is_read': fields.boolean('Read', select=1, oldname='read'),
        'starred': fields.boolean('Starred', select=1,
                                  help='Starred message that goes into the todo mailbox'),
        'message_id': fields.many2one('mail.message', string='Message',
                                      ondelete='cascade', required=True, select=1),
    }
    _defaults = {
        'is_read': False,
        'starred': False,
    }

    def init(self, cr):
        # Create (once) a composite index that speeds up mailbox queries
        # filtering a partner's unread/starred notifications.
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('mail_notification_partner_id_read_starred_message_id',))
        if not cr.fetchone():
            cr.execute('CREATE INDEX mail_notification_partner_id_read_starred_message_id ON mail_notification (partner_id, is_read, starred, message_id)')

    def get_partners_to_email(self, cr, uid, ids, message, context=None):
        """ Return the list of partners to notify, based on their preferences.
            :param browse_record message: mail.message to notify
            :param list partners_to_notify: optional list of partner ids restricting
                the notifications to process
        """
        notify_pids = []
        for notification in self.browse(cr, uid, ids, context=context):
            if notification.is_read:
                continue
            partner = notification.partner_id
            # Do not send to partners without email address defined
            if not partner.email:
                continue
            # Do not send to partners having same email address than the author (can cause loops or bounce effect due to messy database)
            if message.author_id and message.author_id.email == partner.email:
                continue
            # Partner does not want to receive any emails or is opt-out
            if partner.notify_email == 'none':
                continue
            notify_pids.append(partner.id)
        return notify_pids

    def get_signature_footer(self, cr, uid, user_id, res_model=None, res_id=None, context=None, user_signature=True):
        """ Format a standard footer for notification emails (such as pushed messages
            notification or invite emails).
            Format:
                <p>--<br />
                    Administrator
                </p>
                <div>
                    <small>Sent from <a ...>Your Company</a> using <a ...>OpenERP</a>.</small>
                </div>
        """
        footer = ""
        if not user_id:
            return footer

        # add user signature
        user = self.pool.get("res.users").browse(cr, SUPERUSER_ID, [user_id], context=context)[0]
        if user_signature:
            if user.signature:
                signature = user.signature
            else:
                signature = "--<br />%s" % user.name
            footer = tools.append_content_to_html(footer, signature, plaintext=False)

        # add company signature
        if user.company_id.website:
            # Prefix bare domains with http:// so the link is absolute.
            website_url = ('http://%s' % user.company_id.website) if not user.company_id.website.lower().startswith(('http:', 'https:')) \
                else user.company_id.website
            company = "<a style='color:inherit' href='%s'>%s</a>" % (website_url, user.company_id.name)
        else:
            company = user.company_id.name
        sent_by = _('Sent by %(company)s using %(odoo)s')
        signature_company = '<br /><small>%s</small>' % (sent_by % {
            'company': company,
            'odoo': "<a style='color:inherit' href='https://www.odoo.com/'>Odoo</a>"
        })
        footer = tools.append_content_to_html(footer, signature_company, plaintext=False, container_tag='div')

        return footer

    def update_message_notification(self, cr, uid, ids, message_id, partner_ids, context=None):
        """Mark existing notifications unread again and create the missing
        ones; return the ids of the newly created mail.notification rows."""
        existing_pids = set()
        new_pids = set()
        new_notif_ids = []

        for notification in self.browse(cr, uid, ids, context=context):
            existing_pids.add(notification.partner_id.id)

        # update existing notifications
        self.write(cr, uid, ids, {'is_read': False}, context=context)

        # create new notifications
        new_pids = set(partner_ids) - existing_pids
        for new_pid in new_pids:
            new_notif_ids.append(self.create(cr, uid, {'message_id': message_id, 'partner_id': new_pid, 'is_read': False}, context=context))
        return new_notif_ids

    def _notify_email(self, cr, uid, ids, message_id, force_send=False, user_signature=True, context=None):
        """Create (and possibly immediately send) mail.mail records for the
        given notifications, in chunks of at most 50 recipients."""
        message = self.pool['mail.message'].browse(cr, SUPERUSER_ID, message_id, context=context)

        # compute partners
        email_pids = self.get_partners_to_email(cr, uid, ids, message, context=context)
        if not email_pids:
            return True

        # compute email body (signature, company data)
        body_html = message.body
        # add user signature except for mail groups, where users are usually adding their own signatures already
        user_id = message.author_id and message.author_id.user_ids and message.author_id.user_ids[0] and message.author_id.user_ids[0].id or None
        signature_company = self.get_signature_footer(cr, uid, user_id, res_model=message.model, res_id=message.res_id, context=context, user_signature=(user_signature and message.model != 'mail.group'))
        if signature_company:
            body_html = tools.append_content_to_html(body_html, signature_company, plaintext=False, container_tag='div')

        # compute email references
        references = message.parent_id.message_id if message.parent_id else False

        # custom values
        custom_values = dict()
        if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_email_values'):
            custom_values = self.pool[message.model].message_get_email_values(cr, uid, message.res_id, message, context=context)

        # create email values
        max_recipients = 50
        chunks = [email_pids[x:x + max_recipients] for x in xrange(0, len(email_pids), max_recipients)]
        email_ids = []
        for chunk in chunks:
            # Let the target model customise recipients when it supports it,
            # falling back to the generic mail.thread behaviour.
            if message.model and message.res_id and self.pool.get(message.model) and hasattr(self.pool[message.model], 'message_get_recipient_values'):
                recipient_values = self.pool[message.model].message_get_recipient_values(cr, uid, message.res_id, notif_message=message, recipient_ids=chunk, context=context)
            else:
                recipient_values = self.pool['mail.thread'].message_get_recipient_values(cr, uid, message.res_id, notif_message=message, recipient_ids=chunk, context=context)
            mail_values = {
                'mail_message_id': message.id,
                'auto_delete': (context or {}).get('mail_auto_delete', True),
                'mail_server_id': (context or {}).get('mail_server_id', False),
                'body_html': body_html,
                'references': references,
            }
            mail_values.update(custom_values)
            mail_values.update(recipient_values)
            email_ids.append(self.pool.get('mail.mail').create(cr, uid, mail_values, context=context))
        # NOTE:
        #   1. for more than 50 followers, use the queue system
        #   2. do not send emails immediately if the registry is not loaded,
        #      to prevent sending email during a simple update of the database
        #      using the command-line.
        if force_send and len(chunks) < 2 and \
                (not self.pool._init or
                 getattr(threading.currentThread(), 'testing', False)):
            self.pool.get('mail.mail').send(cr, uid, email_ids, context=context)
        return True

    def _notify(self, cr, uid, message_id, partners_to_notify=None, context=None,
                force_send=False, user_signature=True):
        """ Send by email the notification depending on the user preferences
            :param list partners_to_notify: optional list of partner ids restricting
                the notifications to process
            :param bool force_send: if True, the generated mail.mail is
                immediately sent after being created, as if the scheduler
                was executed for this message only.
            :param bool user_signature: if True, the generated mail.mail body is
                the body of the related mail.message with the author's signature
        """
        notif_ids = self.search(cr, SUPERUSER_ID, [('message_id', '=', message_id), ('partner_id', 'in', partners_to_notify)], context=context)

        # update or create notifications
        new_notif_ids = self.update_message_notification(cr, SUPERUSER_ID, notif_ids, message_id, partners_to_notify, context=context)

        # mail_notify_noemail (do not send email) or no partner_ids: do not send, return
        if context and context.get('mail_notify_noemail'):
            return True
        # browse as SUPERUSER_ID because of access to res_partner not necessarily allowed
        self._notify_email(cr, SUPERUSER_ID, new_notif_ids, message_id, force_send, user_signature, context=context)
| agpl-3.0 |
Kenishi/iamacr | libs/markdown/extensions/smart_strong.py | 12 | 1151 | '''
Smart_Strong Extension for Python-Markdown
==========================================
This extension adds smarter handling of double underscores within words.
See <https://pythonhosted.org/Markdown/extensions/smart_strong.html>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
# Double underscores mark strong emphasis only at word boundaries: the
# lookbehind/lookahead assertions reject ``__`` that is embedded inside a
# word or adjacent to further underscores.
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
# Double asterisks keep vanilla Markdown semantics (no boundary checks).
STRONG_RE = r'(\*{2})(.+?)\2'
class SmartEmphasisExtension(Extension):
    """ Add smart_emphasis extension to Markdown class."""

    def extendMarkdown(self, md, md_globals):
        """ Modify inline patterns. """
        # Replace the stock '**' pattern and add a separate '__' pattern
        # that only matches at word boundaries.
        md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
        md.inlinePatterns.add('strong2', SimpleTagPattern(SMART_STRONG_RE, 'strong'), '>emphasis2')
def makeExtension(*args, **kwargs):
    """Module-level factory used by Python-Markdown's extension loader."""
    return SmartEmphasisExtension(*args, **kwargs)
| mit |
tareqalayan/ansible | test/units/modules/network/iosxr/test_iosxr_system.py | 49 | 4219 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule, load_fixture
from ansible.modules.network.iosxr import iosxr_system
class TestIosxrSystemModule(TestIosxrModule):
    """Unit tests for the iosxr_system module.

    Each test sets module arguments, runs the module against the canned
    ``iosxr_system_config.cfg`` fixture and asserts the configuration
    commands the module would push.
    """
    module = iosxr_system

    def setUp(self):
        super(TestIosxrSystemModule, self).setUp()
        # Patch out all device I/O: config reads, config pushes and the
        # connection-type probe.
        self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_system.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_system.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_is_cliconf = patch('ansible.modules.network.iosxr.iosxr_system.is_cliconf')
        self.is_cliconf = self.mock_is_cliconf.start()

    def tearDown(self):
        super(TestIosxrSystemModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
        # FIX: this patcher was started in setUp but never stopped, leaking
        # the is_cliconf patch into subsequent tests.
        self.mock_is_cliconf.stop()

    def load_fixtures(self, commands=None):
        # Every test starts from the same canned running-config.
        self.get_config.return_value = load_fixture('iosxr_system_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')
        self.is_cliconf.return_value = True

    def test_iosxr_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo', 'no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_domain_name(self):
        set_module_args(dict(domain_name='test.com'))
        commands = ['domain name test.com', 'no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_domain_search(self):
        set_module_args(dict(domain_search=['ansible.com', 'redhat.com']))
        commands = ['domain list ansible.com', 'no domain list cisco.com', 'no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_lookup_source(self):
        set_module_args(dict(lookup_source='Ethernet1'))
        commands = ['domain lookup source-interface Ethernet1', 'no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_lookup_enabled(self):
        set_module_args(dict(lookup_enabled=True))
        commands = ['no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_name_servers(self):
        name_servers = ['8.8.8.8', '8.8.4.4', '1.1.1.1']
        set_module_args(dict(name_servers=name_servers))
        # NOTE(review): `commands` is built but not passed to
        # execute_module, so only the `changed` flag is asserted — confirm
        # whether command ordering is deterministic before tightening this.
        commands = ['domain name-server 1.1.1.1', 'no domain name-server 8.8.4.4', 'no domain lookup disable']
        self.execute_module(changed=True)

    def test_iosxr_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = [
            'no hostname',
            'no domain name',
            'no domain lookup disable',
            'no domain lookup source-interface MgmtEth0/0/CPU0/0',
            'no domain list redhat.com',
            'no domain list cisco.com',
            'no domain name-server 8.8.8.8',
            'no domain name-server 8.8.4.4'
        ]
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_no_change(self):
        # Arguments matching the fixture config must be a no-op.
        set_module_args(dict(hostname='iosxr01', domain_name='eng.ansible.com', lookup_enabled=False))
        self.execute_module()
| gpl-3.0 |
SebasSBM/django | tests/template_tests/syntax_tests/test_invalid_string.py | 440 | 2310 | from django.test import SimpleTestCase
from ..utils import setup
class InvalidStringTests(SimpleTestCase):
    """Template rendering behaviour for missing/invalid variables.

    Expected output depends on engine configuration: with
    ``string_if_invalid`` set (rendered as 'INVALID' in these tests) the
    placeholder is emitted; otherwise invalid variables render as the
    empty string or the filter's default.
    """
    libraries = {'i18n': 'django.templatetags.i18n'}

    @setup({'invalidstr01': '{{ var|default:"Foo" }}'})
    def test_invalidstr01(self):
        output = self.engine.render_to_string('invalidstr01')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, 'Foo')

    @setup({'invalidstr02': '{{ var|default_if_none:"Foo" }}'})
    def test_invalidstr02(self):
        # default_if_none only replaces None, not a missing variable.
        output = self.engine.render_to_string('invalidstr02')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'invalidstr03': '{% for v in var %}({{ v }}){% endfor %}'})
    def test_invalidstr03(self):
        # Iterating an invalid variable silently produces no loop passes.
        output = self.engine.render_to_string('invalidstr03')
        self.assertEqual(output, '')

    @setup({'invalidstr04': '{% if var %}Yes{% else %}No{% endif %}'})
    def test_invalidstr04(self):
        # An invalid variable is falsy in {% if %}.
        output = self.engine.render_to_string('invalidstr04')
        self.assertEqual(output, 'No')

    @setup({'invalidstr04_2': '{% if var|default:"Foo" %}Yes{% else %}No{% endif %}'})
    def test_invalidstr04_2(self):
        output = self.engine.render_to_string('invalidstr04_2')
        self.assertEqual(output, 'Yes')

    @setup({'invalidstr05': '{{ var }}'})
    def test_invalidstr05(self):
        output = self.engine.render_to_string('invalidstr05')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'invalidstr06': '{{ var.prop }}'})
    def test_invalidstr06(self):
        # Attribute access on an invalid variable behaves like the variable.
        output = self.engine.render_to_string('invalidstr06')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')

    @setup({'invalidstr07': '{% load i18n %}{% blocktrans %}{{ var }}{% endblocktrans %}'})
    def test_invalidstr07(self):
        output = self.engine.render_to_string('invalidstr07')
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID')
        else:
            self.assertEqual(output, '')
| bsd-3-clause |
Evzdrop/PynamoDB | pynamodb/constants.py | 3 | 7354 | """
Pynamodb constants
"""
# Operations
BATCH_WRITE_ITEM = 'BatchWriteItem'
DESCRIBE_TABLE = 'DescribeTable'
BATCH_GET_ITEM = 'BatchGetItem'
CREATE_TABLE = 'CreateTable'
UPDATE_TABLE = 'UpdateTable'
DELETE_TABLE = 'DeleteTable'
LIST_TABLES = 'ListTables'
UPDATE_ITEM = 'UpdateItem'
DELETE_ITEM = 'DeleteItem'
GET_ITEM = 'GetItem'
PUT_ITEM = 'PutItem'
QUERY = 'Query'
SCAN = 'Scan'

# Request Parameters
GLOBAL_SECONDARY_INDEX_UPDATES = 'GlobalSecondaryIndexUpdates'
RETURN_ITEM_COLL_METRICS = 'ReturnItemCollectionMetrics'
EXCLUSIVE_START_TABLE_NAME = 'ExclusiveStartTableName'
RETURN_CONSUMED_CAPACITY = 'ReturnConsumedCapacity'
COMPARISON_OPERATOR = 'ComparisonOperator'
SCAN_INDEX_FORWARD = 'ScanIndexForward'
ATTR_DEFINITIONS = 'AttributeDefinitions'
ATTR_VALUE_LIST = 'AttributeValueList'
TABLE_DESCRIPTION = 'TableDescription'
UNPROCESSED_KEYS = 'UnprocessedKeys'
UNPROCESSED_ITEMS = 'UnprocessedItems'
CONSISTENT_READ = 'ConsistentRead'
DELETE_REQUEST = 'DeleteRequest'
RETURN_VALUES = 'ReturnValues'
REQUEST_ITEMS = 'RequestItems'
ATTRS_TO_GET = 'AttributesToGet'
ATTR_UPDATES = 'AttributeUpdates'
TABLE_STATUS = 'TableStatus'
SCAN_FILTER = 'ScanFilter'
TABLE_NAME = 'TableName'
KEY_SCHEMA = 'KeySchema'
ATTR_NAME = 'AttributeName'
ATTR_TYPE = 'AttributeType'
ITEM_COUNT = 'ItemCount'
CAMEL_COUNT = 'Count'
PUT_REQUEST = 'PutRequest'
INDEX_NAME = 'IndexName'
ATTRIBUTES = 'Attributes'
TABLE_KEY = 'Table'
RESPONSES = 'Responses'
RANGE_KEY = 'RangeKey'
KEY_TYPE = 'KeyType'
ACTION = 'Action'
UPDATE = 'Update'
EXISTS = 'Exists'
SELECT = 'Select'
ACTIVE = 'ACTIVE'
LIMIT = 'Limit'
ITEMS = 'Items'
ITEM = 'Item'
KEYS = 'Keys'
UTC = 'UTC'
KEY = 'Key'

# Defaults
DEFAULT_ENCODING = 'utf-8'
DEFAULT_REGION = 'us-east-1'
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z'
SERVICE_NAME = 'dynamodb'
HTTP_OK = 200
HTTP_BAD_REQUEST = 400

# Create Table arguments
PROVISIONED_THROUGHPUT = 'ProvisionedThroughput'
READ_CAPACITY_UNITS = 'ReadCapacityUnits'
WRITE_CAPACITY_UNITS = 'WriteCapacityUnits'

# DynamoDB wire-format attribute type codes and their long-form names.
STRING_SHORT = 'S'
STRING_SET_SHORT = 'SS'
NUMBER_SHORT = 'N'
NUMBER_SET_SHORT = 'NS'
BINARY_SHORT = 'B'
BINARY_SET_SHORT = 'BS'
STRING = 'String'
STRING_SET = 'StringSet'
NUMBER = 'Number'
NUMBER_SET = 'NumberSet'
BINARY = 'Binary'
BINARY_SET = 'BinarySet'
SHORT_ATTR_TYPES = [STRING_SHORT, STRING_SET_SHORT, NUMBER_SHORT, NUMBER_SET_SHORT, BINARY_SHORT, BINARY_SET_SHORT]
# Bidirectional map: long name -> short code AND short code -> long name.
ATTR_TYPE_MAP = {
    STRING: STRING_SHORT,
    STRING_SET: STRING_SET_SHORT,
    NUMBER: NUMBER_SHORT,
    NUMBER_SET: NUMBER_SET_SHORT,
    BINARY: BINARY_SHORT,
    BINARY_SET: BINARY_SET_SHORT,
    STRING_SHORT: STRING,
    STRING_SET_SHORT: STRING_SET,
    NUMBER_SHORT: NUMBER,
    NUMBER_SET_SHORT: NUMBER_SET,
    BINARY_SHORT: BINARY,
    BINARY_SET_SHORT: BINARY_SET
}

# Constants needed for creating indexes
LOCAL_SECONDARY_INDEX = 'LocalSecondaryIndex'
LOCAL_SECONDARY_INDEXES = 'LocalSecondaryIndexes'
GLOBAL_SECONDARY_INDEX = 'GlobalSecondaryIndex'
GLOBAL_SECONDARY_INDEXES = 'GlobalSecondaryIndexes'
PROJECTION = 'Projection'
PROJECTION_TYPE = 'ProjectionType'
NON_KEY_ATTRIBUTES = 'NonKeyAttributes'
KEYS_ONLY = 'KEYS_ONLY'
ALL = 'ALL'
INCLUDE = 'INCLUDE'

# Constants for Dynamodb Streams
STREAM_VIEW_TYPE = 'StreamViewType'
STREAM_SPECIFICATION = 'StreamSpecification'
STREAM_ENABLED = 'StreamEnabled'
STREAM_NEW_IMAGE = 'NEW_IMAGE'
STREAM_OLD_IMAGE = 'OLD_IMAGE'
STREAM_NEW_AND_OLD_IMAGE = 'NEW_AND_OLD_IMAGES'
STREAM_KEYS_ONLY = 'KEYS_ONLY'

# These are constants used in the KeyConditions parameter
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Query.html#DDB-Query-request-KeyConditions
EXCLUSIVE_START_KEY = 'ExclusiveStartKey'
LAST_EVALUATED_KEY = 'LastEvaluatedKey'
QUERY_FILTER = 'QueryFilter'
BEGINS_WITH = 'BEGINS_WITH'
BETWEEN = 'BETWEEN'
EQ = 'EQ'
NE = 'NE'
LE = 'LE'
LT = 'LT'
GE = 'GE'
GT = 'GT'
IN = 'IN'
KEY_CONDITIONS = 'KeyConditions'
COMPARISON_OPERATOR_VALUES = [EQ, LE, LT, GE, GT, BEGINS_WITH, BETWEEN]
QUERY_OPERATOR_MAP = {
    'eq': EQ,
    'le': LE,
    'lt': LT,
    'ge': GE,
    'gt': GT,
    'begins_with': BEGINS_WITH,
    'between': BETWEEN
}

# These are the valid select values for the Scan operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#DDB-Scan-request-Select
NOT_NULL = 'NOT_NULL'
NULL = 'NULL'
CONTAINS = 'CONTAINS'
NOT_CONTAINS = 'NOT_CONTAINS'
ALL_ATTRIBUTES = 'ALL_ATTRIBUTES'
ALL_PROJECTED_ATTRIBUTES = 'ALL_PROJECTED_ATTRIBUTES'
SPECIFIC_ATTRIBUTES = 'SPECIFIC_ATTRIBUTES'
COUNT = 'COUNT'
SELECT_VALUES = [ALL_ATTRIBUTES, ALL_PROJECTED_ATTRIBUTES, SPECIFIC_ATTRIBUTES, COUNT]
SCAN_OPERATOR_MAP = {
    'eq': EQ,
    'ne': NE,
    'le': LE,
    'lt': LT,
    'ge': GE,
    'gt': GT,
    'not_null': NOT_NULL,
    'null': NULL,
    'contains': CONTAINS,
    'not_contains': NOT_CONTAINS,
    'begins_with': BEGINS_WITH,
    'in': IN,
    'between': BETWEEN
}
# NOTE: these are aliases of SCAN_OPERATOR_MAP (the same dict object), not
# independent copies.
QUERY_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
DELETE_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
UPDATE_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP
PUT_FILTER_OPERATOR_MAP = SCAN_OPERATOR_MAP

# These are the valid comparison operators for the Scan operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html#DDB-Scan-request-ScanFilter
SEGMENT = 'Segment'
TOTAL_SEGMENTS = 'TotalSegments'
SCAN_FILTER_VALUES = [EQ, NE, LE, LT, GE, GT, NOT_NULL, NULL, CONTAINS, NOT_CONTAINS, BEGINS_WITH, IN, BETWEEN]
QUERY_FILTER_VALUES = SCAN_FILTER_VALUES
DELETE_FILTER_VALUES = SCAN_FILTER_VALUES

# These are constants used in the expected condition for PutItem
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-Expected
VALUE = 'Value'
EXPECTED = 'Expected'

# These are the valid ReturnConsumedCapacity values used in multiple operations
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchGetItem.html#DDB-BatchGetItem-request-ReturnConsumedCapacity
CONSUMED_CAPACITY = 'ConsumedCapacity'
CAPACITY_UNITS = 'CapacityUnits'
INDEXES = 'INDEXES'
TOTAL = 'TOTAL'
NONE = 'NONE'
RETURN_CONSUMED_CAPACITY_VALUES = [INDEXES, TOTAL, NONE]

# These are the valid ReturnItemCollectionMetrics values used in multiple operations
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html#DDB-BatchWriteItem-request-ReturnItemCollectionMetrics
SIZE = 'SIZE'
RETURN_ITEM_COLL_METRICS_VALUES = [SIZE, NONE]

# These are the valid ReturnValues values used in the PutItem operation
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_PutItem.html#DDB-PutItem-request-ReturnValues
ALL_OLD = 'ALL_OLD'
UPDATED_OLD = 'UPDATED_OLD'
ALL_NEW = 'ALL_NEW'
UPDATED_NEW = 'UPDATED_NEW'
RETURN_VALUES_VALUES = [NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW]

# These are constants used in the AttributeUpdates parameter for UpdateItem
# See: http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#DDB-UpdateItem-request-AttributeUpdates
PUT = 'PUT'
DELETE = 'DELETE'
ADD = 'ADD'
ATTR_UPDATE_ACTIONS = [PUT, DELETE, ADD]

# AWS page-size limits for the batch operations.
BATCH_GET_PAGE_LIMIT = 100
BATCH_WRITE_PAGE_LIMIT = 25

META_CLASS_NAME = "Meta"
REGION = "region"
HOST = "host"

# The constants are needed for the ConditionalOperator argument used
# UpdateItem, PutItem and DeleteItem
CONDITIONAL_OPERATOR = 'ConditionalOperator'
AND = 'AND'
OR = 'OR'
CONDITIONAL_OPERATORS = [AND, OR]
| mit |
sileht/aodh | aodh/storage/hbase/inmemory.py | 2 | 9613 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" This is a very crude version of "in-memory HBase", which implements just
enough functionality of HappyBase API to support testing of our driver.
"""
import copy
import re
from oslo_log import log
import six
import aodh
LOG = log.getLogger(__name__)
class MTable(object):
    """HappyBase.Table mock.

    Rows are stored as ``{row_key: {timestamp: {column: value}}}``.  Only
    one table actually uses timestamps; every other ``put`` goes in under
    the fixed timestamp "0".
    """

    def __init__(self, name, families):
        self.name = name
        self.families = families
        # {row_key: {ts: {column: value}}} -- see class docstring.
        self._rows_with_ts = {}

    def row(self, key, columns=None):
        """Return the newest version of row ``key``.

        :param key: row key to look up; a missing key yields ``{}``.
        :param columns: optional list of column names to restrict the
                        result to.
        """
        if key not in self._rows_with_ts:
            return {}
        res = copy.copy(sorted(self._rows_with_ts.get(key).items())[-1][1])
        if columns:
            # Iterate over a snapshot of the keys: popping entries while
            # iterating the live dict view raises RuntimeError on Python 3.
            for col in list(res.keys()):
                if col not in columns:
                    res.pop(col)
        return res

    def rows(self, keys):
        """Return an iterable of ``(key, row)`` pairs for the given keys."""
        return ((k, self.row(k)) for k in keys)

    def put(self, key, data, ts=None):
        """Store ``data`` (a column->value dict) for ``key`` at ``ts``.

        Note: Now we use 'timestamped' but only for one Resource table.
        That's why we may put ts='0' in case when ts is None. If it is
        needed to use 2 types of put in one table ts=0 cannot be used.
        """
        if ts is None:
            ts = "0"
        if key not in self._rows_with_ts:
            self._rows_with_ts[key] = {ts: data}
        elif ts in self._rows_with_ts[key]:
            self._rows_with_ts[key][ts].update(data)
        else:
            self._rows_with_ts[key].update({ts: data})

    def delete(self, key):
        """Drop every stored version of ``key`` (KeyError if absent)."""
        del self._rows_with_ts[key]

    def _get_latest_dict(self, row):
        # The idea here is to return latest versions of columns.
        # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}.
        # Walking the timestamps in ascending order and updating a single
        # dict lets newer values overwrite older ones, which matches what
        # HBase would return.
        data = {}
        for _ts, ts_data in sorted(self._rows_with_ts[row].items()):
            data.update(ts_data)
        return data

    def scan(self, filter=None, columns=None, row_start=None, row_stop=None,
             limit=None):
        """Yield ``(row_key, data)`` pairs like HappyBase ``Table.scan``.

        Supports ``row_start``/``row_stop`` bounds, a ``columns``
        restriction, ``limit`` and a crude 'AND'-only ``filter`` string
        whose filter names are dispatched to the static methods below.
        """
        columns = columns or []
        sorted_keys = sorted(self._rows_with_ts)
        # copy data between row_start and row_stop into a dict
        rows = {}
        for row in sorted_keys:
            if row_start and row < row_start:
                continue
            if row_stop and row > row_stop:
                break
            rows[row] = self._get_latest_dict(row)
        if columns:
            # Keep only rows that carry at least one requested column.
            ret = {}
            for row, data in rows.items():
                for key in data:
                    if key in columns:
                        ret[row] = data
            rows = ret
        if filter:
            # TODO(jdanjou): we should really parse this properly,
            # but at the moment we are only going to support AND here
            filters = filter.split('AND')
            for f in filters:
                # Extract filter name and its arguments (raw string so the
                # escaped parentheses survive newer Python string parsing).
                g = re.search(r"(.*)\((.*),?\)", f)
                fname = g.group(1).strip()
                fargs = [s.strip().replace('\'', '')
                         for s in g.group(2).split(',')]
                m = getattr(self, fname)
                if callable(m):
                    # overwrite rows for filtering to take effect
                    # in case of multiple filters
                    rows = m(fargs, rows)
                else:
                    # The "%s" placeholder previously had no argument
                    # applied; include the offending filter name.
                    raise aodh.NotImplementedError(
                        "%s filter is not implemented, "
                        "you may want to add it!" % fname)
        for k in sorted(rows)[:limit]:
            yield k, rows[k]

    @staticmethod
    def SingleColumnValueFilter(args, rows):
        """This is filter for testing "in-memory HBase".

        This method is called from scan() when 'SingleColumnValueFilter'
        is found in the 'filter' argument.
        """
        op = args[2]
        column = "%s:%s" % (args[0], args[1])
        value = args[3]
        if value.startswith('binary:'):
            value = value[7:]
        # Dispatch table instead of a six-way if/elif chain; an unknown
        # operator matches nothing, exactly as before.
        compare = {'=': lambda a, b: a == b,
                   '<': lambda a, b: a < b,
                   '<=': lambda a, b: a <= b,
                   '>': lambda a, b: a > b,
                   '>=': lambda a, b: a >= b,
                   '!=': lambda a, b: a != b}.get(op)
        r = {}
        for row, data in rows.items():
            if (compare is not None and column in data
                    and compare(data[column], value)):
                r[row] = data
        return r

    @staticmethod
    def ColumnPrefixFilter(args, rows):
        """This is filter for testing "in-memory HBase".

        This method is called from scan() when 'ColumnPrefixFilter' is found
        in the 'filter' argument.

        :param args: a list of filter arguments, contain prefix of column
        :param rows: a dict of row prefixes for filtering
        """
        column = 'f:' + args[0]
        r = {}
        for row, data in rows.items():
            # Rows without matching columns still appear (with an empty
            # dict), mirroring the original behaviour.
            r[row] = {key: val for key, val in data.items()
                      if key.startswith(column)}
        return r

    @staticmethod
    def RowFilter(args, rows):
        """This is filter for testing "in-memory HBase".

        This method is called from scan() when 'RowFilter' is found in the
        'filter' argument.

        :param args: a list of filter arguments, it contains operator and
          sought string
        :param rows: a dict of rows which are filtered
        """
        op = args[0]
        value = args[1]
        if value.startswith('binary:'):
            value = value[len('binary:'):]
        if value.startswith('regexstring:'):
            value = value[len('regexstring:'):]
        r = {}
        for row, data in rows.items():
            # Explicit None check instead of catching the AttributeError
            # that re.search(...).group() used to raise on non-matches.
            match = re.search(value, row)
            if match is None:
                continue
            if op == '=':
                if match.group() == row:
                    r[row] = data
            else:
                raise aodh.NotImplementedError(
                    "In-memory "
                    "RowFilter doesn't support "
                    "the %s operation yet" % op)
        return r

    @staticmethod
    def QualifierFilter(args, rows):
        """This is filter for testing "in-memory HBase".

        This method is called from scan() when 'QualifierFilter' is found in
        the 'filter' argument
        """
        op = args[0]
        value = args[1]
        is_regex = False
        if value.startswith('binaryprefix:'):
            value = value[len('binaryprefix:'):]
        if value.startswith('regexstring:'):
            value = value[len('regexstring:'):]
            is_regex = True
        column = 'f:' + value
        r = {}
        for row in rows:
            data = rows[row]
            r_data = {}
            for key in data:
                if ((op == '=' and key.startswith(column)) or
                        (op == '>=' and key >= column) or
                        (op == '<=' and key <= column) or
                        (op == '>' and key > column) or
                        (op == '<' and key < column) or
                        (is_regex and re.search(value, key))):
                    r_data[key] = data[key]
                else:
                    # NOTE(review): this also fires for *supported*
                    # operators whenever a key merely fails the comparison,
                    # which looks unintended -- confirm before relying on it.
                    raise aodh.NotImplementedError(
                        "In-memory QualifierFilter "
                        "doesn't support the %s "
                        "operation yet" % op)
            if r_data:
                r[row] = r_data
        return r
class MConnectionPool(object):
    """HappyBase.ConnectionPool mock handing out one shared connection."""
    def __init__(self):
        # A single MConnection shared by every caller, so all users of the
        # pool observe the same in-memory tables.
        self.conn = MConnection()
    def connection(self):
        """Return the shared in-memory connection (mimics pool checkout)."""
        return self.conn
class MConnection(object):
    """HappyBase.Connection mock keeping its tables in a plain dict."""

    def __init__(self):
        # Maps table name -> MTable instance.
        self.tables = {}

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    @staticmethod
    def open():
        LOG.debug("Opening in-memory HBase connection")

    def create_table(self, n, families=None):
        """Return table ``n``, creating and registering it on first use."""
        if n not in self.tables:
            self.tables[n] = MTable(n, families or {})
        return self.tables[n]

    def delete_table(self, name, use_prefix=True):
        """Drop table ``name``; ``use_prefix`` is accepted for API parity."""
        self.tables.pop(name)

    def table(self, name):
        """HappyBase-style accessor; lazily creates missing tables."""
        return self.create_table(name)
| apache-2.0 |
CapOM/ChromiumGStreamerBackend | third_party/protobuf/python/google/protobuf/internal/wire_format_test.py | 571 | 10848 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.internal.wire_format."""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import message
from google.protobuf.internal import wire_format
class WireFormatTest(unittest.TestCase):
  """Tests for the tag packing, zigzag and byte-size helpers in
  google.protobuf.internal.wire_format.
  """

  def testPackTag(self):
    """PackTag combines field number and wire type; bad types raise."""
    field_number = 0xabc
    tag_type = 2
    self.assertEqual((field_number << 3) | tag_type,
                     wire_format.PackTag(field_number, tag_type))
    PackTag = wire_format.PackTag
    # Number too high.
    self.assertRaises(message.EncodeError, PackTag, field_number, 6)
    # Number too low.
    self.assertRaises(message.EncodeError, PackTag, field_number, -1)

  def testUnpackTag(self):
    """UnpackTag inverts PackTag and rejects non-integer input."""
    # Test field numbers that will require various varint sizes.
    for expected_field_number in (1, 15, 16, 2047, 2048):
      for expected_wire_type in range(6):  # Highest-numbered wiretype is 5.
        field_number, wire_type = wire_format.UnpackTag(
            wire_format.PackTag(expected_field_number, expected_wire_type))
        self.assertEqual(expected_field_number, field_number)
        self.assertEqual(expected_wire_type, wire_type)
    self.assertRaises(TypeError, wire_format.UnpackTag, None)
    self.assertRaises(TypeError, wire_format.UnpackTag, 'abc')
    self.assertRaises(TypeError, wire_format.UnpackTag, 0.0)
    self.assertRaises(TypeError, wire_format.UnpackTag, object())

  def testZigZagEncode(self):
    """ZigZagEncode maps signed ints onto unsigned varint-friendly ints."""
    Z = wire_format.ZigZagEncode
    self.assertEqual(0, Z(0))
    self.assertEqual(1, Z(-1))
    self.assertEqual(2, Z(1))
    self.assertEqual(3, Z(-2))
    self.assertEqual(4, Z(2))
    self.assertEqual(0xfffffffe, Z(0x7fffffff))
    self.assertEqual(0xffffffff, Z(-0x80000000))
    self.assertEqual(0xfffffffffffffffe, Z(0x7fffffffffffffff))
    self.assertEqual(0xffffffffffffffff, Z(-0x8000000000000000))
    self.assertRaises(TypeError, Z, None)
    self.assertRaises(TypeError, Z, 'abcd')
    self.assertRaises(TypeError, Z, 0.0)
    self.assertRaises(TypeError, Z, object())

  def testZigZagDecode(self):
    """ZigZagDecode is the exact inverse of ZigZagEncode."""
    Z = wire_format.ZigZagDecode
    self.assertEqual(0, Z(0))
    self.assertEqual(-1, Z(1))
    self.assertEqual(1, Z(2))
    self.assertEqual(-2, Z(3))
    self.assertEqual(2, Z(4))
    self.assertEqual(0x7fffffff, Z(0xfffffffe))
    self.assertEqual(-0x80000000, Z(0xffffffff))
    self.assertEqual(0x7fffffffffffffff, Z(0xfffffffffffffffe))
    self.assertEqual(-0x8000000000000000, Z(0xffffffffffffffff))
    self.assertRaises(TypeError, Z, None)
    self.assertRaises(TypeError, Z, 'abcd')
    self.assertRaises(TypeError, Z, 0.0)
    self.assertRaises(TypeError, Z, object())

  def NumericByteSizeTestHelper(self, byte_size_fn, value, expected_value_size):
    """Assert byte_size_fn(field, value) == tag bytes + expected_value_size."""
    # Use field numbers that cause various byte sizes for the tag information.
    for field_number, tag_bytes in ((15, 1), (16, 2), (2047, 2), (2048, 3)):
      expected_size = expected_value_size + tag_bytes
      actual_size = byte_size_fn(field_number, value)
      self.assertEqual(expected_size, actual_size,
                       'byte_size_fn: %s, field_number: %d, value: %r\n'
                       'Expected: %d, Actual: %d' % (
                           byte_size_fn, field_number, value,
                           expected_size, actual_size))

  def testByteSizeFunctions(self):
    """Exercise every *ByteSize() helper over representative values."""
    # Test all numeric *ByteSize() functions.
    NUMERIC_ARGS = [
        # Int32ByteSize().
        [wire_format.Int32ByteSize, 0, 1],
        [wire_format.Int32ByteSize, 127, 1],
        [wire_format.Int32ByteSize, 128, 2],
        [wire_format.Int32ByteSize, -1, 10],
        # Int64ByteSize().
        [wire_format.Int64ByteSize, 0, 1],
        [wire_format.Int64ByteSize, 127, 1],
        [wire_format.Int64ByteSize, 128, 2],
        [wire_format.Int64ByteSize, -1, 10],
        # UInt32ByteSize().
        [wire_format.UInt32ByteSize, 0, 1],
        [wire_format.UInt32ByteSize, 127, 1],
        [wire_format.UInt32ByteSize, 128, 2],
        [wire_format.UInt32ByteSize, wire_format.UINT32_MAX, 5],
        # UInt64ByteSize().
        [wire_format.UInt64ByteSize, 0, 1],
        [wire_format.UInt64ByteSize, 127, 1],
        [wire_format.UInt64ByteSize, 128, 2],
        [wire_format.UInt64ByteSize, wire_format.UINT64_MAX, 10],
        # SInt32ByteSize().
        [wire_format.SInt32ByteSize, 0, 1],
        [wire_format.SInt32ByteSize, -1, 1],
        [wire_format.SInt32ByteSize, 1, 1],
        [wire_format.SInt32ByteSize, -63, 1],
        [wire_format.SInt32ByteSize, 63, 1],
        [wire_format.SInt32ByteSize, -64, 1],
        [wire_format.SInt32ByteSize, 64, 2],
        # SInt64ByteSize().
        [wire_format.SInt64ByteSize, 0, 1],
        [wire_format.SInt64ByteSize, -1, 1],
        [wire_format.SInt64ByteSize, 1, 1],
        [wire_format.SInt64ByteSize, -63, 1],
        [wire_format.SInt64ByteSize, 63, 1],
        [wire_format.SInt64ByteSize, -64, 1],
        [wire_format.SInt64ByteSize, 64, 2],
        # Fixed32ByteSize().
        [wire_format.Fixed32ByteSize, 0, 4],
        [wire_format.Fixed32ByteSize, wire_format.UINT32_MAX, 4],
        # Fixed64ByteSize().
        [wire_format.Fixed64ByteSize, 0, 8],
        [wire_format.Fixed64ByteSize, wire_format.UINT64_MAX, 8],
        # SFixed32ByteSize().
        [wire_format.SFixed32ByteSize, 0, 4],
        [wire_format.SFixed32ByteSize, wire_format.INT32_MIN, 4],
        [wire_format.SFixed32ByteSize, wire_format.INT32_MAX, 4],
        # SFixed64ByteSize().
        [wire_format.SFixed64ByteSize, 0, 8],
        [wire_format.SFixed64ByteSize, wire_format.INT64_MIN, 8],
        [wire_format.SFixed64ByteSize, wire_format.INT64_MAX, 8],
        # FloatByteSize().
        [wire_format.FloatByteSize, 0.0, 4],
        [wire_format.FloatByteSize, 1000000000.0, 4],
        [wire_format.FloatByteSize, -1000000000.0, 4],
        # DoubleByteSize().
        [wire_format.DoubleByteSize, 0.0, 8],
        [wire_format.DoubleByteSize, 1000000000.0, 8],
        [wire_format.DoubleByteSize, -1000000000.0, 8],
        # BoolByteSize().
        [wire_format.BoolByteSize, False, 1],
        [wire_format.BoolByteSize, True, 1],
        # EnumByteSize().
        [wire_format.EnumByteSize, 0, 1],
        [wire_format.EnumByteSize, 127, 1],
        [wire_format.EnumByteSize, 128, 2],
        [wire_format.EnumByteSize, wire_format.UINT32_MAX, 5],
        ]
    for args in NUMERIC_ARGS:
      self.NumericByteSizeTestHelper(*args)

    # Test strings and bytes.
    for byte_size_fn in (wire_format.StringByteSize, wire_format.BytesByteSize):
      # 1 byte for tag, 1 byte for length, 3 bytes for contents.
      self.assertEqual(5, byte_size_fn(10, 'abc'))
      # 2 bytes for tag, 1 byte for length, 3 bytes for contents.
      self.assertEqual(6, byte_size_fn(16, 'abc'))
      # 2 bytes for tag, 2 bytes for length, 128 bytes for contents.
      self.assertEqual(132, byte_size_fn(16, 'a' * 128))

    # Test UTF-8 string byte size calculation.
    # 1 byte for tag, 1 byte for length, 8 bytes for content.
    # Decode a bytes literal instead of calling the Python 2-only
    # ``unicode`` builtin; the result is identical on both Python 2 and 3.
    self.assertEqual(10, wire_format.StringByteSize(
        5, b'\xd0\xa2\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8')))

    class MockMessage(object):
      def __init__(self, byte_size):
        self.byte_size = byte_size
      def ByteSize(self):
        return self.byte_size

    message_byte_size = 10
    mock_message = MockMessage(byte_size=message_byte_size)
    # Test groups.
    # (2 * 1) bytes for begin and end tags, plus message_byte_size.
    self.assertEqual(2 + message_byte_size,
                     wire_format.GroupByteSize(1, mock_message))
    # (2 * 2) bytes for begin and end tags, plus message_byte_size.
    self.assertEqual(4 + message_byte_size,
                     wire_format.GroupByteSize(16, mock_message))
    # Test messages.
    # 1 byte for tag, plus 1 byte for length, plus contents.
    self.assertEqual(2 + mock_message.byte_size,
                     wire_format.MessageByteSize(1, mock_message))
    # 2 bytes for tag, plus 1 byte for length, plus contents.
    self.assertEqual(3 + mock_message.byte_size,
                     wire_format.MessageByteSize(16, mock_message))
    # 2 bytes for tag, plus 2 bytes for length, plus contents.
    mock_message.byte_size = 128
    self.assertEqual(4 + mock_message.byte_size,
                     wire_format.MessageByteSize(16, mock_message))

    # Test message set item byte size.
    # 4 bytes for tags, plus 1 byte for length, plus 1 byte for type_id,
    # plus contents.
    mock_message.byte_size = 10
    self.assertEqual(mock_message.byte_size + 6,
                     wire_format.MessageSetItemByteSize(1, mock_message))
    # 4 bytes for tags, plus 2 bytes for length, plus 1 byte for type_id,
    # plus contents.
    mock_message.byte_size = 128
    self.assertEqual(mock_message.byte_size + 7,
                     wire_format.MessageSetItemByteSize(1, mock_message))
    # 4 bytes for tags, plus 2 bytes for length, plus 2 byte for type_id,
    # plus contents.
    self.assertEqual(mock_message.byte_size + 8,
                     wire_format.MessageSetItemByteSize(128, mock_message))
    # Too-long varint.
    self.assertRaises(message.EncodeError,
                      wire_format.UInt64ByteSize, 1, 1 << 128)
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| bsd-3-clause |
ISIFoundation/influenzanet-website | apps/journal/tests.py | 4 | 2025 | """
Tests for the journal app
"""
import datetime
from django.test import TestCase
from .models import Entry, published_filter
from .navigation import get_nodes
class EntryTest(TestCase):
    """Publication-visibility tests for the journal Entry model.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias,
    which was removed in Python 3.12.
    """

    def setUp(self):
        # Fixed reference points around "now" used by the visibility tests.
        self.today = datetime.datetime.today()
        self.yesterday = self.today - datetime.timedelta(days=1)
        self.tomorrow = self.today + datetime.timedelta(days=1)

    def tearDown(self):
        pass

    def test_unpublished(self):
        """
        Test if unpublished items are hidden by default
        """
        unpublished = Entry.objects.create(
            title='Unpublished Entry',
            slug='unpublished-entry',
            is_published=False,
            pub_date=self.yesterday,
        )
        self.assertEqual(published_filter(Entry.objects).count(), 0)
        unpublished.is_published = True
        unpublished.save()
        self.assertEqual(published_filter(Entry.objects).count(), 1)
        unpublished.is_published = False
        unpublished.save()
        self.assertEqual(published_filter(Entry.objects).count(), 0)
        unpublished.delete()

    def test_future_published(self):
        """
        Tests that items with a future published date are hidden
        """
        future_published = Entry.objects.create(
            title='Future published entry',
            slug='future-published-entry',
            is_published=True,
            pub_date=self.tomorrow,
        )
        self.assertEqual(published_filter(Entry.objects).count(), 0)
        future_published.pub_date = self.yesterday
        future_published.save()
        self.assertEqual(published_filter(Entry.objects).count(), 1)
        future_published.pub_date = self.tomorrow
        future_published.save()
        self.assertEqual(published_filter(Entry.objects).count(), 0)

    def test_navigation(self):
        """
        Tests if the navigation build by navigation.get_nodes is correct
        """
        pass
| agpl-3.0 |
sametmax/Django--an-app-at-a-time | apps/app6_template_tools/views.py | 1 | 2017 | # coding: utf-8
import datetime
import random
from django.shortcuts import render
def index(request):
    """Render the landing page of the template-tools demo app."""
    return render(request, 'app6_index.html')
def filters(request):
    """Assemble a context exercising a range of built-in template filters
    and render the filters demo page with it.
    """
    ctx = {}

    # filter: random, pluralize, length_is, join
    ctx['people'] = (
        'Mr Black',
        'Mr Pink',
        'Mr White',
        'Mr Pinkman',
        'Yellow Bioman'
    )
    # filter: add
    ctx['number'] = random.randint(1, 2)
    # filter: intcomma
    ctx['money'] = 1000000000
    # filter: pluralize
    ctx['random_people'] = ctx['people'][0:ctx['number']]
    # filter: date
    ctx['today'] = datetime.datetime.now()
    # filter: title
    ctx['quote'] = "all your bases are belong to us"
    # filter: striptags, safe
    ctx['html'] = '<a id="me" href="#me">Look at <strong>me !</strong></a>'
    # filter: yesno
    ctx['answer'] = random.choice((True, False, None))
    # filter: urlizetrunc
    ctx['url'] = 'http://averylongurltodisplayastruncated.com/index?foo=bar'
    # filter: truncatewords
    ctx['phrase'] = '''
    A very very very long phrase that we shall truncate so it doesn't get
    to long to read or mess up the layout of our beloved and wonderful
    website. Did I mentioned I love platipus ?
    '''
    return render(request, 'app6_filters.html', ctx)
def tags(request):
    """Assemble the context used by the template-tags demo page."""
    ctx = {}

    # tag: if
    ctx['answer'] = random.choice((True, False, None))
    # tag: if
    ctx['number'] = random.randint(1, 2)
    # filter: for
    ctx['people'] = (
        'Mr Black',
        'Mr Pink',
        'Mr White',
        'Mr Pinkman',
        'Yellow Bioman'
    )
    # filter: for
    ctx['teams'] = {
        "A": ctx['people'][:3],
        "B": ctx['people'][3:],
    }
    return render(request, 'app6_tags.html', ctx)
def inheritance(request):
    """Render the template-inheritance demo page."""
    return render(request, 'app6_inheritance.html')
def includes(request):
    """Render the template-includes demo page."""
    return render(request, 'app6_includes.html')
| mit |
PatrickOReilly/scikit-learn | sklearn/linear_model/theil_sen.py | 39 | 14339 | # -*- coding: utf-8 -*-
"""
A Theil-Sen Estimator for Multiple Linear Regression Model
"""
# Author: Florian Wilhelm <florian.wilhelm@gmail.com>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import warnings
from itertools import combinations
import numpy as np
from scipy import linalg
from scipy.special import binom
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import check_random_state
from ..utils import check_X_y, _get_n_jobs
from ..utils.random import choice
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange as range
from ..exceptions import ConvergenceWarning
_EPSILON = np.finfo(np.double).eps
def _modified_weiszfeld_step(X, x_old):
"""Modified Weiszfeld step.
This function defines one iteration step in order to approximate the
spatial median (L1 median). It is a form of an iteratively re-weighted
least squares method.
Parameters
----------
X : array, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
x_old : array, shape = [n_features]
Current start vector.
Returns
-------
x_new : array, shape = [n_features]
New iteration step.
References
----------
- On Computation of Spatial Median for Robust Data Mining, 2005
T. Kärkkäinen and S. Äyrämö
http://users.jyu.fi/~samiayr/pdf/ayramo_eurogen05.pdf
"""
diff = X - x_old
diff_norm = np.sqrt(np.sum(diff ** 2, axis=1))
mask = diff_norm >= _EPSILON
# x_old equals one of our samples
is_x_old_in_X = int(mask.sum() < X.shape[0])
diff = diff[mask]
diff_norm = diff_norm[mask][:, np.newaxis]
quotient_norm = linalg.norm(np.sum(diff / diff_norm, axis=0))
if quotient_norm > _EPSILON: # to avoid division by zero
new_direction = (np.sum(X[mask, :] / diff_norm, axis=0)
/ np.sum(1 / diff_norm, axis=0))
else:
new_direction = 1.
quotient_norm = 1.
return (max(0., 1. - is_x_old_in_X / quotient_norm) * new_direction
+ min(1., is_x_old_in_X / quotient_norm) * x_old)
def _spatial_median(X, max_iter=300, tol=1.e-3):
    """Approximate the spatial (L1) median of the rows of ``X``.

    The spatial median is the point minimising the sum of Euclidean
    distances to all samples.  Starting from the mean, the estimate is
    refined with ``_modified_weiszfeld_step`` until the squared
    displacement between iterations drops below ``tol ** 2`` or
    ``max_iter`` iterations have run.

    Parameters
    ----------
    X : array, shape = [n_samples, n_features]
        Training vector.
    max_iter : int, optional
        Maximum number of iterations. Default is 300.
    tol : float, optional
        Stop the algorithm if spatial_median has converged. Default is 1.e-3.

    Returns
    -------
    n_iter : int
        Number of iterations needed.
    spatial_median : array, shape = [n_features]
        Spatial median.
    """
    if X.shape[1] == 1:
        # One-dimensional data: the spatial median is the ordinary median.
        return 1, np.median(X.ravel())

    tol **= 2  # compare squared displacement against the squared tolerance
    estimate = np.mean(X, axis=0)
    for n_iter in range(max_iter):
        refined = _modified_weiszfeld_step(X, estimate)
        if np.sum((estimate - refined) ** 2) < tol:
            break
        estimate = refined
    else:
        # The for/else fires only when the loop ran to exhaustion.
        warnings.warn("Maximum number of iterations {max_iter} reached in "
                      "spatial median for TheilSen regressor."
                      "".format(max_iter=max_iter), ConvergenceWarning)
    return n_iter, refined
def _breakdown_point(n_samples, n_subsamples):
"""Approximation of the breakdown point.
Parameters
----------
n_samples : int
Number of samples.
n_subsamples : int
Number of subsamples to consider.
Returns
-------
breakdown_point : float
Approximation of breakdown point.
"""
return 1 - (0.5 ** (1 / n_subsamples) * (n_samples - n_subsamples + 1) +
n_subsamples - 1) / n_samples
def _lstsq(X, y, indices, fit_intercept):
"""Least Squares Estimator for TheilSenRegressor class.
This function calculates the least squares method on a subset of rows of X
and y defined by the indices array. Optionally, an intercept column is
added if intercept is set to true.
Parameters
----------
X : array, shape = [n_samples, n_features]
Design matrix, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target vector, where n_samples is the number of samples.
indices : array, shape = [n_subpopulation, n_subsamples]
Indices of all subsamples with respect to the chosen subpopulation.
fit_intercept : bool
Fit intercept or not.
Returns
-------
weights : array, shape = [n_subpopulation, n_features + intercept]
Solution matrix of n_subpopulation solved least square problems.
"""
fit_intercept = int(fit_intercept)
n_features = X.shape[1] + fit_intercept
n_subsamples = indices.shape[1]
weights = np.empty((indices.shape[0], n_features))
X_subpopulation = np.ones((n_subsamples, n_features))
# gelss need to pad y_subpopulation to be of the max dim of X_subpopulation
y_subpopulation = np.zeros((max(n_subsamples, n_features)))
lstsq, = get_lapack_funcs(('gelss',), (X_subpopulation, y_subpopulation))
for index, subset in enumerate(indices):
X_subpopulation[:, fit_intercept:] = X[subset, :]
y_subpopulation[:n_subsamples] = y[subset]
weights[index] = lstsq(X_subpopulation,
y_subpopulation)[1][:n_features]
return weights
class TheilSenRegressor(LinearModel, RegressorMixin):
    """Theil-Sen Estimator: robust multivariate regression model.
    The algorithm calculates least square solutions on subsets with size
    n_subsamples of the samples in X. Any value of n_subsamples between the
    number of features and samples leads to an estimator with a compromise
    between robustness and efficiency. Since the number of least square
    solutions is "n_samples choose n_subsamples", it can be extremely large
    and can therefore be limited with max_subpopulation. If this limit is
    reached, the subsets are chosen randomly. In a final step, the spatial
    median (or L1 median) is calculated of all least square solutions.
    Read more in the :ref:`User Guide <theil_sen_regression>`.
    Parameters
    ----------
    fit_intercept : boolean, optional, default True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    max_subpopulation : int, optional, default 1e4
        Instead of computing with a set of cardinality 'n choose k', where n is
        the number of samples and k is the number of subsamples (at least
        number of features), consider only a stochastic subpopulation of a
        given maximal size if 'n choose k' is larger than max_subpopulation.
        For other than small problem sizes this parameter will determine
        memory usage and runtime if n_subsamples is not changed.
    n_subsamples : int, optional, default None
        Number of samples to calculate the parameters. This is at least the
        number of features (plus 1 if fit_intercept=True) and the number of
        samples as a maximum. A lower number leads to a higher breakdown
        point and a low efficiency while a high number leads to a low
        breakdown point and a high efficiency. If None, take the
        minimum number of subsamples leading to maximal robustness.
        If n_subsamples is set to n_samples, Theil-Sen is identical to least
        squares.
    max_iter : int, optional, default 300
        Maximum number of iterations for the calculation of spatial median.
    tol : float, optional, default 1.e-3
        Tolerance when calculating spatial median.
    random_state : RandomState or an int seed, optional, default None
        A random number generator instance to define the state of the
        random permutations generator.
    n_jobs : integer, optional, default 1
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.
    verbose : boolean, optional, default False
        Verbose mode when fitting the model.
    Attributes
    ----------
    coef_ : array, shape = (n_features)
        Coefficients of the regression model (median of distribution).
    intercept_ : float
        Estimated intercept of regression model.
    breakdown_ : float
        Approximated breakdown point.
    n_iter_ : int
        Number of iterations needed for the spatial median.
    n_subpopulation_ : int
        Number of combinations taken into account from 'n choose k', where n is
        the number of samples and k is the number of subsamples.
    References
    ----------
    - Theil-Sen Estimators in a Multiple Linear Regression Model, 2009
      Xin Dang, Hanxiang Peng, Xueqin Wang and Heping Zhang
      http://home.olemiss.edu/~xdang/papers/MTSE.pdf
    """
    def __init__(self, fit_intercept=True, copy_X=True,
                 max_subpopulation=1e4, n_subsamples=None, max_iter=300,
                 tol=1.e-3, random_state=None, n_jobs=1, verbose=False):
        # Parameters are stored unmodified, as scikit-learn estimators
        # require; validation happens in fit() via _check_subparams().
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.max_subpopulation = int(max_subpopulation)
        self.n_subsamples = n_subsamples
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.verbose = verbose
    def _check_subparams(self, n_samples, n_features):
        """Validate n_subsamples/max_subpopulation against the data size and
        return the effective ``(n_subsamples, n_subpopulation)`` pair.
        """
        n_subsamples = self.n_subsamples
        # The intercept, if fitted, adds one dimension to each subproblem.
        if self.fit_intercept:
            n_dim = n_features + 1
        else:
            n_dim = n_features
        if n_subsamples is not None:
            if n_subsamples > n_samples:
                raise ValueError("Invalid parameter since n_subsamples > "
                                 "n_samples ({0} > {1}).".format(n_subsamples,
                                                                 n_samples))
            if n_samples >= n_features:
                if n_dim > n_subsamples:
                    plus_1 = "+1" if self.fit_intercept else ""
                    raise ValueError("Invalid parameter since n_features{0} "
                                     "> n_subsamples ({1} > {2})."
                                     "".format(plus_1, n_dim, n_samples))
            else:  # if n_samples < n_features
                if n_subsamples != n_samples:
                    raise ValueError("Invalid parameter since n_subsamples != "
                                     "n_samples ({0} != {1}) while n_samples "
                                     "< n_features.".format(n_subsamples,
                                                            n_samples))
        else:
            # Smallest legal subset size gives the maximal breakdown point.
            n_subsamples = min(n_dim, n_samples)
        if self.max_subpopulation <= 0:
            raise ValueError("Subpopulation must be strictly positive "
                             "({0} <= 0).".format(self.max_subpopulation))
        all_combinations = max(1, np.rint(binom(n_samples, n_subsamples)))
        n_subpopulation = int(min(self.max_subpopulation, all_combinations))
        return n_subsamples, n_subpopulation
    def fit(self, X, y):
        """Fit linear model.
        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples]
            Target values
        Returns
        -------
        self : returns an instance of self.
        """
        random_state = check_random_state(self.random_state)
        X, y = check_X_y(X, y, y_numeric=True)
        n_samples, n_features = X.shape
        n_subsamples, self.n_subpopulation_ = self._check_subparams(n_samples,
                                                                    n_features)
        self.breakdown_ = _breakdown_point(n_samples, n_subsamples)
        if self.verbose:
            print("Breakdown point: {0}".format(self.breakdown_))
            print("Number of samples: {0}".format(n_samples))
            tol_outliers = int(self.breakdown_ * n_samples)
            print("Tolerable outliers: {0}".format(tol_outliers))
            print("Number of subpopulations: {0}".format(
                self.n_subpopulation_))
        # Determine indices of subpopulation
        if np.rint(binom(n_samples, n_subsamples)) <= self.max_subpopulation:
            # Small enough: enumerate every subset exhaustively.
            indices = list(combinations(range(n_samples), n_subsamples))
        else:
            # Too many combinations: sample subsets at random instead.
            indices = [choice(n_samples,
                              size=n_subsamples,
                              replace=False,
                              random_state=random_state)
                       for _ in range(self.n_subpopulation_)]
        # Solve the least-squares subproblems in parallel, one chunk per job.
        n_jobs = _get_n_jobs(self.n_jobs)
        index_list = np.array_split(indices, n_jobs)
        weights = Parallel(n_jobs=n_jobs,
                           verbose=self.verbose)(
            delayed(_lstsq)(X, y, index_list[job], self.fit_intercept)
            for job in range(n_jobs))
        weights = np.vstack(weights)
        # The robust estimate is the spatial median of all subset solutions.
        self.n_iter_, coefs = _spatial_median(weights,
                                              max_iter=self.max_iter,
                                              tol=self.tol)
        if self.fit_intercept:
            self.intercept_ = coefs[0]
            self.coef_ = coefs[1:]
        else:
            self.intercept_ = 0.
            self.coef_ = coefs
        return self
| bsd-3-clause |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/utils/_signatures.py | 16 | 29681 | """Function signature objects for callables.
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.7 and 3.2+.
"""
#-----------------------------------------------------------------------------
# Python 3.3 stdlib inspect.py is public domain
#
# Backports Copyright (C) 2013 Aaron Iles
# Used under Apache License Version 2.0
#
# Further Changes are Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import itertools
import functools
import re
import types
# patch for single-file
# we don't support 2.6, so we can just import OrderedDict
from collections import OrderedDict
__version__ = '0.3'
# end patch
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
    """Render an annotation for display in a signature string.

    Classes are shown by qualified name; the module prefix is dropped for
    builtins and for classes living in *base_module*. Anything that is not
    a class falls back to ``repr``.
    """
    if not isinstance(annotation, type):
        return repr(annotation)
    module = annotation.__module__
    if module in ('builtins', '__builtin__', base_module):
        return annotation.__name__
    return '{0}.{1}'.format(module, annotation.__name__)
def _get_user_defined_method(cls, method_name, *nested):
    """Fetch *method_name* from *cls*, returning it only when user-defined.

    Each name in *nested* is followed through attribute access, keeping the
    previous value as a fallback. Returns None when *cls* is ``type``
    itself, when the attribute is missing, or when the result is a C-level
    callable (slot wrapper, method wrapper, builtin).
    """
    if cls is type:
        return None
    try:
        meth = getattr(cls, method_name)
        for attr_name in nested:
            meth = getattr(meth, attr_name, meth)
    except AttributeError:
        return None
    # Once '__signature__' is supported on C-level callables this
    # filtering will no longer be necessary.
    if isinstance(meth, _NonUserDefinedCallables):
        return None
    return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Resolution order: methods are unwrapped to their underlying function
    (trimming the first parameter for bound methods); an explicit
    ``__signature__`` attribute wins; then ``__wrapped__`` (decorators),
    plain functions, ``functools.partial`` objects, classes (via metaclass
    ``__call__``, then ``__new__``, then ``__init__``), and finally
    arbitrary callable instances. Raises TypeError for non-callables and
    ValueError for callables that cannot be introspected.
    '''
    if not callable(obj):
        raise TypeError('{0!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        if obj.__self__ is None:
            # Unbound method - treat it as a function (no distinction in Py 3)
            obj = obj.__func__
        else:
            # Bound method: trim off the first parameter (typically self or cls)
            sig = signature(obj.__func__)
            return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    try:
        # An explicitly attached signature takes precedence over introspection.
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)
    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)
    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature and fold in the
        # arguments frozen by the partial.
        sig = signature(obj.func)
        new_params = OrderedDict(sig.parameters.items())
        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {0!r} has incorrect arguments'.format(obj)
            raise ValueError(msg)
        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                    not param._partial_kwarg):
                # Positionally pre-bound by the partial: drop it entirely.
                new_params.pop(arg_name)
        return sig.replace(parameters=new_params.values())
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__', 'im_func')
        if call is not None:
            sig = signature(call)
    if sig is not None:
        return sig
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {0!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {0!r} is not supported by signature'.format(obj))
# Internal sentinel meaning "argument not supplied" in the various
# ``replace`` methods; distinct from _empty so that _empty itself can be
# passed as an explicit value.
class _void(object):
    '''A private marker - used in Parameter & Signature'''
class _empty(object):
    """Marker for parameters that have no default value or annotation."""
    pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
    '''Represents a parameter in a function signature.
    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''
    # _partial_kwarg marks parameters whose default was frozen by
    # functools.partial (see signature()); it is internal bookkeeping and
    # deliberately excluded from __eq__.
    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs parameters can never carry a default.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{0} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is None:
            # Only positional-only parameters may be nameless (they get a
            # numeric name assigned later by Signature.__init__).
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            # Non-positional-only names must be valid Python identifiers.
            if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
                msg = '{0!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name
        self._partial_kwarg = _partial_kwarg
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        # _void (not _empty) marks "keep the current value" so that _empty
        # itself can be set explicitly.
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg
        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)
    def __str__(self):
        kind = self.kind
        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters render as '<name>'.
            if formatted is None:
                formatted = ''
            formatted = '<{0}>'.format(formatted)
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{0}:{1}'.format(formatted,
                                         formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{0}={1}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
                                              id(self), self.name)
    def __hash__(self):
        # Parameters are mutable via replace() semantics; explicitly
        # unhashable, mirroring the stdlib backport behaviour.
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        # Note: _partial_kwarg is intentionally not part of equality.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments(object):
    '''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
    to the function's parameters.
    Has the following public attributes:
    arguments : :class:`collections.OrderedDict`
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    signature : :class:`Signature`
        The Signature object that created this instance.
    args : tuple
        Tuple of positional arguments values.
    kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Walk parameters in declaration order, collecting values until the
        # first parameter that must be passed by keyword.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Everything not consumed by 'args' above: values from the first
        # keyword-only/partial-bound parameter (or first gap) onwards.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                        param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # A missing positional value means later values can
                        # only be passed as keywords.
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __hash__(self):
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature(object):
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.
    A Signature object has the following public attributes:
    parameters : :class:`collections.OrderedDict`
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    return_annotation
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    '''
    __slots__ = ('_return_annotation', '_parameters')
    # Subclass hooks: which classes to instantiate for parameters and for
    # the result of bind()/bind_partial().
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                # Enforce kind ordering (positional-only first, **kwargs
                # last) and name uniqueness while copying.
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    if kind < top_kind:
                        msg = 'wrong parameter order: {0} before {1}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Nameless positional-only parameters get their
                        # index as a name.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {0!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                      for param in parameters))
        self._parameters = params
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{0!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        # co_kwonlyargcount does not exist on Python 2 code objects.
        keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = getattr(func, '__annotations__', {})
        defaults = func.__defaults__
        kwdefaults = getattr(func, '__kwdefaults__', None)
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args (CO_VARARGS flag)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs (CO_VARKEYWORDS flag)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        # MappingProxyType exists on Python >= 3.3; older interpreters get
        # a plain (mutable) copy instead of a read-only view.
        try:
            return types.MappingProxyType(self._parameters)
        except AttributeError:
            return OrderedDict(self._parameters.items())
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __hash__(self):
        msg = "unhashable type: '{0}'".format(self.__class__.__name__)
        raise TypeError(msg)
    def __eq__(self, other):
        if (not issubclass(type(other), Signature) or
                self.return_annotation != other.return_annotation or
                len(self.parameters) != len(other.parameters)):
            return False
        # Keyword-only parameters are matched by name only; all other
        # parameters must also occupy the same position.
        other_positions = dict((param, idx)
                               for idx, param in enumerate(other.parameters.keys()))
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                            param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, partial=False):
        '''Private method. Don't use directly.'''
        # NOTE: 'kwargs' is consumed destructively (kwargs.pop below);
        # bind()/bind_partial() always pass in a freshly collected dict.
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg)
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments')
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name))
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(self, *args, **kwargs):
        '''Get a :class:`BoundArguments` object, that maps the passed `args`
        and `kwargs` to the function's signature. Raises :exc:`TypeError`
        if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs)
    def bind_partial(self, *args, **kwargs):
        '''Get a :class:`BoundArguments` object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises :exc:`TypeError` if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs, partial=True)
    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({0})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {0}'.format(anno)
        return rendered
| gpl-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/benchmarks/bench_tree.py | 1 | 3618 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import gc
from datetime import datetime
import numpy as np
import pylab as pl
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
    """Time fit+predict of a scikit-learn DecisionTreeClassifier.

    Appends the elapsed wall-clock time in seconds to the module-level
    ``scikit_classifier_results`` list.
    """
    from sklearn.tree import DecisionTreeClassifier
    gc.collect()  # minimise GC noise inside the timed region
    started = datetime.now()
    DecisionTreeClassifier().fit(X, Y).predict(X)
    elapsed = datetime.now() - started
    scikit_classifier_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
    """Time fit+predict of a scikit-learn DecisionTreeRegressor.

    Appends the elapsed wall-clock time in seconds to the module-level
    ``scikit_regressor_results`` list.
    """
    from sklearn.tree import DecisionTreeRegressor
    gc.collect()  # minimise GC noise inside the timed region
    started = datetime.now()
    DecisionTreeRegressor().fit(X, Y).predict(X)
    elapsed = datetime.now() - started
    scikit_regressor_results.append(
        elapsed.seconds + elapsed.microseconds / mu_second)
if __name__ == '__main__':
    print('============================================')
    print('Warning: this is going to take a looong time')
    print('============================================')
    # Benchmark 1: fixed dimensionality, growing sample count.
    n = 10
    step = 10000
    n_samples = 10000
    dim = 10
    n_classes = 10
    for i in range(n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        n_samples += step
        X = np.random.randn(n_samples, dim)
        Y = np.random.randint(0, n_classes, (n_samples,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(n_samples)
        bench_scikit_tree_regressor(X, Y)
    # NOTE(review): xx starts at 0 while the actual sample counts start at
    # n_samples + step (20000) -- the x-axis labels appear offset; confirm
    # intent before relying on the plot's absolute values.
    xx = range(0, n * step, step)
    pl.figure('scikit-learn tree benchmark results')
    pl.subplot(211)
    pl.title('Learning with varying number of samples')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of samples')
    pl.ylabel('Time (s)')
    # Benchmark 2: fixed sample count (100), growing dimensionality.
    scikit_classifier_results = []
    scikit_regressor_results = []
    n = 10
    step = 500
    start_dim = 500
    n_classes = 10
    dim = start_dim
    for i in range(0, n):
        print('============================================')
        print('Entering iteration %s of %s' % (i, n))
        print('============================================')
        dim += step
        X = np.random.randn(100, dim)
        Y = np.random.randint(0, n_classes, (100,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(100)
        bench_scikit_tree_regressor(X, Y)
    xx = np.arange(start_dim, start_dim + n * step, step)
    pl.subplot(212)
    pl.title('Learning in high dimensional spaces')
    pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
    pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
    pl.legend(loc='upper left')
    pl.xlabel('number of dimensions')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
ruiaylin/percona-xtrabackup | storage/innobase/xtrabackup/test/kewpie/percona_tests/xtrabackup_disabled/galeraInfo_test.py | 24 | 4569 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import unittest
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
def skip_checks(system_manager):
    """Decide whether this test module should be skipped.

    Returns a ``(skip, reason)`` tuple: ``(False, '')`` when the server
    under test is a galera/wsrep build, otherwise ``(True, <reason>)``.
    """
    if system_manager.code_manager.test_type == 'galera':
        return False, ''
    return True, "Requires galera / wsrep server"
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
    """Verify that innobackupex --galera-info records the wsrep state.

    Takes and prepares a backup of a populated server, then checks that
    the xtrabackup_galera_info file matches the server's
    wsrep_local_state_uuid and wsrep_last_committed status values.
    """
    def setUp(self):
        # Start each test from a clean backup directory.
        master_server = servers[0] # assumption that this is 'master'
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        # remove backup path
        if os.path.exists(backup_path):
            shutil.rmtree(backup_path)
    def test_basic1(self):
        self.servers = servers
        innobackupex = test_executor.system_manager.innobackupex_path
        xtrabackup = test_executor.system_manager.xtrabackup_path
        master_server = servers[0] # assumption that this is 'master'
        backup_path = os.path.join(master_server.vardir, '_xtrabackup')
        output_path = os.path.join(master_server.vardir, 'innobackupex.out')
        exec_path = os.path.dirname(innobackupex)
        # populate our server with a test bed
        test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
        retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
        # take a backup (with --galera-info so the wsrep state is recorded)
        cmd = ("%s --defaults-file=%s --galera-info --user=root --port=%d"
               " --host=127.0.0.1 --no-timestamp"
               " --ibbackup=%s %s" %( innobackupex
                                    , master_server.cnf_file
                                    , master_server.master_port
                                    , xtrabackup
                                    , backup_path))
        retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
        self.assertTrue(retcode==0,output)
        # prepare our backup
        cmd = ("%s --apply-log --galera-info --no-timestamp --use-memory=500M "
               "--ibbackup=%s %s" %( innobackupex
                                   , xtrabackup
                                   , backup_path))
        retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
        self.assertEqual(retcode, 0, msg= output)
        # Get a test value:
        query = "SHOW STATUS LIKE 'wsrep_local_state_uuid'"
        retcode, result = self.execute_query(query, master_server)
        wsrep_local_state_uuid = result[0][1]
        # Get our other galera-info value
        query = "SHOW STATUS LIKE 'wsrep_last_committed'"
        retcode, result = self.execute_query(query, master_server)
        wsrep_last_committed = result[0][1]
        # check our log: the file holds a single "uuid:seqno" line which
        # must match the live server status values captured above
        with open(os.path.join(backup_path,'xtrabackup_galera_info'),'r') as galera_info_file:
            galera_info = galera_info_file.readline().strip()
        logged_wsrep_local_state_uuid, logged_wsrep_last_committed = galera_info.split(':')
        self.assertEqual( wsrep_local_state_uuid
                        , logged_wsrep_local_state_uuid
                        , msg = (wsrep_local_state_uuid, logged_wsrep_local_state_uuid)
                        )
        self.assertEqual( wsrep_last_committed
                        , logged_wsrep_last_committed
                        , msg = (wsrep_last_committed, logged_wsrep_last_committed)
                        )
| gpl-2.0 |
rizotas/waliki | waliki/pdf/views.py | 2 | 1088 | import tempfile
from sh import rst2pdf
from django.shortcuts import get_object_or_404
from waliki.models import Page
from waliki.utils import send_file
from waliki.settings import WALIKI_PDF_INCLUDE_TITLE
from waliki.settings import WALIKI_PDF_RST2PDF_BIN
from waliki.acl import permission_required
@permission_required('view_page')
def pdf(request, slug):
    """Render the wiki page identified by *slug* as a PDF download.

    The page's reStructuredText source is converted with rst2pdf into a
    temporary file which is then streamed back via send_file.
    """
    page = get_object_or_404(Page, slug=slug)
    with tempfile.NamedTemporaryFile(suffix='.pdf') as output:
        outfile = output.name
        if WALIKI_PDF_INCLUDE_TITLE:
            # Prepend the page title as an rST section ("/" used as the
            # over/underline adornment character).
            line = "/" * len(page.title)
            title = "%s\n%s\n%s\n\n" % (line, page.title, line)
            # NOTE(review): delete=False means this .rst temp file is never
            # removed afterwards -- confirm whether cleanup is intended.
            with tempfile.NamedTemporaryFile(suffix='.rst', mode="w", delete=False) as infile:
                infile.file.write(title + page.raw)
            infile = infile.name
        else:
            infile = page.abspath
        if WALIKI_PDF_RST2PDF_BIN:
            rst2pdf._path = WALIKI_PDF_RST2PDF_BIN.encode('utf8')
        rst2pdf(infile, o=outfile)
        filename = page.title.replace('/', '-').replace('..', '-')
        # NOTE(review): send_file is invoked while the temp .pdf is still
        # open; the file is deleted when the 'with' block exits -- verify
        # send_file reads it eagerly before then.
        return send_file(outfile, filename="%s.pdf" % filename)
| bsd-3-clause |
eroicaleo/LearningPython | interview/leet/218_The_Skyline_Problem_v4.py | 1 | 2103 | #!/usr/bin/env python
# ---- ----------
# | | | |
# | --- |----- |
# | | | | |
# | | | | |
# ---------------------------
from heapq import heapify, heappush, heappop
import math
class Solution:
    """LeetCode 218 -- The Skyline Problem.

    Both methods sweep the buildings (given as ``[left, right, height]``
    triples sorted by left edge) left-to-right while maintaining a
    max-heap of buildings currently overlapping the sweep line, emitting
    ``[x, height]`` key points wherever the visible height changes.
    """

    def getSkyline(self, buildings):
        """Return the skyline key points for *buildings*.

        Fixes over the earlier debug version: the result is actually
        returned, per-iteration debug prints are removed, and redundant
        key points (consecutive points with the same height, produced by
        adjacent equal-height buildings) are suppressed.
        """
        # Max-heap of [-height, -right_edge]. The sentinel [0, -inf]
        # represents ground level and is never popped, so heap[0] is
        # always valid.
        heap, ret = [[0, -math.inf]], []
        # The trailing [inf, inf, 0] pseudo-building flushes the heap.
        for (li, ri, hi) in buildings + [[math.inf, math.inf, 0]]:
            # Retire every building whose right edge ends before li.
            while -heap[0][1] < li:
                ri_top = -heappop(heap)[1]
                # Drop buildings fully covered by the retired one.
                while -heap[0][1] <= ri_top:
                    heappop(heap)
                top = -heap[0][0]
                # Emit a key point only when the height really changes.
                if not ret or ret[-1][1] != top:
                    ret.append([ri_top, top])
            if ret and ret[-1][0] == li:
                # Several buildings start at the same x: keep the tallest.
                ret[-1][1] = max(ret[-1][1], hi)
            elif hi > -heap[0][0]:
                ret.append([li, hi])
            heappush(heap, [-hi, -ri])
        return ret

    def getSkyline_named_tuple(self, buildings):
        """Same algorithm as :meth:`getSkyline`, with heap entries stored
        as a namedtuple for readability.
        """
        from collections import namedtuple
        building_info = namedtuple('building_info', ['neg_hi', 'neg_ri'])
        heap, ret = [building_info(0, -math.inf)], []
        for (li, ri, hi) in buildings + [[math.inf, math.inf, 0]]:
            while -heap[0].neg_ri < li:
                ri_top = -heappop(heap).neg_ri
                while -heap[0].neg_ri <= ri_top:
                    heappop(heap)
                top = -heap[0].neg_hi
                if not ret or ret[-1][1] != top:
                    ret.append([ri_top, top])
            if ret and ret[-1][0] == li:
                ret[-1][1] = max(ret[-1][1], hi)
            elif hi > -heap[0].neg_hi:
                ret.append([li, hi])
            heappush(heap, building_info(-hi, -ri))
        return ret
# Ad-hoc smoke-test driver: prints the skyline for a few hand-picked
# inputs (all sorted by left edge; the last one is the classic LeetCode
# example). Expected results are not asserted, only eyeballed.
sol = Solution()
buildings_list = [
    [],
    [ [2, 3, 20], [2, 4, 15], [2, 5, 10] ],
    [ [2, 3, 10], [3, 4, 10], [4, 5, 10] ],
    [ [2, 3, 10], [2, 4, 15], [2, 5, 20] ],
    [ [2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8] ],
]
for buildings in buildings_list:
    print(f'buildings = {buildings}')
    print(sol.getSkyline_named_tuple(buildings))
| mit |
bdfoster/blumate | tests/components/light/test_mqtt.py | 1 | 12976 | """The tests for the MQTT light platform.
Configuration for RGB Version with brightness:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
qos: 0
payload_on: "on"
payload_off: "off"
config without RGB and brightness:
light:
platform: mqtt
name: "Office Light"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
qos: 0
payload_on: "on"
payload_off: "off"
config for RGB Version with brightness and scale:
light:
platform: mqtt
name: "Office Light RGB"
state_topic: "office/rgb1/light/status"
command_topic: "office/rgb1/light/switch"
brightness_state_topic: "office/rgb1/brightness/status"
brightness_command_topic: "office/rgb1/brightness/set"
brightness_scale: 99
rgb_state_topic: "office/rgb1/rgb/status"
rgb_command_topic: "office/rgb1/rgb/set"
rgb_scale: 99
qos: 0
payload_on: "on"
payload_off: "off"
"""
import unittest
from blumate.bootstrap import _setup_component
from blumate.const import STATE_ON, STATE_OFF, ATTR_ASSUMED_STATE
import blumate.components.light as light
from tests.common import (
get_test_home_assistant, mock_mqtt_component, fire_mqtt_message)
class TestLightMQTT(unittest.TestCase):
    """Test the MQTT light.

    Each test stands up a mocked home-assistant core with a mocked MQTT
    component, configures the `mqtt` light platform, then drives it by
    firing simulated MQTT messages and asserting on the resulting entity
    state / published payloads.
    """

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Replaces the real MQTT component; records publish() calls so tests
        # can assert on (topic, payload, qos, retain) tuples.
        self.mock_publish = mock_mqtt_component(self.hass)

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_fail_setup_if_no_command_topic(self):
        """Test if command fails with command topic."""
        self.hass.config.components = ['mqtt']
        # 'command_topic' is required by the platform schema, so setup
        # must fail and no entity may be created.
        assert not _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
            }
        })
        self.assertIsNone(self.hass.states.get('light.test'))

    def test_no_color_or_brightness_if_no_topics(self):
        """Test if there is no color and brightness if no topic."""
        self.hass.config.components = ['mqtt']
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test_light_rgb/status',
                'command_topic': 'test_light_rgb/set',
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        # Turning the light on must not invent color/brightness attributes
        # when no rgb/brightness topics were configured.
        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))

    def test_controlling_state_via_topic(self):
        """Test the controlling of the state via topic."""
        self.hass.config.components = ['mqtt']
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test_light_rgb/status',
                'command_topic': 'test_light_rgb/set',
                'brightness_state_topic': 'test_light_rgb/brightness/status',
                'brightness_command_topic': 'test_light_rgb/brightness/set',
                'rgb_state_topic': 'test_light_rgb/rgb/status',
                'rgb_command_topic': 'test_light_rgb/rgb/set',
                'qos': '0',
                # Non-string payloads: exercises payload coercion.
                'payload_on': 1,
                'payload_off': 0
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('rgb_color'))
        self.assertIsNone(state.attributes.get('brightness'))
        # A state topic is configured, so the entity must NOT be optimistic.
        self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        # Defaults before any rgb/brightness status message arrives.
        self.assertEqual([255, 255, 255], state.attributes.get('rgb_color'))
        self.assertEqual(255, state.attributes.get('brightness'))
        fire_mqtt_message(self.hass, 'test_light_rgb/status', '0')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.pool.block_till_done()
        fire_mqtt_message(self.hass, 'test_light_rgb/brightness/status', '100')
        self.hass.pool.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.pool.block_till_done()
        self.assertEqual(100,
                         light_state.attributes['brightness'])
        fire_mqtt_message(self.hass, 'test_light_rgb/status', '1')
        self.hass.pool.block_till_done()
        # RGB status arrives as a comma-separated string and must be parsed.
        fire_mqtt_message(self.hass, 'test_light_rgb/rgb/status',
                          '125,125,125')
        self.hass.pool.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.assertEqual([125, 125, 125],
                         light_state.attributes.get('rgb_color'))

    def test_controlling_scale(self):
        """Test the controlling scale."""
        self.hass.config.components = ['mqtt']
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test_scale/status',
                'command_topic': 'test_scale/set',
                'brightness_state_topic': 'test_scale/brightness/status',
                'brightness_command_topic': 'test_scale/brightness/set',
                # Device-side brightness range is 0..99; HA uses 0..255.
                'brightness_scale': '99',
                'qos': 0,
                'payload_on': 'on',
                'payload_off': 'off'
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get(ATTR_ASSUMED_STATE))
        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('brightness'))
        fire_mqtt_message(self.hass, 'test_scale/status', 'off')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        fire_mqtt_message(self.hass, 'test_scale/status', 'on')
        self.hass.pool.block_till_done()
        # Device max (99) must map to HA max (255).
        fire_mqtt_message(self.hass, 'test_scale/brightness/status', '99')
        self.hass.pool.block_till_done()
        light_state = self.hass.states.get('light.test')
        self.hass.pool.block_till_done()
        self.assertEqual(255,
                         light_state.attributes['brightness'])

    def test_controlling_state_via_topic_with_templates(self):
        """Test the setting of the state with a template."""
        self.hass.config.components = ['mqtt']
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test_light_rgb/status',
                'command_topic': 'test_light_rgb/set',
                'brightness_state_topic': 'test_light_rgb/brightness/status',
                'rgb_state_topic': 'test_light_rgb/rgb/status',
                # Value templates extract the payload from a JSON envelope.
                'state_value_template': '{{ value_json.hello }}',
                'brightness_value_template': '{{ value_json.hello }}',
                'rgb_value_template': '{{ value_json.hello | join(",") }}',
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))
        self.assertIsNone(state.attributes.get('rgb_color'))
        fire_mqtt_message(self.hass, 'test_light_rgb/rgb/status',
                          '{"hello": [1, 2, 3]}')
        fire_mqtt_message(self.hass, 'test_light_rgb/status',
                          '{"hello": "ON"}')
        fire_mqtt_message(self.hass, 'test_light_rgb/brightness/status',
                          '{"hello": "50"}')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(50, state.attributes.get('brightness'))
        self.assertEqual([1, 2, 3], state.attributes.get('rgb_color'))

    def test_sending_mqtt_commands_and_optimistic(self):
        """Test the sending of command in optimistic mode."""
        self.hass.config.components = ['mqtt']
        # No state topics: the light must assume state (optimistic mode).
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'command_topic': 'test_light_rgb/set',
                'brightness_command_topic': 'test_light_rgb/brightness/set',
                'rgb_command_topic': 'test_light_rgb/rgb/set',
                'qos': 2,
                'payload_on': 'on',
                'payload_off': 'off'
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertTrue(state.attributes.get(ATTR_ASSUMED_STATE))
        light.turn_on(self.hass, 'light.test')
        self.hass.pool.block_till_done()
        self.assertEqual(('test_light_rgb/set', 'on', 2, False),
                         self.mock_publish.mock_calls[-1][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        light.turn_off(self.hass, 'light.test')
        self.hass.pool.block_till_done()
        self.assertEqual(('test_light_rgb/set', 'off', 2, False),
                         self.mock_publish.mock_calls[-1][1])
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        light.turn_on(self.hass, 'light.test', rgb_color=[75, 75, 75],
                      brightness=50)
        self.hass.pool.block_till_done()
        # Calls are threaded so we need to reorder them
        # (sorting by topic gives a deterministic brightness/rgb/state order).
        bright_call, rgb_call, state_call = \
            sorted((call[1] for call in self.mock_publish.mock_calls[-3:]),
                   key=lambda call: call[0])
        self.assertEqual(('test_light_rgb/set', 'on', 2, False),
                         state_call)
        self.assertEqual(('test_light_rgb/rgb/set', '75,75,75', 2, False),
                         rgb_call)
        self.assertEqual(('test_light_rgb/brightness/set', 50, 2, False),
                         bright_call)
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual((75, 75, 75), state.attributes['rgb_color'])
        self.assertEqual(50, state.attributes['brightness'])

    def test_show_brightness_if_only_command_topic(self):
        """Test the brightness if only a command topic is present."""
        self.hass.config.components = ['mqtt']
        # brightness_command_topic without brightness_state_topic: the
        # entity should still expose a (default) brightness once it is on.
        assert _setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'brightness_command_topic': 'test_light_rgb/brightness/set',
                'command_topic': 'test_light_rgb/set',
                'state_topic': 'test_light_rgb/status',
            }
        })
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_OFF, state.state)
        self.assertIsNone(state.attributes.get('brightness'))
        fire_mqtt_message(self.hass, 'test_light_rgb/status', 'ON')
        self.hass.pool.block_till_done()
        state = self.hass.states.get('light.test')
        self.assertEqual(STATE_ON, state.state)
        self.assertEqual(255, state.attributes.get('brightness'))
| mit |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractTigertranslationsOrg.py | 1 | 4472 | def extractTigertranslationsOrg(item):
'''
Parser for 'tigertranslations.org'
'''
ttmp = item['title'].replace("10 Years", "<snip> years").replace("10 Years Later", "<snip> years")
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(ttmp)
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('I Will Not Become an Enemy!', 'I Will Not Become an Enemy!', 'translated'),
('My Sister the Heroine, and I the Villainess', 'My Sister the Heroine, and I the Villainess', 'translated'),
('Isekai ni Kita Boku wa Kiyoubinbode Subaya-sa Tayorina Tabi o Suru', 'Isekai ni Kita Boku wa Kiyoubinbode Subaya-sa Tayorina Tabi o Suru', 'translated'),
('Jack of all Trades', 'Isekai ni Kita Boku wa Kiyoubinbode Subaya-sa Tayorina Tabi o Suru', 'translated'),
('Prison Dungeon and the Exiled Hero', 'Prison Dungeon and the Exiled Hero', 'translated'),
('Two Saints wander off into a Different World', 'Two Saints wander off into a Different World', 'translated'),
('Lioncourt War', 'A History of the Lioncourt War', 'translated'),
('realist demon king', 'The Legendary Rebuilding of a World by a Realist Demon King', 'translated'),
('Koko wa Ore ni Makasete Saki ni Ike to Itte kara 10 Nen ga Tattara Densetsu ni Natteita', 'Koko wa Ore ni Makasete Saki ni Ike to Itte kara 10 Nen ga Tattara Densetsu ni Natteita', 'translated'),
('Tensei Kenja no Isekai Raifu ~Daini no Shokugyo wo Ete, Sekai Saikyou ni Narimashita', 'Tensei Kenja no Isekai Raifu ~Daini no Shokugyo wo Ete, Sekai Saikyou ni Narimashita~', 'translated'),
('the legendary rebuilding of a world by a realist demon king', 'the legendary rebuilding of a world by a realist demon king', 'translated'),
('ohanabatake no maousama', 'ohanabatake no maousama', 'translated'),
('Only with Your Heart', 'Only with Your Heart', 'translated'),
('ryusousha ha shizukani kurashitai', 'ryusousha ha shizukani kurashitai', 'translated'),
('makai hongi', 'makai hongi', 'translated'),
('the cave king will live a paradise life -becoming the strongest with the mining skill?-', 'the cave king will live a paradise life -becoming the strongest with the mining skill?-', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
# Handle annoying series with numbers in the title.
if 'Koko wa Ore ni Makasete Saki ni Ike to Itte kara 10 Nen ga Tattara Densetsu ni Natteita' in item['tags'] and chp == 10:
return False
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | bsd-3-clause |
timlinux/inasafe | safe_extras/pydispatch/robust.py | 20 | 1833 | """Module implementing error-catching version of send (sendRobust)"""
from pydispatch.dispatcher import Any, Anonymous, liveReceivers, getAllReceivers
from pydispatch.robustapply import robustApply
def sendRobust(
        signal=Any,
        sender=Anonymous,
        *arguments, **named
        ):
    """Send signal from sender to all connected receivers catching errors

    signal -- (hashable) signal value, see connect for details
    sender -- the sender of the signal

        if Any, only receivers registered for Any will receive
        the message.

        if Anonymous, only receivers registered to receive
        messages from Anonymous or Any will receive the message

        Otherwise can be any python object (normally one
        registered with a connect if you actually want
        something to occur).

    arguments -- positional arguments which will be passed to
        *all* receivers. Note that this may raise TypeErrors
        if the receivers do not allow the particular arguments.
        Note also that arguments are applied before named
        arguments, so they should be used with care.

    named -- named arguments which will be filtered according
        to the parameters of the receivers to only provide those
        acceptable to the receiver.

    Return a list of tuple pairs [(receiver, response), ... ]

    if any receiver raises an error (specifically any subclass of Exception),
    the error instance is returned as the result for that receiver.
    """
    # Call each receiver with whatever arguments it can accept.
    # Return a list of tuple pairs [(receiver, response), ... ].
    responses = []
    for receiver in liveReceivers(getAllReceivers(sender, signal)):
        try:
            response = robustApply(
                receiver,
                signal=signal,
                sender=sender,
                *arguments,
                **named
            )
        # BUGFIX: was ``except Exception, err:`` -- Python-2-only syntax that
        # is a SyntaxError on Python 3.  The ``as`` form works on 2.6+ and 3.x.
        except Exception as err:
            responses.append((receiver, err))
        else:
            responses.append((receiver, response))
    return responses
| gpl-3.0 |
40223227/40223227 | static/Brython3.1.1-20150328-091302/Lib/numbers.py | 883 | 10398 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
    """All numbers inherit from this class.

    If you just want to check if an argument x is a number, without
    caring what kind, use isinstance(x, Number).
    """
    __slots__ = ()

    # Concrete numeric types must provide their own hash implementation.
    # Assigning None makes instances of direct subclasses unhashable until
    # they define __hash__ themselves.
    __hash__ = None

## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
    """Complex defines the operations that work on the builtin complex type.

    In short, those are: a conversion to complex, .real, .imag, +, -,
    *, /, abs(), .conjugate, ==, and !=.

    If it is given heterogenous arguments, and doesn't have special
    knowledge about them, it should fall back to the builtin complex
    type as described below.
    """
    __slots__ = ()

    @abstractmethod
    def __complex__(self):
        """Return a builtin complex instance. Called for complex(self)."""

    def __bool__(self):
        """True if self != 0. Called for bool(self)."""
        return self != 0

    @property
    @abstractmethod
    def real(self):
        """Retrieve the real component of this number.

        This should subclass Real.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def imag(self):
        """Retrieve the imaginary component of this number.

        This should subclass Real.
        """
        raise NotImplementedError

    @abstractmethod
    def __add__(self, other):
        """self + other"""
        raise NotImplementedError

    @abstractmethod
    def __radd__(self, other):
        """other + self"""
        raise NotImplementedError

    @abstractmethod
    def __neg__(self):
        """-self"""
        raise NotImplementedError

    @abstractmethod
    def __pos__(self):
        """+self"""
        raise NotImplementedError

    # Subtraction is derived from the abstract + and - (negation), so
    # concrete subclasses get it for free.
    def __sub__(self, other):
        """self - other"""
        return self + -other

    def __rsub__(self, other):
        """other - self"""
        return -self + other

    @abstractmethod
    def __mul__(self, other):
        """self * other"""
        raise NotImplementedError

    @abstractmethod
    def __rmul__(self, other):
        """other * self"""
        raise NotImplementedError

    @abstractmethod
    def __truediv__(self, other):
        """self / other: Should promote to float when necessary."""
        raise NotImplementedError

    @abstractmethod
    def __rtruediv__(self, other):
        """other / self"""
        raise NotImplementedError

    @abstractmethod
    def __pow__(self, exponent):
        """self**exponent; should promote to float or complex when necessary."""
        raise NotImplementedError

    @abstractmethod
    def __rpow__(self, base):
        """base ** self"""
        raise NotImplementedError

    @abstractmethod
    def __abs__(self):
        """Returns the Real distance from 0. Called for abs(self)."""
        raise NotImplementedError

    @abstractmethod
    def conjugate(self):
        """(x+y*i).conjugate() returns (x-y*i)."""
        raise NotImplementedError

    @abstractmethod
    def __eq__(self, other):
        """self == other"""
        raise NotImplementedError

    def __ne__(self, other):
        """self != other"""
        # The default __ne__ doesn't negate __eq__ until 3.0.
        return not (self == other)

# Register the builtin as a virtual subclass so isinstance checks pass.
Complex.register(complex)
class Real(Complex):
    """To Complex, Real adds the operations that work on real numbers.

    In short, those are: a conversion to float, trunc(), divmod,
    %, <, <=, >, and >=.

    Real also provides defaults for the derived operations.
    """
    __slots__ = ()

    @abstractmethod
    def __float__(self):
        """Any Real can be converted to a native float object.

        Called for float(self)."""
        raise NotImplementedError

    @abstractmethod
    def __trunc__(self):
        """trunc(self): Truncates self to an Integral.

        Returns an Integral i such that:
          * i>0 iff self>0;
          * abs(i) <= abs(self);
          * for any Integral j satisfying the first two conditions,
            abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
        i.e. "truncate towards 0".
        """
        raise NotImplementedError

    @abstractmethod
    def __floor__(self):
        """Finds the greatest Integral <= self."""
        raise NotImplementedError

    @abstractmethod
    def __ceil__(self):
        """Finds the least Integral >= self."""
        raise NotImplementedError

    @abstractmethod
    def __round__(self, ndigits=None):
        """Rounds self to ndigits decimal places, defaulting to 0.

        If ndigits is omitted or None, returns an Integral, otherwise
        returns a Real. Rounds half toward even.
        """
        raise NotImplementedError

    def __divmod__(self, other):
        """divmod(self, other): The pair (self // other, self % other).

        Sometimes this can be computed faster than the pair of
        operations.
        """
        return (self // other, self % other)

    def __rdivmod__(self, other):
        """divmod(other, self): The pair (self // other, self % other).

        Sometimes this can be computed faster than the pair of
        operations.
        """
        return (other // self, other % self)

    @abstractmethod
    def __floordiv__(self, other):
        """self // other: The floor() of self/other."""
        raise NotImplementedError

    @abstractmethod
    def __rfloordiv__(self, other):
        """other // self: The floor() of other/self."""
        raise NotImplementedError

    @abstractmethod
    def __mod__(self, other):
        """self % other"""
        raise NotImplementedError

    @abstractmethod
    def __rmod__(self, other):
        """other % self"""
        raise NotImplementedError

    @abstractmethod
    def __lt__(self, other):
        """self < other

        < on Reals defines a total ordering, except perhaps for NaN."""
        raise NotImplementedError

    @abstractmethod
    def __le__(self, other):
        """self <= other"""
        raise NotImplementedError

    # Concrete implementations of Complex abstract methods.
    def __complex__(self):
        """complex(self) == complex(float(self), 0)"""
        return complex(float(self))

    @property
    def real(self):
        """Real numbers are their real component."""
        return +self

    @property
    def imag(self):
        """Real numbers have no imaginary component."""
        return 0

    def conjugate(self):
        """Conjugate is a no-op for Reals."""
        return +self

# Register the builtin as a virtual subclass so isinstance checks pass.
Real.register(float)
class Rational(Real):
    """.numerator and .denominator should be in lowest terms."""
    __slots__ = ()

    @property
    @abstractmethod
    def numerator(self):
        raise NotImplementedError

    @property
    @abstractmethod
    def denominator(self):
        raise NotImplementedError

    # Concrete implementation of Real's conversion to float.
    def __float__(self):
        """float(self) = self.numerator / self.denominator

        It's important that this conversion use the integer's "true"
        division rather than casting one side to float before dividing
        so that ratios of huge integers convert without overflowing.
        """
        return self.numerator / self.denominator
class Integral(Rational):
    """Integral adds a conversion to int and the bit-string operations."""
    __slots__ = ()

    @abstractmethod
    def __int__(self):
        """int(self)"""
        raise NotImplementedError

    def __index__(self):
        """Called whenever an index is needed, such as in slicing"""
        return int(self)

    @abstractmethod
    def __pow__(self, exponent, modulus=None):
        """self ** exponent % modulus, but maybe faster.

        Accept the modulus argument if you want to support the
        3-argument version of pow(). Raise a TypeError if exponent < 0
        or any argument isn't Integral. Otherwise, just implement the
        2-argument version described in Complex.
        """
        raise NotImplementedError

    @abstractmethod
    def __lshift__(self, other):
        """self << other"""
        raise NotImplementedError

    @abstractmethod
    def __rlshift__(self, other):
        """other << self"""
        raise NotImplementedError

    @abstractmethod
    def __rshift__(self, other):
        """self >> other"""
        raise NotImplementedError

    @abstractmethod
    def __rrshift__(self, other):
        """other >> self"""
        raise NotImplementedError

    @abstractmethod
    def __and__(self, other):
        """self & other"""
        raise NotImplementedError

    @abstractmethod
    def __rand__(self, other):
        """other & self"""
        raise NotImplementedError

    @abstractmethod
    def __xor__(self, other):
        """self ^ other"""
        raise NotImplementedError

    @abstractmethod
    def __rxor__(self, other):
        """other ^ self"""
        raise NotImplementedError

    @abstractmethod
    def __or__(self, other):
        """self | other"""
        raise NotImplementedError

    @abstractmethod
    def __ror__(self, other):
        """other | self"""
        raise NotImplementedError

    @abstractmethod
    def __invert__(self):
        """~self"""
        raise NotImplementedError

    # Concrete implementations of Rational and Real abstract methods.
    def __float__(self):
        """float(self) == float(int(self))"""
        return float(int(self))

    @property
    def numerator(self):
        """Integers are their own numerators."""
        return +self

    @property
    def denominator(self):
        """Integers have a denominator of 1."""
        return 1

# Register the builtin as a virtual subclass so isinstance checks pass.
Integral.register(int)
DanteOnline/free-art | venv/lib/python3.4/site-packages/django/contrib/staticfiles/views.py | 581 | 1329 | """
Views and functions for serving static files. These are only to be used during
development, and SHOULD NOT be used in a production setting.
"""
import os
import posixpath
from django.conf import settings
from django.contrib.staticfiles import finders
from django.http import Http404
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
def serve(request, path, insecure=False, **kwargs):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the staticfiles finders.

    To use, put a URL pattern such as::

        from django.contrib.staticfiles import views

        url(r'^(?P<path>.*)$', views.serve)

    in your URLconf.

    It uses the django.views.static.serve() view to serve the found files.
    """
    # Refuse to serve outside DEBUG unless explicitly marked insecure.
    allowed = settings.DEBUG or insecure
    if not allowed:
        raise Http404
    cleaned_path = posixpath.normpath(unquote(path)).lstrip('/')
    located = finders.find(cleaned_path)
    if located:
        document_root, filename = os.path.split(located)
        return static.serve(request, filename, document_root=document_root,
                            **kwargs)
    if path.endswith('/') or path == '':
        raise Http404("Directory indexes are not allowed here.")
    raise Http404("'%s' could not be found" % path)
| gpl-3.0 |
runt18/nupic | src/nupic/frameworks/opf/expdescriptionhelpers.py | 1 | 14471 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import imp
from nupic.data.dictutils import rUpdate
# This file contains utility functions that are used
# internally by the prediction framework and may be imported
# by description files. Functions that are used only by
# the prediction framework should be in utils.py
#
# This file provides support for the following experiment description features:
#
# 1. Sub-experiment support
# 2. Lazy evaluators (e.g., DeferredDictLookup, applyValueGettersToContainer)
###############################################################################
# Sub-experiment support
###############################################################################
# Utility methods for description files are organized as a base description
# and an experiment based on that base description.
# The base description calls getConfig to get the configuration from the
# specific experiment, and the specific experiment calls importBaseDescription
# empty initial config allows base experiment to run by itself
_config = dict()
# Save the path to the current sub-experiment here during importBaseDescription()
subExpDir = None
# We will load the description file as a module, which allows us to
# use the debugger and see source code. But description files are frequently
# modified and we want to be able to easily reload them. To facilitate this,
# we reload with a unique module name ("pf_description%d") each time.
baseDescriptionImportCount = 0
def importBaseDescription(path, config):
    """Import a base experiment description module on behalf of a
    sub-experiment.

    path:   Path to the base description.py; if relative, it is resolved
            against the directory of the calling (sub-experiment) module,
            which is discovered via stack inspection.
    config: The sub-experiment's configuration overrides; stashed in the
            module-level ``_config`` so the base description can pick it up
            via updateConfigFromSubConfig().

    Returns the loaded base-description module. Side effects: sets the
    module globals ``subExpDir`` and ``_config`` and increments
    ``baseDescriptionImportCount`` so each load gets a unique module name
    (enables easy reloading; see comments above).
    """
    global baseDescriptionImportCount, _config, subExpDir
    if not os.path.isabs(path):
        # grab the path to the file doing the import
        import inspect
        callingFrame = inspect.stack()[1][0]
        callingFile = callingFrame.f_globals['__file__']
        subExpDir = os.path.dirname(callingFile)
        path = os.path.normpath(os.path.join(subExpDir, path))
    #print "Importing from: %s" % path
    # stash the config in a place where the loading module can find it.
    _config = config
    # Unique module name per import so a modified description reloads cleanly.
    mod = imp.load_source("pf_base_description{0:d}".format(baseDescriptionImportCount),
                          path)
    # don't want to override __file__ in our caller
    mod.__base_file__ = mod.__file__
    del mod.__file__
    baseDescriptionImportCount += 1
    return mod
def updateConfigFromSubConfig(config):
    """Recursively merge the sub-experiment's stashed overrides into *config*.

    The sub-experiment's configuration was deposited in the module-level
    ``_config`` by importBaseDescription(); apply it to ``config`` in place
    and reset ``_config`` so the overrides are consumed only once.
    """
    global _config
    overrides, _config = _config, dict()
    rUpdate(config, overrides)
def getSubExpDir():
    """Return the sub-experiment directory recorded by
    importBaseDescription(), or None if no sub-experiment was imported.
    """
    # No ``global`` declaration needed: module globals are readable without it.
    return subExpDir
###############################################################################
# Lazy evaluators (DeferredDictLookup, applyValueGettersToContainer, and friends)
###############################################################################
class ValueGetterBase(object):
    """ Base class for "value getters" (e.g., class DictValueGetter) that are used
    to resolve values of sub-fields after the experiment's config dictionary (in
    description.py) is defined and possibly updated from a sub-experiment.

    This solves the problem of referencing the config dictionary's field from within
    the definition of the dictionary itself (before the dictionary's own defintion
    is complete).

    NOTE: its possible that the referenced value does not yet exist at the
          time of instantiation of a given value-getter future. It will be
          resolved when the base description.py calls
          applyValueGettersToContainer().

    NOTE: The constructor of the derived classes MUST call our constructor.
    NOTE: The derived classes MUST override handleGetValue(self).

    NOTE: may be used by base and sub-experiments to derive their own custom value
          getters; however, their use is applicapble only where permitted, as
          described in comments within descriptionTemplate.tpl. See class
          DictValueGetter for implementation example.
    """

    class __NoResult(object):
        """ A private class that we use as a special unique value to indicate that
        our result cache instance variable does not hold a valid result.
        """
        pass

    def __init__(self):
        #print("NOTE: ValueGetterBase INITIALIZING")
        # __inLookup guards against re-entrant resolution of the same getter.
        self.__inLookup = False
        # __NoResult (the class object itself) is the "not yet resolved" sentinel.
        self.__cachedResult = self.__NoResult

    def __call__(self, topContainer):
        """ Resolves the referenced value. If the result is already cached,
        returns it to caller. Otherwise, invokes the pure virtual method
        handleGetValue. If handleGetValue() returns another value-getter, calls
        that value-getter to resolve the value. This may result in a chain of calls
        that terminates once the value is fully resolved to a non-value-getter value.
        Upon return, the value is fully resolved and cached, so subsequent calls will
        always return the cached value reference.

        topContainer: The top-level container (dict, tuple, or list [sub-]instance)
                      within whose context the value-getter is applied.

        Returns: The fully-resolved value that was referenced by the value-getter
                 instance
        """
        #print("IN ValueGetterBase.__CAll__()")
        assert(not self.__inLookup)
        if self.__cachedResult is not self.__NoResult:
            return self.__cachedResult
        self.__cachedResult = self.handleGetValue(topContainer)
        # If the subclass resolved to ANOTHER value-getter, chase the chain
        # (with the re-entrancy flag set) until a concrete value is reached.
        if isinstance(self.__cachedResult, ValueGetterBase):
            valueGetter = self.__cachedResult
            self.__inLookup = True
            self.__cachedResult = valueGetter(topContainer)
            self.__inLookup = False
        # The value should be full resolved at this point
        assert(self.__cachedResult is not self.__NoResult)
        assert(not isinstance(self.__cachedResult, ValueGetterBase))
        return self.__cachedResult

    def handleGetValue(self, topContainer):
        """ A "pure virtual" method. The derived class MUST override this method
        and return the referenced value. The derived class is NOT responsible for
        fully resolving the reference'd value in the event the value resolves to
        another ValueGetterBase-based instance -- this is handled automatically
        within ValueGetterBase implementation.

        topContainer: The top-level container (dict, tuple, or list [sub-]instance)
                      within whose context the value-getter is applied.

        Returns: The value referenced by this instance (which may be another
                 value-getter instance)
        """
        raise NotImplementedError("ERROR: ValueGetterBase is an abstract " + \
                                  "class; base class MUST override handleGetValue()")
class DictValueGetter(ValueGetterBase):
  """A value-getter that resolves to a field inside a top-level or nested
  dictionary.  See also DeferredDictLookup for the implicit-dictionary
  variant.
  """

  def __init__(self, referenceDict, *dictKeyChain):
    """
    referenceDict:  The explicit dictionary holding the field named by the
                    first key in dictKeyChain (e.g. the result of the built-in
                    globals() when looking up a dictionary referenced by a
                    module-level variable).  Pass None to defer the choice of
                    dictionary until resolution time, in which case the
                    topContainer given to handleGetValue() serves as the
                    reference dictionary instead; this lets the host module
                    designate the appropriate dictionary when it calls
                    applyValueGettersToContainer().

    dictKeyChain:   One or more key strings.  The first selects an entry of
                    the reference dictionary (possibly one that does not exist
                    yet); each additional key descends into a nested
                    sub-dictionary.  handleGetValue() returns the value found
                    at the final key.

    NOTE: the referenced value need not exist at construction time; it is
    resolved when the base description.py calls
    applyValueGettersToContainer().

    Example:
      config = dict(
        _dsEncoderFieldName2_N = 70,
        _dsEncoderFieldName2_W = 5,
        dsEncoderSchema = [
          dict(
            base=dict(
              fieldname='Name2', type='ScalarEncoder',
              name='Name2', minval=0, maxval=270, clipInput=True,
              n=DictValueGetter(None, '_dsEncoderFieldName2_N'),
              w=DictValueGetter(None, '_dsEncoderFieldName2_W')),
          ),
        ],
      )

      updateConfigFromSubConfig(config)
      applyValueGettersToContainer(config)
    """
    ValueGetterBase.__init__(self)

    assert(referenceDict is None or isinstance(referenceDict, dict))
    assert(len(dictKeyChain) >= 1)

    self.__refDict = referenceDict
    self.__keyChain = dictKeyChain

  def handleGetValue(self, topContainer):
    """Override of ValueGetterBase's "pure virtual" method: walk the key
    chain and return the value at its end.  That value may itself be another
    value-getter; ValueGetterBase resolves such chains automatically.

    topContainer: The top-level container within whose context the
                  value-getter is applied; used as the reference dictionary
                  when this instance was constructed with referenceDict=None.

    Returns: The value referenced by this instance (possibly another
             value-getter instance).
    """
    node = topContainer if self.__refDict is None else self.__refDict
    for key in self.__keyChain:
      node = node[key]
    return node
class DeferredDictLookup(DictValueGetter):
  """A DictValueGetter whose reference dictionary is supplied later: the
  dictionary eventually handed to applyValueGettersToContainer() (typically
  description.py's config dict, after it has been updated from the
  sub-experiment).  The key chain is therefore relative to that future
  dictionary.
  """

  def __init__(self, *dictKeyChain):
    """
    dictKeyChain:   One or more key strings.  The first selects an entry
                    (possibly not yet defined) of the dictionary that will be
                    given to applyValueGettersToContainer(); each additional
                    key descends into a nested sub-dictionary, and the value
                    at the final key is what this value-getter yields.

    NOTE: the referenced value need not exist at construction time; it is
    resolved when the base description.py calls
    applyValueGettersToContainer().

    Example:
      config = dict(
        _dsEncoderFieldName2_N = 70,
        _dsEncoderFieldName2_W = 5,
        dsEncoderSchema = [
          dict(
            base=dict(
              fieldname='Name2', type='ScalarEncoder',
              name='Name2', minval=0, maxval=270, clipInput=True,
              n=DeferredDictLookup('_dsEncoderFieldName2_N'),
              w=DeferredDictLookup('_dsEncoderFieldName2_W')),
          ),
        ],
      )

      updateConfigFromSubConfig(config)
      applyValueGettersToContainer(config)
    """
    # A None reference dictionary makes the base class fall back to the
    # dictionary passed to applyValueGettersToContainer() at resolution time.
    DictValueGetter.__init__(self, None, *dictKeyChain)
def applyValueGettersToContainer(container):
  """Recursively resolve, in place, every value-getter held by the given
  dict/list/tuple container, using the container itself as the top-level
  resolution context.
  """
  _applyValueGettersImpl(container, container, [])
def _applyValueGettersImpl(container, currentObj, recursionStack):
  """Recursive worker for applyValueGettersToContainer().

  container:      The top-level container; handed to each value-getter as its
                  resolution context.
  currentObj:     The dict/list/tuple (or leaf value) currently being visited.
  recursionStack: Stack of ancestor containers, used to break reference
                  cycles.
  """
  # Detect cycles.  Bug fix: ancestors must be compared by *identity* -- the
  # `in` operator compares with __eq__, so two distinct-but-equal containers
  # would be mistaken for a cycle and silently left unresolved (and the deep
  # equality compares were wasted work besides).
  for ancestor in recursionStack:
    if ancestor is currentObj:
      return

  # Sanity-check of our cycle-detection logic
  assert(len(recursionStack) < 1000)

  # Push the current object on our cycle-detection stack
  recursionStack.append(currentObj)

  # Resolve value-getters within dictionaries, lists and tuples
  if isinstance(currentObj, dict):
    for (key, value) in currentObj.items():
      if isinstance(value, ValueGetterBase):
        currentObj[key] = value(container)
      _applyValueGettersImpl(container, currentObj[key], recursionStack)
  elif isinstance(currentObj, list):
    for (i, value) in enumerate(currentObj):
      if isinstance(value, ValueGetterBase):
        currentObj[i] = value(container)
      _applyValueGettersImpl(container, currentObj[i], recursionStack)
  elif isinstance(currentObj, tuple):
    # A tuple's top-level elements are immutable, so they can never hold a
    # resolvable value-getter directly (the old code shared the list branch
    # and would have crashed on tuple item assignment); still recurse into
    # each element in case nested mutable containers hold value-getters.
    for value in currentObj:
      _applyValueGettersImpl(container, value, recursionStack)

  recursionStack.pop()
| agpl-3.0 |
elventear/ansible | lib/ansible/modules/cloud/amazon/cloudtrail.py | 16 | 8672 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Release/status metadata consumed by Ansible's documentation and QA tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
# Option documentation rendered by `ansible-doc cloudtrail`.
# Bug fixes versus the previous text: the bucket option was documented as
# `s3_bucket_prefix` although the module's argument_spec (and the EXAMPLES
# below) use `s3_bucket_name`, and `include_global_events` was documented as
# defaulting to false although the code defaults it to True.
DOCUMENTATION = """
---
module: cloudtrail
short_description: manage CloudTrail creation and deletion
description:
  - Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
version_added: "2.0"
author:
    - "Ansible Core Team"
    - "Ted Timmons"
requirements:
  - "boto >= 2.21"
options:
  state:
    description:
      - add or remove CloudTrail configuration.
    required: true
    choices: ['enabled', 'disabled']
  name:
    description:
      - name for given CloudTrail configuration.
      - This is a primary key and is used to identify the configuration.
  s3_bucket_name:
    description:
      - bucket to place CloudTrail in.
      - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
      - required when state=enabled.
    required: false
  s3_key_prefix:
    description:
      - prefix to keys in bucket. A trailing slash is not necessary and will be removed.
    required: false
  include_global_events:
    description:
      - record API calls from global services such as IAM and STS?
    required: false
    default: true
    choices: ["true", "false"]
  aws_secret_key:
    description:
      - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_secret_key', 'secret_key' ]
    version_added: "1.5"
  aws_access_key:
    description:
      - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
    required: false
    default: null
    aliases: [ 'ec2_access_key', 'access_key' ]
    version_added: "1.5"
  region:
    description:
      - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
    required: false
    aliases: ['aws_region', 'ec2_region']
    version_added: "1.5"
extends_documentation_fragment: aws
"""
EXAMPLES = """
- name: enable cloudtrail
local_action:
module: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket
s3_key_prefix: cloudtrail
region: us-east-1
- name: enable cloudtrail with different configuration
local_action:
module: cloudtrail
state: enabled
name: main
s3_bucket_name: ourbucket2
s3_key_prefix: ''
region: us-east-1
- name: remove cloudtrail
local_action:
module: cloudtrail
state: disabled
name: main
region: us-east-1
"""
# boto is optional at import time: record availability in HAS_BOTO and fail
# with a clear message inside main() instead of crashing on import.
HAS_BOTO = False
try:
    import boto
    import boto.cloudtrail
    from boto.regioninfo import RegionInfo
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds
class CloudTrailManager:
    """Thin wrapper over boto's CloudTrail connection for one region."""

    def __init__(self, module, region=None, **aws_connect_params):
        """Connect to CloudTrail in `region`; fails the module on bad/missing
        AWS credentials."""
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.changed = False
        try:
            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg=str(e))

    def view_status(self, name):
        """Return the trail's status dict (includes 'IsLogging')."""
        return self.conn.get_trail_status(name)

    def view(self, name):
        """Return the description dict for trail `name`, or None if absent."""
        ret = self.conn.describe_trails(trail_name_list=[name])
        trailList = ret.get('trailList', [])
        if len(trailList) == 1:
            return trailList[0]
        return None

    def exists(self, name=None):
        """Return True if a trail named `name` exists."""
        # view() already yields a dict or None; compare against None instead
        # of branching to literal True/False.
        return self.view(name) is not None

    def enable_logging(self, name):
        '''Turn on logging for a cloudtrail that already exists. Throws Exception on error.'''
        self.conn.start_logging(name)

    def enable(self, **create_args):
        """Create a new trail; kwargs are passed straight to create_trail."""
        return self.conn.create_trail(**create_args)

    def update(self, **create_args):
        """Update an existing trail; kwargs are passed straight to update_trail."""
        return self.conn.update_trail(**create_args)

    def delete(self, name):
        '''Delete a given cloudtrail configuration. Throws Exception on error.'''
        self.conn.delete_trail(name)
def main():
    """Module entry point: create/update ('enabled') or delete ('disabled') a
    CloudTrail trail, making sure logging is started for an enabled trail."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['enabled', 'disabled']},
        name={'required': True, 'type': 'str'},
        s3_bucket_name={'required': False, 'type': 'str'},
        s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
        include_global_events={'default': True, 'required': False, 'type': 'bool'},
    ))

    # BUG FIX: the old `required_together = (['state', 's3_bucket_name'])` was
    # just a list of strings (parentheses do not make a tuple), not the
    # list-of-lists AnsibleModule expects, so the constraint was never
    # enforced.  Per the documented contract the bucket is only needed when
    # enabling, which is what required_if expresses.
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True,
                           required_if=[['state', 'enabled', ['s3_bucket_name']]])

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
    aws_connect_params = dict(aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ct_name = module.params['name']
    s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix, really messes up the key structure.
    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')
    include_global_events = module.params['include_global_events']

    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)

    results = {'changed': False}
    if module.params['state'] == 'enabled':
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            results['view'] = cf_man.view(ct_name)
            # only update if the values have changed.
            if results['view']['S3BucketName'] != s3_bucket_name or \
               results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
               results['view']['IncludeGlobalServiceEvents'] != include_global_events:
                if not module.check_mode:
                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
                results['changed'] = True
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
            results['changed'] = True

        if results['exists'] or not module.check_mode:
            # given cloudtrail should exist now. Enable the logging.
            results['view_status'] = cf_man.view_status(ct_name)
            results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
            if not results['was_logging_enabled']:
                if not module.check_mode:
                    cf_man.enable_logging(ct_name)
                results['logging_enabled'] = True
                results['changed'] = True
        else:
            # BUG FIX: in check mode a missing trail was not actually created
            # above, so querying its status would throw; just report that
            # logging would be enabled.
            results['logging_enabled'] = True

    # delete the cloudtrail
    elif module.params['state'] == 'disabled':
        # check to see if it exists before deleting.
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            # it exists, so we should delete it and mark changed.
            if not module.check_mode:
                cf_man.delete(ct_name)
            results['changed'] = True

    module.exit_json(**results)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
alexperusso/Screen | WebContent/Scripts/common/AdminLTE/dependencies/jvectormap/converter/simplifier.py | 234 | 5985 | import argparse
import sys
import os
from osgeo import ogr
from osgeo import osr
import anyjson
import shapely.geometry
import shapely.ops
import codecs
import time
# Canonical string key for a coordinate (lon/lat, 8 decimal places) and the
# Douglas-Peucker simplification tolerance in map units.
format = '%.8f %.8f'
tolerance = 0.01
infile = '/Users/kirilllebedev/Maps/50m-admin-0-countries/ne_50m_admin_0_countries.shp'
outfile = 'map.shp'
# Open the datasource to operate on.
in_ds = ogr.Open( infile, update = 0 )
in_layer = in_ds.GetLayer( 0 )
in_defn = in_layer.GetLayerDefn()
# Create output file with similar information.
shp_driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists('map.shp'):
  shp_driver.DeleteDataSource( outfile )
shp_ds = shp_driver.CreateDataSource( outfile )
shp_layer = shp_ds.CreateLayer( in_defn.GetName(),
                                geom_type = in_defn.GetGeomType(),
                                srs = in_layer.GetSpatialRef() )
# Copy the input layer's attribute schema onto the output layer.
in_field_count = in_defn.GetFieldCount()
for fld_index in range(in_field_count):
  src_fd = in_defn.GetFieldDefn( fld_index )
  fd = ogr.FieldDefn( src_fd.GetName(), src_fd.GetType() )
  fd.SetWidth( src_fd.GetWidth() )
  fd.SetPrecision( src_fd.GetPrecision() )
  shp_layer.CreateField( fd )
# Load geometries
# Only polygonal features are kept; everything else is dropped.
geometries = []
for feature in in_layer:
  geometry = feature.GetGeometryRef()
  geometryType = geometry.GetGeometryType()
  if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
    shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
    #if not shapelyGeometry.is_valid:
      #buffer to fix selfcrosses
      #shapelyGeometry = shapelyGeometry.buffer(0)
    if shapelyGeometry:
      geometries.append(shapelyGeometry)
in_layer.ResetReading()
start = int(round(time.time() * 1000))
# Simplification
# Build an adjacency map: point-string -> dict of neighbouring point-strings.
# A point with more than two neighbours lies on a border shared by several
# rings and later becomes a "pivot" that must survive simplification.
points = []
connections = {}
counter = 0
for geom in geometries:
  counter += 1
  polygons = []
  if isinstance(geom, shapely.geometry.Polygon):
    polygons.append(geom)
  else:
    for polygon in geom:
      polygons.append(polygon)
  for polygon in polygons:
    if polygon.area > 0:
      lines = []
      lines.append(polygon.exterior)
      for line in polygon.interiors:
        lines.append(line)
      for line in lines:
        for i in range(len(line.coords)-1):
          indexFrom = i
          indexTo = i+1
          pointFrom = format % line.coords[indexFrom]
          pointTo = format % line.coords[indexTo]
          if pointFrom == pointTo:
            continue
          if not (pointFrom in connections):
            connections[pointFrom] = {}
          connections[pointFrom][pointTo] = 1
          if not (pointTo in connections):
            connections[pointTo] = {}
          connections[pointTo][pointFrom] = 1
# Elapsed milliseconds for the adjacency build (Python 2 print statement).
print int(round(time.time() * 1000)) - start
# Caches shared by simplifyRing(): already-simplified shared edges, and
# points that must be preserved as segment endpoints.
simplifiedLines = {}
pivotPoints = {}
def simplifyRing(ring):
  """Simplify one ring (exterior or interior) of a polygon.

  Returns a shapely LineString of simplified coordinates, or None when the
  ring collapses to 2 points or fewer.  Relies on the module-level globals
  `format`, `tolerance`, `connections`, `pivotPoints` and `simplifiedLines`
  so that an edge shared by two adjacent polygons is simplified once and the
  cached result reused (keeping shared borders free of cracks).
  """
  # Drop the duplicated closing coordinate of the ring.
  coords = list(ring.coords)[0:-1]
  simpleCoords = []
  # Scan for the first pivot: a point with >2 neighbours or one already
  # registered as a segment endpoint.
  isPivot = False
  pointIndex = 0
  while not isPivot and pointIndex < len(coords):
    pointStr = format % coords[pointIndex]
    pointIndex += 1
    isPivot = ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints))
  pointIndex = pointIndex - 1
  if not isPivot:
    # No shared border anywhere on this ring: simplify it as one closed line
    # and register its endpoints so neighbours will agree with it.
    simpleRing = shapely.geometry.LineString(coords).simplify(tolerance)
    if len(simpleRing.coords) <= 2:
      return None
    else:
      pivotPoints[format % coords[0]] = True
      pivotPoints[format % coords[-1]] = True
      simpleLineKey = format % coords[0]+':'+format % coords[1]+':'+format % coords[-1]
      simplifiedLines[simpleLineKey] = simpleRing.coords
      return simpleRing
  else:
    # Rotate the ring so it starts at the pivot, then simplify each
    # pivot-to-pivot segment independently, reusing cached segments (in
    # either direction) when a neighbouring polygon already produced them.
    points = coords[pointIndex:len(coords)]
    points.extend(coords[0:pointIndex+1])
    iFrom = 0
    for i in range(1, len(points)):
      pointStr = format % points[i]
      if ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints)):
        line = points[iFrom:i+1]
        # Key by (end, second-to-last, start): matches the reverse direction.
        lineKey = format % line[-1]+':'+format % line[-2]+':'+format % line[0]
        if lineKey in simplifiedLines:
          simpleLine = simplifiedLines[lineKey]
          simpleLine = list(reversed(simpleLine))
        else:
          simpleLine = shapely.geometry.LineString(line).simplify(tolerance).coords
          lineKey = format % line[0]+':'+format % line[1]+':'+format % line[-1]
          simplifiedLines[lineKey] = simpleLine
        simpleCoords.extend( simpleLine[0:-1] )
        iFrom = i
    if len(simpleCoords) <= 2:
      return None
    else:
      return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(polygon):
  """Simplify a polygon ring by ring.

  Returns a new shapely Polygon built from the simplified exterior and the
  surviving simplified interiors, or None when the exterior collapses.
  """
  exterior = simplifyRing(polygon.exterior)
  if exterior is None:
    return None
  candidates = (simplifyRing(ring) for ring in polygon.interiors)
  interiors = [ring for ring in candidates if ring is not None]
  return shapely.geometry.Polygon(exterior, interiors)
# Second pass: simplify every loaded geometry, keeping results aligned with
# the input feature order (None marks a feature that collapsed entirely).
results = []
for geom in geometries:
  polygons = []
  simplePolygons = []
  if isinstance(geom, shapely.geometry.Polygon):
    polygons.append(geom)
  else:
    for polygon in geom:
      polygons.append(polygon)
  for polygon in polygons:
    simplePolygon = simplifyPolygon(polygon)
    # NOTE(review): `_geom` is shapely's internal GEOS handle; None appears
    # to flag an empty geometry here -- confirm against the shapely version
    # in use.
    if not (simplePolygon is None or simplePolygon._geom is None):
      simplePolygons.append(simplePolygon)
  if len(simplePolygons) > 0:
    results.append(shapely.geometry.MultiPolygon(simplePolygons))
  else:
    results.append(None)
# Process all features in input layer.
# Copy each surviving feature's attributes and attach its simplified shape.
in_feat = in_layer.GetNextFeature()
counter = 0
while in_feat is not None:
  if results[counter] is not None:
    out_feat = ogr.Feature( feature_def = shp_layer.GetLayerDefn() )
    out_feat.SetFrom( in_feat )
    out_feat.SetGeometryDirectly(
      ogr.CreateGeometryFromWkb(
        shapely.wkb.dumps(
          results[counter]
        )
      )
    )
    shp_layer.CreateFeature( out_feat )
    out_feat.Destroy()
  else:
    print 'geometry is too small: '+in_feat.GetField(16)
  in_feat.Destroy()
  in_feat = in_layer.GetNextFeature()
  counter += 1
# Cleanup
shp_ds.Destroy()
in_ds.Destroy()
# Total elapsed milliseconds (Python 2 print statement).
print int(round(time.time() * 1000)) - start
lod/zookeepr | zk/model/social_network.py | 3 | 1833 | """The application's model objects"""
import sqlalchemy as sa
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm.collections import attribute_mapped_collection
from meta import Base
from pylons.controllers.util import abort
from meta import Session
from person_social_network_map import PersonSocialNetworkMap
class SocialNetwork(Base):
    """Stores the social networks that people might be members of."""
    __tablename__ = 'social_network'

    id = sa.Column(sa.types.Integer, primary_key=True)
    name = sa.Column(sa.types.Text, unique=True, nullable=False)
    url = sa.Column(sa.types.Text, nullable=False)
    logo = sa.Column(sa.types.Text, nullable=False)

    # Person -> PersonSocialNetworkMap rows for this network.
    by_person = sa.orm.relation(PersonSocialNetworkMap,
                      collection_class=attribute_mapped_collection('person'),
                      cascade="all, delete-orphan",
                      backref='social_network')
    # Person -> account-name view over by_person.
    people = association_proxy('by_person', 'account_name')
    # Note: You can't set via the people attribute

    def __init__(self, **kwargs):
        super(SocialNetwork, self).__init__(**kwargs)

    # Fix: classmethods previously named their first parameter `self` and
    # hard-coded SocialNetwork in the queries; use the conventional `cls`.
    @classmethod
    def find_by_name(cls, name, abort_404=True):
        """Return the network named `name`; abort with 404 when missing
        unless abort_404 is False (then return None)."""
        result = Session.query(cls).filter_by(name=name).first()
        if result is None and abort_404:
            abort(404, "No such social network object")
        return result

    @classmethod
    def find_by_id(cls, id, abort_404=True):
        """Return the network with primary key `id`; abort with 404 when
        missing unless abort_404 is False (then return None)."""
        result = Session.query(cls).filter_by(id=id).first()
        if result is None and abort_404:
            abort(404, "No such social network object")
        return result

    @classmethod
    def find_all(cls):
        """Return all networks ordered by name."""
        return Session.query(cls).order_by(cls.name).all()

    def __repr__(self):
        return '<SocialNetwork id="%s" name="%s">' % (self.id, self.name)
| gpl-2.0 |
faywong/FFPlayer | project/jni/python/src/Demo/classes/Range.py | 47 | 3126 | """Example of a generator: re-implement the built-in range function
without actually constructing the list of values.
OldStyleRange is coded in the way required to work in a 'for' loop before
iterators were introduced into the language; using __getitem__ and __len__ .
"""
def handleargs(arglist):
    """Take list of arguments and extract/create proper start, stop, and step
    values and return in a tuple.

    Raises TypeError for a wrong argument count or non-numeric arguments, and
    ValueError for a zero step.
    """
    # Check the count *outside* the try below: previously the count error was
    # raised inside it and immediately swallowed by `except TypeError`, which
    # replaced it with the misleading "must be numbers" message.
    if not 1 <= len(arglist) <= 3:
        raise TypeError("range() accepts 1-3 arguments, given", len(arglist))
    try:
        # int('abc') raises ValueError, int(None) raises TypeError; normalize
        # both to the documented TypeError (the old code missed ValueError).
        args = [int(x) for x in arglist]
    except (TypeError, ValueError):
        raise TypeError("range() arguments must be numbers or strings "
                        "representing numbers")
    if len(args) == 1:
        return 0, args[0], 1
    if len(args) == 2:
        return args[0], args[1], 1
    # Test the *converted* step: the old code compared the raw argument, so a
    # string step of '0' slipped through ('0' == 0 is False).
    if args[2] == 0:
        raise ValueError("step argument must not be zero")
    return tuple(args)
def genrange(*a):
    """Generator re-implementation of the built-in range function."""
    start, stop, step = handleargs(a)
    current = start
    while current < stop:
        yield current
        current += step
class oldrange:
    """Class implementing a range object.

    To the user the instances feel like immutable sequences
    (and you can't concatenate or slice them)

    Done using the old way (pre-iterators; __len__ and __getitem__) to have an
    object be used by a 'for' loop.
    """

    def __init__(self, *a):
        """Initialize start, stop, and step values along with calculating the
        number of values (what __len__ will return) in the range"""
        self.start, self.stop, self.step = handleargs(a)
        self.len = max(0, (self.stop - self.start) // self.step)

    def __repr__(self):
        """implement repr(x) which is also used by print"""
        return 'range(%r, %r, %r)' % (self.start, self.stop, self.step)

    def __len__(self):
        """implement len(x)"""
        return self.len

    def __getitem__(self, i):
        """implement x[i]"""
        # Fix: valid indices are 0 .. len-1.  The old bound (i <= self.len)
        # was off by one, so x[len(x)] returned a value past the range and
        # old-protocol iteration yielded one element too many.
        if 0 <= i < self.len:
            return self.start + self.step * i
        # Parenthesized raise works in both Python 2 and 3 (the old
        # `raise IndexError, msg` comma form is Python-2-only syntax).
        raise IndexError('range[i] index out of range')
def test():
    """Sanity-check all three range implementations against the built-in and
    compare their iteration speed (Python 2 only: print statements,
    __builtin__)."""
    import time, __builtin__
    #Just a quick sanity check
    correct_result = __builtin__.range(5, 100, 3)
    oldrange_result = list(oldrange(5, 100, 3))
    genrange_result = list(genrange(5, 100, 3))
    if genrange_result != correct_result or oldrange_result != correct_result:
        raise Exception("error in implementation:\ncorrect = %s"
                        "\nold-style = %s\ngenerator = %s" %
                        (correct_result, oldrange_result, genrange_result))
    print "Timings for range(1000):"
    # Time a full iteration of each implementation back to back.
    t1 = time.time()
    for i in oldrange(1000):
        pass
    t2 = time.time()
    for i in genrange(1000):
        pass
    t3 = time.time()
    for i in __builtin__.range(1000):
        pass
    t4 = time.time()
    print t2-t1, 'sec (old-style class)'
    print t3-t2, 'sec (generator)'
    print t4-t3, 'sec (built-in)'
# Run the sanity check and timing comparison when executed as a script.
if __name__ == '__main__':
    test()
| lgpl-2.1 |
1d20/INT20H | myapp/views.py | 1 | 2188 | from django.shortcuts import render
from django.http import HttpResponse
from json import dumps as to_json
from json import loads as from_json
from models import *
from neo import *
def type_all(request):
    """Return every approved Type as a JSON array."""
    approved = Type.objects.filter(is_approved=True)
    return HttpResponse(to_json([t.to_json() for t in approved]))
def type_add(request):
    """Create a new Type from POST data and link it to an existing type."""
    if request.method != "POST":
        return HttpResponse('request shoud be POST, but it is %s' % str(request.method))
    created = Type()
    created.desc = request.POST['desc']
    created.name = request.POST['name']
    created.attrs = request.POST['attrs']
    created.save()
    link = Type2Type()
    link.src_type = created
    link.dst_type = Type.objects.get(pk=int(request.POST['dst']))
    link.save()
    return HttpResponse(to_json({'status':'done', 'type':created.to_json()}))
def type_by_id(request, pk):
    """Return a single Type as JSON, or a plain 'not found' response."""
    try:
        found = Type.objects.get(pk=pk)
    except Type.DoesNotExist:
        return HttpResponse('not found')
    return HttpResponse(to_json(found.to_json()))
#===========================================================================================
def seed_nodes(request):
    """Populate the graph database with its seed fixture data."""
    seed()
    return HttpResponse('done')
def node_add(request):
    """Create a graph node of the POSTed type under the given parent node."""
    if request.method != "POST":
        return HttpResponse('request shoud be POST, but it is %s' % str(request.method))
    node_type = Type.objects.get(pk=request.POST['type'])
    create(node_type.name, from_json(request.POST['values']), request.POST['parent'])
    return HttpResponse('done')
def node_by_label(request, label):
    """Return the current user's nodes carrying the given label, as JSON."""
    matches = find_by_label(request.user.id, label)
    return HttpResponse(to_json(matches))
def node_by_id(request, pk):
    """Return one of the current user's nodes by id, as JSON."""
    found = find_by_id(request.user.id, pk)
    return HttpResponse(to_json(found))
def node_like(request, pk):
    """Toggle the current user's like on a node; report the action taken and
    the resulting like count."""
    action, like_count = like(request.user.id, pk)
    return HttpResponse(to_json({'action': action, 'likes': like_count}))
def node_top(request, count=20):
    """Return the current user's top `count` nodes (default 20) as JSON."""
    limit = int(count)
    best = top(request.user.id)[:limit]
    return HttpResponse(to_json(best))
| mit |
tdyas/pants | tests/python/pants_test/backend/jvm/tasks/test_jar_task.py | 1 | 13121 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.jar_task import JarBuilderTask, JarTask
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.testutil.jvm.jar_task_test_base import JarTaskTestBase
from pants.util.collections import ensure_str_list
from pants.util.contextutil import open_zip, temporary_dir, temporary_file
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, safe_open, safe_rmtree
class BaseJarTaskTest(JarTaskTestBase):
    """Shared fixture for jar-task tests: a scratch workdir, a prepared task,
    and helpers for creating and inspecting jar files."""

    @classmethod
    def alias_groups(cls):
        extra = BuildFileAliases(targets={"java_agent": JavaAgent, "jvm_binary": JvmBinary})
        return super().alias_groups().merge(extra)

    def setUp(self):
        super().setUp()
        self.workdir = safe_mkdtemp()
        self.jar_task = self.prepare_execute(self.context())

    def tearDown(self):
        super().tearDown()
        if self.workdir:
            safe_rmtree(self.workdir)

    @contextmanager
    def jarfile(self):
        """Yield the path of an empty temporary .jar file under the workdir."""
        with temporary_file(root_dir=self.workdir, suffix=".jar") as fd:
            fd.close()
            yield fd.name

    def assert_listing(self, jar, *expected_items):
        """Assert the jar holds exactly expected_items plus the standard
        META-INF entries."""
        expected = {"META-INF/", "META-INF/MANIFEST.MF"}
        expected.update(expected_items)
        self.assertEqual(expected, set(jar.namelist()))
class JarTaskTest(BaseJarTaskTest):
    """End-to-end tests of JarTask.open_jar: update vs overwrite semantics,
    custom manifests, Class-Path handling, and merging nested jars."""
    MAX_SUBPROC_ARGS = 50
    class TestJarTask(JarTask):
        # Minimal concrete subclass; execution is a no-op because the tests
        # drive open_jar() directly.
        def execute(self):
            pass
    @classmethod
    def task_type(cls):
        return cls.TestJarTask
    def setUp(self):
        super().setUp()
        self.set_options(max_subprocess_args=self.MAX_SUBPROC_ARGS)
        self.jar_task = self.prepare_execute(self.context())
    def test_update_write(self):
        """jar.write() stores a source file at the given archive path."""
        with temporary_dir() as chroot:
            _path = os.path.join(chroot, "a/b/c")
            safe_mkdir(_path)
            data_file = os.path.join(_path, "d.txt")
            with open(data_file, "w") as fd:
                fd.write("e")
            with self.jarfile() as existing_jarfile:
                with self.jar_task.open_jar(existing_jarfile) as jar:
                    jar.write(data_file, "f/g/h")
                with open_zip(existing_jarfile) as jar:
                    self.assert_listing(jar, "f/", "f/g/", "f/g/h")
                    self.assertEqual(b"e", jar.read("f/g/h"))
    def test_update_writestr(self):
        """jar.writestr() stores in-memory bytes at the given archive path."""
        def assert_writestr(path, contents, *entries):
            with self.jarfile() as existing_jarfile:
                with self.jar_task.open_jar(existing_jarfile) as jar:
                    jar.writestr(path, contents)
                with open_zip(existing_jarfile) as jar:
                    self.assert_listing(jar, *entries)
                    self.assertEqual(contents, jar.read(path))
        assert_writestr("a.txt", b"b", "a.txt")
        assert_writestr("a/b/c.txt", b"d", "a/", "a/b/", "a/b/c.txt")
    def test_overwrite_write(self):
        """overwrite=True replaces existing jar content with the new file."""
        with temporary_dir() as chroot:
            _path = os.path.join(chroot, "a/b/c")
            safe_mkdir(_path)
            data_file = os.path.join(_path, "d.txt")
            with open(data_file, "w") as fd:
                fd.write("e")
            with self.jarfile() as existing_jarfile:
                with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
                    jar.write(data_file, "f/g/h")
                with open_zip(existing_jarfile) as jar:
                    self.assert_listing(jar, "f/", "f/g/", "f/g/h")
                    self.assertEqual(b"e", jar.read("f/g/h"))
    def test_overwrite_writestr(self):
        """overwrite=True with writestr() yields a jar with only the new entry."""
        with self.jarfile() as existing_jarfile:
            with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
                jar.writestr("README", b"42")
            with open_zip(existing_jarfile) as jar:
                self.assert_listing(jar, "README")
                self.assertEqual(b"42", jar.read("README"))
    @contextmanager
    def _test_custom_manifest(self):
        """Shared scaffold: yields (jar, manifest_contents) and asserts on exit
        that the caller's write installed the custom manifest."""
        manifest_contents = b"Manifest-Version: 1.0\r\nCreated-By: test\r\n\r\n"
        with self.jarfile() as existing_jarfile:
            with self.jar_task.open_jar(existing_jarfile, overwrite=True) as jar:
                jar.writestr("README", b"42")
            with open_zip(existing_jarfile) as jar:
                self.assert_listing(jar, "README")
                self.assertEqual(b"42", jar.read("README"))
                self.assertNotEqual(manifest_contents, jar.read("META-INF/MANIFEST.MF"))
            with self.jar_task.open_jar(existing_jarfile, overwrite=False) as jar:
                yield jar, manifest_contents
            with open_zip(existing_jarfile) as jar:
                self.assert_listing(jar, "README")
                self.assertEqual(b"42", jar.read("README"))
                self.assertEqual(manifest_contents, jar.read("META-INF/MANIFEST.MF"))
    def test_custom_manifest_str(self):
        """A manifest can be supplied as an in-memory string."""
        with self._test_custom_manifest() as (jar, manifest_contents):
            jar.writestr("META-INF/MANIFEST.MF", manifest_contents)
    def test_custom_manifest_file(self):
        """A manifest can be supplied from an arbitrarily named source file."""
        with self._test_custom_manifest() as (jar, manifest_contents):
            with safe_open(os.path.join(safe_mkdtemp(), "any_source_file"), "wb") as fp:
                fp.write(manifest_contents)
            jar.write(fp.name, dest="META-INF/MANIFEST.MF")
    def test_custom_manifest_dir(self):
        """A manifest is picked up from META-INF/MANIFEST.MF in a written dir."""
        with self._test_custom_manifest() as (jar, manifest_contents):
            basedir = safe_mkdtemp()
            with safe_open(os.path.join(basedir, "META-INF/MANIFEST.MF"), "wb") as fp:
                fp.write(manifest_contents)
            jar.write(basedir)
    def test_custom_manifest_dir_custom_dest(self):
        """A manifest dir can be remapped into the jar under a custom dest."""
        with self._test_custom_manifest() as (jar, manifest_contents):
            basedir = safe_mkdtemp()
            with safe_open(os.path.join(basedir, "MANIFEST.MF"), "wb") as fp:
                fp.write(manifest_contents)
            jar.write(basedir, dest="META-INF")
    def test_classpath(self):
        """append_classpath() overwrites the Class-Path manifest attribute
        across open_jar sessions (rather than appending to it)."""
        def manifest_content(classpath):
            return (
                (
                    "Manifest-Version: 1.0\r\n"
                    + "Class-Path: {}\r\n"
                    + "Created-By: org.pantsbuild.tools.jar.JarBuilder\r\n\r\n"
                )
                .format(" ".join(ensure_str_list(classpath, allow_single_str=True)))
                .encode()
            )
        def assert_classpath(classpath):
            with self.jarfile() as existing_jarfile:
                # Note for -classpath, there is no update, it's already overwriting.
                # To verify this, first add a random classpath, and verify it's overwritten by
                # the supplied classpath value.
                with self.jar_task.open_jar(existing_jarfile) as jar:
                    # prefix with workdir since Class-Path is relative to jarfile.path
                    jar.append_classpath(
                        os.path.join(self.workdir, "something_should_be_overwritten.jar")
                    )
                with self.jar_task.open_jar(existing_jarfile) as jar:
                    jar.append_classpath(
                        [os.path.join(self.workdir, jar_path) for jar_path in classpath]
                    )
                with open_zip(existing_jarfile) as jar:
                    self.assertEqual(manifest_content(classpath), jar.read("META-INF/MANIFEST.MF"))
        assert_classpath(["a.jar"])
        assert_classpath(["a.jar", "b.jar"])
    def test_update_jars(self):
        """writejar() merges another jar's entries alongside existing ones."""
        with self.jarfile() as main_jar:
            with self.jarfile() as included_jar:
                with self.jar_task.open_jar(main_jar) as jar:
                    jar.writestr("a/b", b"c")
                with self.jar_task.open_jar(included_jar) as jar:
                    jar.writestr("e/f", b"g")
                with self.jar_task.open_jar(main_jar) as jar:
                    jar.writejar(included_jar)
                with open_zip(main_jar) as jar:
                    self.assert_listing(jar, "a/", "a/b", "e/", "e/f")
    def test_overwrite_jars(self):
        """Many writejar() calls with overwrite=True exercise the @argfile code
        path once the -jars argument list exceeds max_subprocess_args."""
        with self.jarfile() as main_jar:
            with self.jarfile() as included_jar:
                with self.jar_task.open_jar(main_jar) as jar:
                    jar.writestr("a/b", b"c")
                with self.jar_task.open_jar(included_jar) as jar:
                    jar.writestr("e/f", b"g")
                # Create lots of included jars (even though they're all the same)
                # so the -jars argument to jar-tool will exceed max_args limit thus
                # switch to @argfile calling style.
                with self.jar_task.open_jar(main_jar, overwrite=True) as jar:
                    for i in range(self.MAX_SUBPROC_ARGS + 1):
                        jar.writejar(included_jar)
                with open_zip(main_jar) as jar:
                    self.assert_listing(jar, "e/", "e/f")
class JarBuilderTest(BaseJarTaskTest):
    """Tests for JarBuilderTask: adding targets' classfiles and manifest
    entries to a jar via create_jar_builder()."""

    class TestJarBuilderTask(JarBuilderTask):
        # Minimal concrete subclass; execute() is never driven directly —
        # the tests exercise create_jar_builder() on an open jar instead.
        def execute(self):
            pass

    @classmethod
    def task_type(cls):
        # Tell the harness which task class to instantiate.
        return cls.TestJarBuilderTask

    def setUp(self):
        super().setUp()
        self.set_options(max_subprocess_args=100)

    def test_agent_manifest(self):
        """A java_agent target's attributes must land in the jar manifest."""
        self.add_to_build_file(
            "src/java/pants/agents",
            dedent(
                """
                java_agent(
                  name='fake_agent',
                  sources=[],
                  premain='bob',
                  agent_class='fred',
                  can_redefine=True,
                  can_retransform=True,
                  can_set_native_method_prefix=True
                )"""
            ).strip(),
        )
        java_agent = self.target("src/java/pants/agents:fake_agent")
        context = self.context(target_roots=[java_agent])
        jar_builder_task = self.prepare_execute(context)
        # Provide a fake compiled classfile for the agent target.
        self.add_to_runtime_classpath(context, java_agent, {"FakeAgent.class": "0xCAFEBABE"})
        with self.jarfile() as existing_jarfile:
            with jar_builder_task.open_jar(existing_jarfile) as jar:
                with jar_builder_task.create_jar_builder(jar) as jar_builder:
                    jar_builder.add_target(java_agent)
            with open_zip(existing_jarfile) as jar:
                self.assert_listing(jar, "FakeAgent.class")
                self.assertEqual(b"0xCAFEBABE", jar.read("FakeAgent.class"))
                manifest = jar.read("META-INF/MANIFEST.MF").decode().strip()
                # Manifest lines are 'Key: value'; split each on the first colon.
                all_entries = dict(
                    tuple(re.split(r"\s*:\s*", line, 1)) for line in manifest.splitlines()
                )
                expected_entries = {
                    "Agent-Class": "fred",
                    "Premain-Class": "bob",
                    "Can-Redefine-Classes": "true",
                    "Can-Retransform-Classes": "true",
                    "Can-Set-Native-Method-Prefix": "true",
                }
                # The manifest may carry extra keys; require only that every
                # expected entry is present with the expected value.
                self.assertEqual(
                    set(expected_entries.items()),
                    set(expected_entries.items()).intersection(set(all_entries.items())),
                )

    def test_manifest_items(self):
        """jvm_binary manifest_entries must be copied into the manifest."""
        self.add_to_build_file(
            "src/java/hello",
            dedent(
                """
                jvm_binary(
                  name='hello',
                  main='hello.Hello',
                  manifest_entries = {
                    'Foo': 'foo-value',
                    'Implementation-Version': '1.2.3',
                  },
                )"""
            ).strip(),
        )
        binary_target = self.target("src/java/hello:hello")
        context = self.context(target_roots=[binary_target])
        self.add_to_runtime_classpath(context, binary_target, {"Hello.class": "0xDEADBEEF"})
        jar_builder_task = self.prepare_execute(context)
        with self.jarfile() as existing_jarfile:
            with jar_builder_task.open_jar(existing_jarfile) as jar:
                with jar_builder_task.create_jar_builder(jar) as jar_builder:
                    jar_builder.add_target(binary_target)
            with open_zip(existing_jarfile) as jar:
                manifest = jar.read("META-INF/MANIFEST.MF").decode().strip()
                all_entries = dict(
                    tuple(re.split(r"\s*:\s*", line, 1)) for line in manifest.splitlines()
                )
                expected_entries = {
                    "Foo": "foo-value",
                    "Implementation-Version": "1.2.3",
                }
                self.assertEqual(
                    set(expected_entries.items()),
                    set(expected_entries.items()).intersection(set(all_entries.items())),
                )
| apache-2.0 |
sgraham/nope | build/android/buildbot/bb_device_status_check.py | 3 | 15555 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A class to keep track of devices across builds and report state."""
import json
import logging
import optparse
import os
import psutil
import re
import signal
import smtplib
import subprocess
import sys
import time
import urllib
import bb_annotations
import bb_utils
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, 'util', 'lib',
'common'))
import perf_tests_results_helper # pylint: disable=F0401
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import android_commands
from pylib import constants
from pylib.cmd_helper import GetCmdOutput
from pylib.device import device_blacklist
from pylib.device import device_list
from pylib.device import device_utils
def DeviceInfo(serial, options):
  """Gathers info on a device via various adb calls.

  Args:
    serial: The serial of the attached device to construct info about.
    options: Parsed command-line options; only no_provisioning_check is read.

  Returns:
    Tuple of device type, build id, battery level, report as a string,
    error messages, boolean indicating whether or not device can be used
    for testing, and a JSON-serializable dict of the gathered fields.
  """
  device_adb = device_utils.DeviceUtils(serial)

  device_type = device_adb.build_product
  device_build = device_adb.build_id
  device_build_type = device_adb.build_type
  device_product_name = device_adb.product_name

  try:
    battery_info = device_adb.old_interface.GetBatteryInfo()
  except Exception as e:
    # Battery info is best-effort: fall back to an empty dict (treated as
    # level 100 below) instead of failing the whole device check.
    battery_info = {}
    logging.error('Unable to obtain battery info for %s, %s', serial, e)

  def _GetData(re_expression, line, lambda_function=lambda x: x):
    # Return the first match of re_expression in line, passed through
    # lambda_function; 'Unknown' when line is empty or nothing matches.
    if not line:
      return 'Unknown'
    found = re.findall(re_expression, line)
    if found and len(found):
      return lambda_function(found[0])
    return 'Unknown'

  battery_level = int(battery_info.get('level', 100))
  # Keep only the last 6 digits of the device id reported by dumpsys.
  imei_slice = _GetData(r'Device ID = (\d+)',
                        device_adb.old_interface.GetSubscriberInfo(),
                        lambda x: x[-6:])
  json_data = {
    'serial': serial,
    'type': device_type,
    'build': device_build,
    'build_detail': device_adb.GetProp('ro.build.fingerprint'),
    'battery': battery_info,
    'imei_slice': imei_slice,
    'wifi_ip': device_adb.GetProp('dhcp.wlan0.ipaddress'),
  }
  report = ['Device %s (%s)' % (serial, device_type),
            ' Build: %s (%s)' %
            (device_build, json_data['build_detail']),
            ' Current Battery Service state: ',
            '\n'.join([' %s: %s' % (k, v)
                       for k, v in battery_info.iteritems()]),
            ' IMEI slice: %s' % imei_slice,
            ' Wifi IP: %s' % json_data['wifi_ip'],
            '']

  errors = []
  dev_good = True
  if battery_level < 15:
    # Below 15% the device is excluded from testing until it recharges.
    errors += ['Device critically low in battery. Will add to blacklist.']
    dev_good = False
  if not device_adb.old_interface.IsDeviceCharging():
    if device_adb.old_interface.CanControlUsbCharging():
      device_adb.old_interface.EnableUsbCharging()
    else:
      logging.error('Device %s is not charging' % serial)
  if not options.no_provisioning_check:
    setup_wizard_disabled = (
        device_adb.GetProp('ro.setupwizard.mode') == 'DISABLED')
    # 'user' builds cannot disable the setup wizard, so only flag the
    # other build types.
    if not setup_wizard_disabled and device_build_type != 'user':
      errors += ['Setup wizard not disabled. Was it provisioned correctly?']
  if (device_product_name == 'mantaray' and
      battery_info.get('AC powered', None) != 'true'):
    errors += ['Mantaray device not connected to AC power.']

  full_report = '\n'.join(report)
  return (device_type, device_build, battery_level, full_report, errors,
          dev_good, json_data)
def CheckForMissingDevices(options, adb_online_devs):
  """Uses file of previous online devices to detect broken phones.

  Args:
    options: out_dir parameter of options argument is used as the base
             directory to load and update the cache file.
    adb_online_devs: A list of serial numbers of the currently visible
                     and online attached devices.

  Returns:
    A list of error-report lines when devices are missing (or when no
    devices are known at all); otherwise None.
  """
  # TODO(navabi): remove this once the bug that causes different number
  # of devices to be detected between calls is fixed.
  logger = logging.getLogger()
  logger.setLevel(logging.INFO)

  out_dir = os.path.abspath(options.out_dir)

  # last_devices denotes all known devices prior to this run
  last_devices_path = os.path.join(out_dir, device_list.LAST_DEVICES_FILENAME)
  last_missing_devices_path = os.path.join(out_dir,
      device_list.LAST_MISSING_DEVICES_FILENAME)
  try:
    last_devices = device_list.GetPersistentDeviceList(last_devices_path)
  except IOError:
    # Ignore error, file might not exist
    last_devices = []

  try:
    last_missing_devices = device_list.GetPersistentDeviceList(
        last_missing_devices_path)
  except IOError:
    last_missing_devices = []

  missing_devs = list(set(last_devices) - set(adb_online_devs))
  # Only devices that were NOT already missing last run trigger an email,
  # so repeated checks do not re-alert for the same device.
  new_missing_devs = list(set(missing_devs) - set(last_missing_devices))

  if new_missing_devs and os.environ.get('BUILDBOT_SLAVENAME'):
    logging.info('new_missing_devs %s' % new_missing_devs)
    devices_missing_msg = '%d devices not detected.' % len(missing_devs)
    bb_annotations.PrintSummaryText(devices_missing_msg)

    from_address = 'chrome-bot@chromium.org'
    to_addresses = ['chrome-labs-tech-ticket@google.com',
                    'chrome-android-device-alert@google.com']
    cc_addresses = ['chrome-android-device-alert@google.com']
    subject = 'Devices offline on %s, %s, %s' % (
      os.environ.get('BUILDBOT_SLAVENAME'),
      os.environ.get('BUILDBOT_BUILDERNAME'),
      os.environ.get('BUILDBOT_BUILDNUMBER'))
    msg = ('Please reboot the following devices:\n%s' %
           '\n'.join(map(str, new_missing_devs)))
    SendEmail(from_address, to_addresses, cc_addresses, subject, msg)

  # Persist the union of everything ever seen plus the current missing set
  # for the next run's comparison.
  all_known_devices = list(set(adb_online_devs) | set(last_devices))
  device_list.WritePersistentDeviceList(last_devices_path, all_known_devices)
  device_list.WritePersistentDeviceList(last_missing_devices_path, missing_devs)

  if not all_known_devices:
    # This can happen if for some reason the .last_devices file is not
    # present or if it was empty.
    return ['No online devices. Have any devices been plugged in?']
  if missing_devs:
    devices_missing_msg = '%d devices not detected.' % len(missing_devs)
    bb_annotations.PrintSummaryText(devices_missing_msg)

    # TODO(navabi): Debug by printing both output from GetCmdOutput and
    # GetAttachedDevices to compare results.
    crbug_link = ('https://code.google.com/p/chromium/issues/entry?summary='
                  '%s&comment=%s&labels=Restrict-View-Google,OS-Android,Infra' %
                  (urllib.quote('Device Offline'),
                   urllib.quote('Buildbot: %s %s\n'
                                'Build: %s\n'
                                '(please don\'t change any labels)' %
                                (os.environ.get('BUILDBOT_BUILDERNAME'),
                                 os.environ.get('BUILDBOT_SLAVENAME'),
                                 os.environ.get('BUILDBOT_BUILDNUMBER')))))
    return ['Current online devices: %s' % adb_online_devs,
            '%s are no longer visible. Were they removed?\n' % missing_devs,
            'SHERIFF:\n',
            '@@@STEP_LINK@Click here to file a bug@%s@@@\n' % crbug_link,
            'Cache file: %s\n\n' % last_devices_path,
            'adb devices: %s' % GetCmdOutput(['adb', 'devices']),
            'adb devices(GetAttachedDevices): %s' % adb_online_devs]
  else:
    new_devs = set(adb_online_devs) - set(last_devices)
    if new_devs and os.path.exists(last_devices_path):
      bb_annotations.PrintWarning()
      bb_annotations.PrintSummaryText(
          '%d new devices detected' % len(new_devs))
      print ('New devices detected %s. And now back to your '
             'regularly scheduled program.' % list(new_devs))
def SendEmail(from_address, to_addresses, cc_addresses, subject, msg):
msg_body = '\r\n'.join(['From: %s' % from_address,
'To: %s' % ', '.join(to_addresses),
'CC: %s' % ', '.join(cc_addresses),
'Subject: %s' % subject, '', msg])
try:
server = smtplib.SMTP('localhost')
server.sendmail(from_address, to_addresses, msg_body)
server.quit()
except Exception as e:
print 'Failed to send alert email. Error: %s' % e
def RestartUsb():
if not os.path.isfile('/usr/bin/restart_usb'):
print ('ERROR: Could not restart usb. /usr/bin/restart_usb not installed '
'on host (see BUG=305769).')
return False
lsusb_proc = bb_utils.SpawnCmd(['lsusb'], stdout=subprocess.PIPE)
lsusb_output, _ = lsusb_proc.communicate()
if lsusb_proc.returncode:
print 'Error: Could not get list of USB ports (i.e. lsusb).'
return lsusb_proc.returncode
usb_devices = [re.findall(r'Bus (\d\d\d) Device (\d\d\d)', lsusb_line)[0]
for lsusb_line in lsusb_output.strip().split('\n')]
all_restarted = True
# Walk USB devices from leaves up (i.e reverse sorted) restarting the
# connection. If a parent node (e.g. usb hub) is restarted before the
# devices connected to it, the (bus, dev) for the hub can change, making the
# output we have wrong. This way we restart the devices before the hub.
for (bus, dev) in reversed(sorted(usb_devices)):
# Can not restart root usb connections
if dev != '001':
return_code = bb_utils.RunCmd(['/usr/bin/restart_usb', bus, dev])
if return_code:
print 'Error restarting USB device /dev/bus/usb/%s/%s' % (bus, dev)
all_restarted = False
else:
print 'Restarted USB device /dev/bus/usb/%s/%s' % (bus, dev)
return all_restarted
def KillAllAdb():
  """Kill every adb process on the host, escalating through
  SIGTERM -> SIGQUIT -> SIGKILL, then report anything still alive."""
  def GetAllAdb():
    # Yield live processes whose name contains 'adb'. Processes can exit
    # between enumeration and inspection, so lookup errors are swallowed.
    for p in psutil.process_iter():
      try:
        if 'adb' in p.name:
          yield p
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass

  for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
    for p in GetAllAdb():
      try:
        print 'kill %d %d (%s [%s])' % (sig, p.pid, p.name,
                                        ' '.join(p.cmdline))
        p.send_signal(sig)
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
  # Anything re-enumerated here survived even SIGKILL.
  for p in GetAllAdb():
    try:
      print 'Unable to kill %d (%s [%s])' % (p.pid, p.name, ' '.join(p.cmdline))
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      pass
def main():
  """Check the status of attached devices, blacklist bad ones, and report.

  Returns:
    None (exit 0) when the check passes, 1 when no devices are attached,
    2 when every attached device failed the check.
  """
  parser = optparse.OptionParser()
  parser.add_option('', '--out-dir',
                    help='Directory where the device path is stored',
                    default=os.path.join(constants.DIR_SOURCE_ROOT, 'out'))
  parser.add_option('--no-provisioning-check', action='store_true',
                    help='Will not check if devices are provisioned properly.')
  parser.add_option('--device-status-dashboard', action='store_true',
                    help='Output device status data for dashboard.')
  parser.add_option('--restart-usb', action='store_true',
                    help='Restart USB ports before running device check.')
  parser.add_option('--json-output',
                    help='Output JSON information into a specified file.')

  options, args = parser.parse_args()
  if args:
    parser.error('Unknown options %s' % args)

  # Remove the last build's "bad devices" before checking device statuses.
  device_blacklist.ResetBlacklist()

  try:
    expected_devices = device_list.GetPersistentDeviceList(
        os.path.join(options.out_dir, device_list.LAST_DEVICES_FILENAME))
  except IOError:
    expected_devices = []
  devices = android_commands.GetAttachedDevices()
  # Only restart usb if devices are missing.
  if set(expected_devices) != set(devices):
    print 'expected_devices: %s, devices: %s' % (expected_devices, devices)
    KillAllAdb()
    retries = 5
    usb_restarted = True
    if options.restart_usb:
      if not RestartUsb():
        usb_restarted = False
        bb_annotations.PrintWarning()
        print 'USB reset stage failed, wait for any device to come back.'
    while retries:
      print 'retry adb devices...'
      time.sleep(1)
      devices = android_commands.GetAttachedDevices()
      if set(expected_devices) == set(devices):
        # All devices are online, keep going.
        break
      if not usb_restarted and devices:
        # The USB wasn't restarted, but there's at least one device online.
        # No point in trying to wait for all devices.
        break
      retries -= 1

  # TODO(navabi): Test to make sure this fails and then fix call
  offline_devices = android_commands.GetAttachedDevices(
      hardware=False, emulator=False, offline=True)

  types, builds, batteries, reports, errors, json_data = [], [], [], [], [], []
  fail_step_lst = []
  if devices:
    # DeviceInfo returns a 7-tuple per device; zip(*...) transposes the
    # per-device tuples into parallel per-field sequences.
    types, builds, batteries, reports, errors, fail_step_lst, json_data = (
        zip(*[DeviceInfo(dev, options) for dev in devices]))

  # Write device info to file for buildbot info display.
  if os.path.exists('/home/chrome-bot'):
    with open('/home/chrome-bot/.adb_device_info', 'w') as f:
      for device in json_data:
        try:
          f.write('%s %s %s %.1fC %s%%\n' % (device['serial'], device['type'],
              device['build'], float(device['battery']['temperature']) / 10,
              device['battery']['level']))
        except Exception:
          # Best-effort display data only; skip devices with missing fields.
          pass

  err_msg = CheckForMissingDevices(options, devices) or []

  unique_types = list(set(types))
  unique_builds = list(set(builds))

  bb_annotations.PrintMsg('Online devices: %d. Device types %s, builds %s'
                          % (len(devices), unique_types, unique_builds))
  print '\n'.join(reports)

  for serial, dev_errors in zip(devices, errors):
    if dev_errors:
      err_msg += ['%s errors:' % serial]
      err_msg += [' %s' % error for error in dev_errors]

  if err_msg:
    bb_annotations.PrintWarning()
    msg = '\n'.join(err_msg)
    print msg
    from_address = 'buildbot@chromium.org'
    to_addresses = ['chromium-android-device-alerts@google.com']
    bot_name = os.environ.get('BUILDBOT_BUILDERNAME')
    slave_name = os.environ.get('BUILDBOT_SLAVENAME')
    subject = 'Device status check errors on %s, %s.' % (slave_name, bot_name)
    SendEmail(from_address, to_addresses, [], subject, msg)

  if options.device_status_dashboard:
    perf_tests_results_helper.PrintPerfResult('BotDevices', 'OnlineDevices',
                                              [len(devices)], 'devices')
    perf_tests_results_helper.PrintPerfResult('BotDevices', 'OfflineDevices',
                                              [len(offline_devices)], 'devices',
                                              'unimportant')
    for serial, battery in zip(devices, batteries):
      perf_tests_results_helper.PrintPerfResult('DeviceBattery', serial,
                                                [battery], '%',
                                                'unimportant')

  if options.json_output:
    with open(options.json_output, 'wb') as f:
      f.write(json.dumps(json_data, indent=4))

  # Blacklist any device whose DeviceInfo check flagged it as bad.
  num_failed_devs = 0
  for fail_status, device in zip(fail_step_lst, devices):
    if not fail_status:
      device_blacklist.ExtendBlacklist([str(device)])
      num_failed_devs += 1

  if num_failed_devs == len(devices):
    return 2

  if not devices:
    return 1
if __name__ == '__main__':
  # main() returns None on success, 1 when no devices were attached, and 2
  # when every device failed the check; propagate that as the exit code.
  sys.exit(main())
| bsd-3-clause |
laserson/luigi | luigi/contrib/esindex.py | 13 | 14139 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Support for Elasticsearch (1.0.0 or newer).
Provides an :class:`ElasticsearchTarget` and a :class:`CopyToIndex` template task.
Modeled after :class:`luigi.contrib.rdbms.CopyToTable`.
A minimal example (assuming elasticsearch is running on localhost:9200):
.. code-block:: python
class ExampleIndex(CopyToIndex):
index = 'example'
def docs(self):
return [{'_id': 1, 'title': 'An example document.'}]
if __name__ == '__main__':
task = ExampleIndex()
luigi.build([task], local_scheduler=True)
All options:
.. code-block:: python
class ExampleIndex(CopyToIndex):
host = 'localhost'
port = 9200
index = 'example'
doc_type = 'default'
purge_existing_index = True
marker_index_hist_size = 1
def docs(self):
return [{'_id': 1, 'title': 'An example document.'}]
if __name__ == '__main__':
task = ExampleIndex()
luigi.build([task], local_scheduler=True)
`Host`, `port`, `index`, `doc_type` parameters are standard elasticsearch.
`purge_existing_index` will delete the index, whenever an update is required.
This is useful, when one deals with "dumps" that represent the whole data, not just updates.
`marker_index_hist_size` sets the maximum number of entries in the 'marker'
index:
* 0 (default) keeps all updates,
* 1 to only remember the most recent update to the index.
This can be useful, if an index needs to recreated, even though
the corresponding indexing task has been run sometime in the past - but
a later indexing task might have altered the index in the meantime.
There are a two luigi `client.cfg` configuration options:
.. code-block:: ini
[elasticsearch]
marker-index = update_log
marker-doc-type = entry
"""
# pylint: disable=F0401,E1101,C0103
import abc
import datetime
import hashlib
import json
import logging
import itertools
import luigi
from luigi import six
logger = logging.getLogger('luigi-interface')

try:
    from elasticsearch.helpers import bulk_index
    from elasticsearch.connection import Urllib3HttpConnection
    import elasticsearch
    # This module targets the 1.x client API; warn (but do not fail) when an
    # older client is installed.
    if elasticsearch.__version__ < (1, 0, 0):
        logger.warning("This module works with elasticsearch 1.0.0 "
                       "or newer only.")
except ImportError:
    # Importing this module must not require elasticsearch to be installed;
    # only actually using the functionality does.
    logger.warning("Loading esindex module without elasticsearch installed. "
                   "Will crash at runtime if esindex functionality is used.")
class ElasticsearchTarget(luigi.Target):
    """ Target for a resource in Elasticsearch."""

    marker_index = luigi.configuration.get_config().get('elasticsearch',
                                                        'marker-index', 'update_log')
    marker_doc_type = luigi.configuration.get_config().get('elasticsearch',
                                                           'marker-doc-type', 'entry')

    def __init__(self, host, port, index, doc_type, update_id,
                 marker_index_hist_size=0, http_auth=None, timeout=10,
                 extra_elasticsearch_args=None):
        """
        :param host: Elasticsearch server host
        :type host: str
        :param port: Elasticsearch server port
        :type port: int
        :param index: index name
        :type index: str
        :param doc_type: doctype name
        :type doc_type: str
        :param update_id: an identifier for this data set
        :type update_id: str
        :param marker_index_hist_size: list of changes to the index to remember
        :type marker_index_hist_size: int
        :param http_auth: optional http auth information as either ':' separated
            string or a tuple
        :param timeout: Elasticsearch connection timeout
        :type timeout: int
        :param extra_elasticsearch_args: extra args for Elasticsearch
        :type Extra: dict
        """
        self.host = host
        self.port = port
        self.http_auth = http_auth
        self.index = index
        self.doc_type = doc_type
        self.update_id = update_id
        self.marker_index_hist_size = marker_index_hist_size
        self.timeout = timeout
        # Bug fix: the default used to be a mutable dict literal ({}), which
        # Python evaluates once and shares across every call/instance. Use
        # None as the sentinel and materialize a fresh dict per instance.
        self.extra_elasticsearch_args = extra_elasticsearch_args or {}
        self.es = elasticsearch.Elasticsearch(
            connection_class=Urllib3HttpConnection,
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            timeout=self.timeout,
            **self.extra_elasticsearch_args
        )

    def marker_index_document_id(self):
        """
        Generate an id for the indicator document.
        """
        params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id)
        return hashlib.sha1(params.encode('utf-8')).hexdigest()

    def touch(self):
        """
        Mark this update as complete.

        The document id would be sufficient but,
        for documentation,
        we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well.
        """
        self.create_marker_index()
        self.es.index(index=self.marker_index, doc_type=self.marker_doc_type,
                      id=self.marker_index_document_id(), body={
                          'update_id': self.update_id,
                          'target_index': self.index,
                          'target_doc_type': self.doc_type,
                          'date': datetime.datetime.now()})
        # Flush so a subsequent exists() sees the fresh marker.
        self.es.indices.flush(index=self.marker_index)
        self.ensure_hist_size()

    def exists(self):
        """
        Test, if this task has been run.
        """
        try:
            self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id())
            return True
        except elasticsearch.NotFoundError:
            logger.debug('Marker document not found.')
        except elasticsearch.ElasticsearchException as err:
            # logger.warn is a deprecated alias; use warning().
            logger.warning(err)
        return False

    def create_marker_index(self):
        """
        Create the index that will keep track of the tasks if necessary.
        """
        if not self.es.indices.exists(index=self.marker_index):
            self.es.indices.create(index=self.marker_index)

    def ensure_hist_size(self):
        """
        Shrink the history of updates for
        a `index/doc_type` combination down to `self.marker_index_hist_size`.
        """
        # 0 means "keep unlimited history" — nothing to trim.
        if self.marker_index_hist_size == 0:
            return
        # Newest first; delete everything past the configured history size.
        result = self.es.search(index=self.marker_index,
                                doc_type=self.marker_doc_type,
                                body={'query': {
                                    'term': {'target_index': self.index}}},
                                sort=('date:desc',))

        for i, hit in enumerate(result.get('hits').get('hits'), start=1):
            if i > self.marker_index_hist_size:
                marker_document_id = hit.get('_id')
                self.es.delete(id=marker_document_id, index=self.marker_index,
                               doc_type=self.marker_doc_type)
        self.es.indices.flush(index=self.marker_index)
class CopyToIndex(luigi.Task):
    """
    Template task for inserting a data set into Elasticsearch.

    Usage:

    1. Subclass and override the required `index` attribute.

    2. Implement a custom `docs` method, that returns an iterable over the documents.
       A document can be a JSON string,
       e.g. from a newline-delimited JSON (ldj) file (default implementation)
       or some dictionary.

    Optional attributes:

    * doc_type (default),
    * host (localhost),
    * port (9200),
    * settings ({'settings': {}})
    * mapping (None),
    * chunk_size (2000),
    * raise_on_error (True),
    * purge_existing_index (False),
    * marker_index_hist_size (0)

    If settings are defined, they are only applied at index creation time.
    """

    @property
    def host(self):
        """
        ES hostname.
        """
        return 'localhost'

    @property
    def port(self):
        """
        ES port.
        """
        return 9200

    @property
    def http_auth(self):
        """
        ES optional http auth information as either ‘:’ separated string or a tuple,
        e.g. `('user', 'pass')` or `"user:pass"`.
        """
        return None

    # NOTE(review): abc.abstractproperty is deprecated in favor of combining
    # @property with @abc.abstractmethod; left as-is to preserve behavior.
    @abc.abstractproperty
    def index(self):
        """
        The target index.

        May exist or not.
        """
        return None

    @property
    def doc_type(self):
        """
        The target doc_type.
        """
        return 'default'

    @property
    def mapping(self):
        """
        Dictionary with custom mapping or `None`.
        """
        return None

    @property
    def settings(self):
        """
        Settings to be used at index creation time.
        """
        return {'settings': {}}

    @property
    def chunk_size(self):
        """
        Single API call for this number of docs.
        """
        return 2000

    @property
    def raise_on_error(self):
        """
        Whether to fail fast.
        """
        return True

    @property
    def purge_existing_index(self):
        """
        Whether to delete the `index` completely before any indexing.
        """
        return False

    @property
    def marker_index_hist_size(self):
        """
        Number of event log entries in the marker index. 0: unlimited.
        """
        return 0

    @property
    def timeout(self):
        """
        Timeout.
        """
        return 10

    @property
    def extra_elasticsearch_args(self):
        """
        Extra arguments to pass to the Elasticsearch constructor
        """
        return {}

    def docs(self):
        """
        Return the documents to be indexed.

        Beside the user defined fields, the document may contain an `_index`, `_type` and `_id`.
        """
        with self.input().open('r') as fobj:
            for line in fobj:
                yield line

# everything below will rarely have to be overridden

    def _docs(self):
        """
        Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`,
        add those attributes here, if necessary.
        """
        iterdocs = iter(self.docs())
        # NOTE(review): next() raises StopIteration when docs() is empty —
        # presumably tasks always index at least one document; confirm.
        first = next(iterdocs)
        needs_parsing = False
        if isinstance(first, six.string_types):
            needs_parsing = True
        elif isinstance(first, dict):
            pass
        else:
            raise RuntimeError('Document must be either JSON strings or dict.')
        # Re-chain the consumed first document before the rest.
        for doc in itertools.chain([first], iterdocs):
            if needs_parsing:
                doc = json.loads(doc)
            if '_index' not in doc:
                doc['_index'] = self.index
            if '_type' not in doc:
                doc['_type'] = self.doc_type
            yield doc

    def _init_connection(self):
        # Fresh client per call, configured from the task's properties.
        return elasticsearch.Elasticsearch(
            connection_class=Urllib3HttpConnection,
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            timeout=self.timeout,
            **self.extra_elasticsearch_args
        )

    def create_index(self):
        """
        Override to provide code for creating the target index.

        By default it will be created without any special settings or mappings.
        """
        es = self._init_connection()
        if not es.indices.exists(index=self.index):
            es.indices.create(index=self.index, body=self.settings)

    def delete_index(self):
        """
        Delete the index, if it exists.
        """
        es = self._init_connection()
        if es.indices.exists(index=self.index):
            es.indices.delete(index=self.index)

    def update_id(self):
        """
        This id will be a unique identifier for this indexing task.
        """
        return self.task_id

    def output(self):
        """
        Returns a ElasticsearchTarget representing the inserted dataset.

        Normally you don't override this.
        """
        return ElasticsearchTarget(
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            index=self.index,
            doc_type=self.doc_type,
            update_id=self.update_id(),
            marker_index_hist_size=self.marker_index_hist_size,
            timeout=self.timeout,
            extra_elasticsearch_args=self.extra_elasticsearch_args
        )

    def run(self):
        """
        Run task, namely:

        * purge existing index, if requested (`purge_existing_index`),
        * create the index, if missing,
        * apply mappings, if given,
        * set refresh interval to -1 (disable) for performance reasons,
        * bulk index in batches of size `chunk_size` (2000),
        * set refresh interval to 1s,
        * refresh Elasticsearch,
        * create entry in marker index.
        """
        if self.purge_existing_index:
            self.delete_index()
        self.create_index()
        es = self._init_connection()
        if self.mapping:
            es.indices.put_mapping(index=self.index, doc_type=self.doc_type,
                                   body=self.mapping)
        # Disable refresh during the bulk load, restore to 1s afterwards.
        es.indices.put_settings({"index": {"refresh_interval": "-1"}},
                                index=self.index)

        bulk_index(es, self._docs(), chunk_size=self.chunk_size,
                   raise_on_error=self.raise_on_error)

        es.indices.put_settings({"index": {"refresh_interval": "1s"}},
                                index=self.index)
        es.indices.refresh()
        # Record completion in the marker index so exists() returns True.
        self.output().touch()
| apache-2.0 |
edoko/AirKernel_NS_JBN | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Futex op codes and flag bits (mirror the values in <linux/futex.h>).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the flag bits, leaving the bare futex command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the mean of a running *total* over *n* samples."""
    mean = total / n
    return mean
def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into one nanosecond count."""
    return nsecs + secs * NSECS_PER_SEC
def nsecs_secs(nsecs):
    """Return the seconds portion of a nanosecond count."""
    secs = nsecs / NSECS_PER_SEC
    return secs
def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    remainder = nsecs % NSECS_PER_SEC
    return remainder
def nsecs_str(nsecs):
    """Format a nanosecond count as a 'SSSSS.NNNNNNNNN' string."""
    # Bug fix: the assignment previously ended with a trailing comma, so the
    # function built and returned a 1-tuple instead of the formatted string
    # (and bound it to a local shadowing the builtin 'str').
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold *value* into dict[key] = (min, max, running_avg, count).

    Creates the entry on first sight of *key*; afterwards widens min/max and
    updates the running average approximation.
    """
    # Bug fix: dict.has_key() was removed in Python 3; the 'in' operator is
    # equivalent on Python 2 as well.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        # Locals renamed from min/max/avg to stop shadowing the builtins.
        lo, hi, running_avg, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        # NOTE(review): (avg + value) / 2 is the original running
        # approximation, not a true mean; preserved for compatibility.
        running_avg = (running_avg + value) / 2
        dict[key] = (lo, hi, running_avg, count + 1)
def clear_term():
    """Clear the terminal via the ANSI cursor-home + erase-display codes."""
    print("\x1b[H" "\x1b[2J")
audit_package_warned = False
try:
    import audit
    # Map os.uname() machine names to the audit module's machine ids so
    # syscall numbers can be resolved for the running architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is absent from older audit bindings; add it when present.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit is optional; warn once and fall back to numeric syscall names.
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Best-effort translation of a syscall number into its symbolic name.

    Falls back to the numeric string when the audit package is unavailable
    (audit/machine_id undefined -> NameError) or the lookup fails.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are no
    # longer swallowed; Exception still covers the NameError relied upon
    # when the optional audit module is missing.
    except Exception:
        return str(id)
def strerror(nr):
    """Map an errno value (positive or negative) to its symbolic name.

    Returns e.g. 'ENOENT' for 2 or -2, or 'Unknown <nr> errno' when the
    value is not a known errno.
    """
    try:
        return errno.errorcode[abs(nr)]
    # Narrowed from a bare 'except:' — only the lookup failure is expected.
    except Exception:
        return "Unknown %d errno" % nr
| gpl-2.0 |
ladybug-analysis-tools/honeybee | honeybee/radiance/command/getinfo.py | 1 | 1803 | # coding=utf-8
"""getinfo - get header information from a RADIANCE file"""
from _commandbase import RadianceCommand
from ..datatype import RadiancePath, RadianceBoolFlag
import os
class Getinfo(RadianceCommand):
    """Wrapper for the Radiance ``getinfo`` binary, which reads header
    information (or, with -d, picture dimensions) from RADIANCE files."""

    # Descriptor for the '-d' flag: report dimensions instead of the header.
    get_dimensions = RadianceBoolFlag('d', 'get_dimensions')
    # Descriptor for the output path; the file need not exist yet.
    output_file = RadiancePath('output', 'getinfo details', check_exists=False)

    def __init__(self, get_dimensions=None, header_suppress=None, rad_files=None,
                 output_file=None):
        """Init command.

        Args:
            get_dimensions: Set True to pass the -d flag to getinfo.
            header_suppress: Stored on the instance. NOTE(review): it is not
                referenced by to_rad_string — confirm whether it was meant to
                map to a getinfo flag.
            rad_files: A single file path or a list of paths to query.
            output_file: Optional path that receives getinfo's output.
        """
        RadianceCommand.__init__(self)
        self.get_dimensions = get_dimensions
        self.header_suppress = header_suppress
        self.input_file = rad_files
        self.output_file = output_file

    @property
    def input_file(self):
        """Get and set rad files."""
        return self.__input_file

    @input_file.setter
    def input_file(self, files):
        # Accept a single path string or a list of paths; always store a
        # (possibly empty) list of normalized paths.
        if files:
            if isinstance(files, basestring):
                files = [files]
            self.__input_file = [os.path.normpath(f) for f in files]
        else:
            self.__input_file = []

    def to_rad_string(self, relative_path=False):
        """Assemble the full getinfo command line as a single string."""
        # Rendered '-d' flag (empty string when the flag is unset).
        warning = self.get_dimensions.to_rad_string()
        rad_files = " ".join(self.normspace(f) for f in self.input_file)
        cmd_path = self.normspace(os.path.join(self.radbin_path, 'getinfo'))
        output_file_path = self.output_file.to_rad_string()
        # Shell redirection into the output file, when one was provided.
        output_file = ">%s" % output_file_path if output_file_path else ''
        rad_string = "{0} {1} {2} {3}".format(cmd_path, warning, rad_files,
                                              output_file)
        self.check_input_files(rad_string)
        return rad_string

    @property
    def input_files(self):
        """Return input files by user."""
        return self.input_file
| gpl-3.0 |
fkorotkov/pants | contrib/android/tests/python/pants_test/contrib/android/tasks/test_aapt_builder.py | 14 | 2438 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants_test.contrib.android.test_android_base import TestAndroidBase, distribution
from pants.contrib.android.tasks.aapt_builder import AaptBuilder
class TestAaptBuilder(TestAndroidBase):
  """Tests for AaptBuilder, the task that packages Android apks via aapt."""

  @classmethod
  def task_type(cls):
    return AaptBuilder

  def test_aapt_builder_smoke(self):
    """The task must execute cleanly on an empty context."""
    task = self.create_task(self.context())
    task.execute()

  def test_creates_apk(self):
    """Package names for android_binary targets must end in '.apk'."""
    with self.android_binary(target_name='example', package_name='org.pantsbuild.example') as apk:
      self.assertTrue(AaptBuilder.package_name(apk).endswith('.apk'))

  def test_unique_package_name(self):
    """Distinct binaries must map to distinct package names."""
    with self.android_binary(target_name='example', package_name='org.pantsbuild.example') as bin1:
      with self.android_binary(target_name='hello', package_name='org.pantsbuild.hello') as bin2:
        self.assertNotEqual(AaptBuilder.package_name(bin1), AaptBuilder.package_name(bin2))

  def test_render_args(self):
    """The rendered command must invoke aapt and end with the dex files."""
    with distribution() as dist:
      with self.android_binary() as android_binary:
        self.set_options(sdk_path=dist)
        task = self.create_task(self.context())
        rendered_args = task._render_args(android_binary, 'res', ['classes.dex'])
        # Fix: assertEquals is a deprecated unittest alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual(os.path.basename(rendered_args[0]), 'aapt')
        self.assertEqual(rendered_args[-1], 'classes.dex')

  def test_resource_order_in_args(self):
    """Binary resource dirs must precede library resource dirs in the call."""
    with distribution() as dist:
      with self.android_resources(target_name='binary_resources') as res1:
        with self.android_resources(target_name='library_resources') as res2:
          with self.android_library(dependencies=[res2]) as library:
            with self.android_binary(dependencies=[res1, library]) as target:
              self.set_options(sdk_path=dist)
              task = self.create_task(self.context())
              res_dirs = [res1.resource_dir, res2.resource_dir]
              rendered_args = task._render_args(target, res_dirs, 'classes.dex')
              args_string = ' '.join(rendered_args)
              self.assertIn('--auto-add-overlay -S {} -S '
                            '{}'.format(res1.resource_dir, res2.resource_dir), args_string)
| apache-2.0 |
infinitewarp/poketrainer | library/api/pgoapi/__init__.py | 5 | 2311 | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
from pgoapi.exceptions import PleaseInstallProtobufVersion3
import pkg_resources
import logging
__title__ = 'pgoapi'
__version__ = '1.1.7'
__author__ = 'tjado'
__license__ = 'MIT License'
__copyright__ = 'Copyright (c) 2016 tjado <https://github.com/tejado>'
protobuf_exist = False
protobuf_version = 0
try:
protobuf_version = pkg_resources.get_distribution("protobuf").version
protobuf_exist = True
except:
pass
if (not protobuf_exist) or (int(protobuf_version[:1]) < 3):
raise PleaseInstallProtobufVersion3()
from pgoapi.pgoapi import PGoApi
from pgoapi.rpc_api import RpcApi
from pgoapi.auth import Auth
logging.getLogger("pgoapi").addHandler(logging.NullHandler())
logging.getLogger("rpc_api").addHandler(logging.NullHandler())
logging.getLogger("utilities").addHandler(logging.NullHandler())
logging.getLogger("auth").addHandler(logging.NullHandler())
logging.getLogger("auth_ptc").addHandler(logging.NullHandler())
logging.getLogger("auth_google").addHandler(logging.NullHandler())
try:
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
except:
pass | mit |
daodaoliang/python-phonenumbers | python/phonenumbers/data/region_NE.py | 4 | 1866 | """Auto-generated file, do not edit by hand. NE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

# Phone number metadata for Niger (region code NE, country calling code +227).
# Auto-generated from libphonenumber's XML definitions -- regenerate rather
# than hand-editing the patterns below.
PHONE_METADATA_NE = PhoneMetadata(id='NE', country_code=227, international_prefix='00',
    general_desc=PhoneNumberDesc(national_number_pattern='[0289]\\d{7}', possible_number_pattern='\\d{8}'),
    fixed_line=PhoneNumberDesc(national_number_pattern='2(?:0(?:20|3[1-7]|4[134]|5[14]|6[14578]|7[1-578])|1(?:4[145]|5[14]|6[14-68]|7[169]|88))\\d{4}', possible_number_pattern='\\d{8}', example_number='20201234'),
    mobile=PhoneNumberDesc(national_number_pattern='(?:8[89]|9\\d)\\d{6}', possible_number_pattern='\\d{8}', example_number='93123456'),
    toll_free=PhoneNumberDesc(national_number_pattern='08\\d{6}', possible_number_pattern='\\d{8}', example_number='08123456'),
    premium_rate=PhoneNumberDesc(national_number_pattern='09\\d{6}', possible_number_pattern='\\d{8}', example_number='09123456'),
    shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
    number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[289]|09']),
        NumberFormat(pattern='(08)(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['08'])],
    leading_zero_possible=True)
chinghanyu/Cognet-RPi-linux | tools/perf/util/setup.py | 242 | 1531 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """Redirect distutils' extension build output into the out-of-tree
    directories supplied by the perf Makefile (build_lib/build_tmp below)."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # build_lib and build_tmp are module-level values read from the
        # environment further down in this file (set before setup() runs).
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """Install directly from the Makefile-provided extension build dir."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Reuse the extension build directory as the install source.
        self.build_dir = build_lib
# Compiler flags come from the perf Makefile via the environment.
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]

# Build locations and link objects handed in by the perf Makefile.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')

# Read the list of C sources, skipping blank lines and '#' comments.
# FIX: the original used the Python-2-only builtin file() and never closed
# the handle; open() inside a with-block works and releases the descriptor.
with open('util/python-ext-sources') as sources_file:
    ext_sources = [f.strip() for f in sources_file
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 extra_objects = [libtraceevent, liblk],
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
sjwilks/mavlink | pymavlink/examples/apmsetrate.py | 11 | 2047 | #!/usr/bin/env python
'''
set stream rate on an APM
'''
import sys, struct, time, os
from optparse import OptionParser

# Command line: serial connection settings plus the requested stream rate.
parser = OptionParser("apmsetrate.py [options]")
parser.add_option("--baudrate", dest="baudrate", type='int',
                  help="master port baud rate", default=115200)
parser.add_option("--device", dest="device", default=None, help="serial device")
parser.add_option("--rate", dest="rate", default=4, type='int', help="requested stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
                  default=255, help='MAVLink source system for this GCS')
parser.add_option("--showmessages", dest="showmessages", action='store_true',
                  help="show incoming messages", default=False)
(opts, args) = parser.parse_args()

from pymavlink import mavutil

# --device is mandatory: there is no sensible default serial port.
if opts.device is None:
    print("You must specify a serial device")
    sys.exit(1)
def wait_heartbeat(m):
    '''wait for a heartbeat so we know the target system IDs'''
    print("Waiting for APM heartbeat")
    m.wait_heartbeat()
    # BUG FIX: the original printed m.target_system twice; the second field
    # is supposed to be the component ID.
    print("Heartbeat from APM (system %u component %u)" % (m.target_system, m.target_component))
def show_messages(m):
    '''show incoming mavlink messages'''
    # Block on the link forever, echoing every decoded message; stop as soon
    # as the connection yields nothing.
    while True:
        msg = m.recv_match(blocking=True)
        if not msg:
            return
        msg_type = msg.get_type()
        if msg_type != "BAD_DATA":
            print(msg)
        else:
            # Write printable framing noise straight through so garbage on
            # the line stays visible without decoding errors.
            if mavutil.all_printable(msg.data):
                sys.stdout.write(msg.data)
                sys.stdout.flush()
# create a mavlink serial instance
master = mavutil.mavlink_connection(opts.device, baud=opts.baudrate)

# wait for the heartbeat msg to find the system ID
wait_heartbeat(master)

print("Sending all stream request for rate %u" % opts.rate)
# Send the request three times (best-effort; presumably to cope with packet
# loss on the serial link -- TODO confirm).
for i in range(0, 3):
    master.mav.request_data_stream_send(master.target_system, master.target_component,
                                        mavutil.mavlink.MAV_DATA_STREAM_ALL, opts.rate, 1)
if opts.showmessages:
    show_messages(master)
| lgpl-3.0 |
savoirfairelinux/OpenUpgrade | addons/product_visible_discount/product_visible_discount.py | 41 | 3950 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_pricelist(osv.osv):
    """Extend product.pricelist with a flag controlling whether pricelist
    reductions are shown as an explicit discount on sale order lines."""
    _inherit = 'product.pricelist'

    # visible_discount: when True, sale order lines show the catalog price
    # plus a separate discount percentage instead of the net unit price.
    _columns ={
        'visible_discount': fields.boolean('Visible Discount'),
    }
    _defaults = {
        'visible_discount': True,
    }
class sale_order_line(osv.osv):
    """Override sale.order.line so that, when the selected pricelist has
    'visible_discount' set, the line shows the catalog (list) price together
    with an explicit discount percentage instead of the already-discounted
    unit price."""
    _inherit = "sale.order.line"

    def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
            uom=False, qty_uos=0, uos=False, name='', partner_id=False,
            lang=False, update_tax=True, date_order=False, packaging=False,
            fiscal_position=False, flag=False, context=None):

        def get_real_price(res_dict, product_id, qty, uom, pricelist):
            # Returns the product's catalog list_price scaled by the UoM
            # factor. NOTE(review): the res_dict, qty and pricelist
            # parameters are accepted but never used here -- confirm before
            # relying on them.
            item_obj = self.pool.get('product.pricelist.item')
            price_type_obj = self.pool.get('product.price.type')
            product_obj = self.pool.get('product.product')
            field_name = 'list_price'

            product = product_obj.browse(cr, uid, product_id, context)
            product_read = product_obj.read(cr, uid, product_id, [field_name], context=context)

            factor = 1.0
            if uom and uom != product.uom_id.id:
                # presumably converts the list price into the requested unit
                # of measure -- TODO confirm the direction of the factor
                product_uom_obj = self.pool.get('product.uom')
                uom_data = product_uom_obj.browse(cr, uid, product.uom_id.id)
                factor = uom_data.factor
            return product_read[field_name] * factor

        res=super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty,
            uom, qty_uos, uos, name, partner_id,
            lang, update_tax, date_order, packaging=packaging, fiscal_position=fiscal_position, flag=flag, context=context)

        # NOTE(review): this rebinds 'context', discarding whatever the
        # caller passed in -- verify this is intentional.
        context = {'lang': lang, 'partner_id': partner_id}
        result=res['value']
        pricelist_obj=self.pool.get('product.pricelist')
        product_obj = self.pool.get('product.product')
        if product:
            # Without a computed unit price there is nothing to re-express
            # as list price + discount.
            if result.get('price_unit',False):
                price=result['price_unit']
            else:
                return res

            product = product_obj.browse(cr, uid, product, context)
            # price_get returns a {pricelist_id: price} mapping.
            list_price = pricelist_obj.price_get(cr, uid, [pricelist],
                    product.id, qty or 1.0, partner_id, {'uom': uom,'date': date_order })
            pricelists = pricelist_obj.read(cr,uid,[pricelist],['visible_discount'])
            new_list_price = get_real_price(list_price, product.id, qty, uom, pricelist)
            if len(pricelists)>0 and pricelists[0]['visible_discount'] and list_price[pricelist] != 0 and new_list_price != 0:
                # Show the undiscounted catalog price and express the
                # pricelist reduction as a percentage discount.
                discount = (new_list_price - price) / new_list_price * 100
                if discount > 0:
                    result['price_unit'] = new_list_price
                    result['discount'] = discount
                else:
                    result['discount'] = 0.0
            else:
                result['discount'] = 0.0
        return res
| agpl-3.0 |
Tatsh-ansible/ansible-modules-core | network/netvisor/pn_vrouterbgp.py | 29 | 15078 | #!/usr/bin/python
""" PN-CLI vrouter-bgp-add/vrouter-bgp-remove/vrouter-bgp-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import shlex
# Release-status metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = """
---
module: pn_vrouterbgp
author: "Pluribus Networks (@amitsi)"
version_added: "2.2"
version: 1.0
short_description: CLI command to add/remove/modify vrouter-bgp.
description:
- Execute vrouter-bgp-add, vrouter-bgp-remove, vrouter-bgp-modify command.
- Each fabric, cluster, standalone switch, or virtual network (VNET) can
provide its tenants with a vRouter service that forwards traffic between
networks and implements Layer 4 protocols.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
state:
description:
- State the action to perform. Use 'present' to add bgp,
'absent' to remove bgp and 'update' to modify bgp.
required: True
choices: ['present', 'absent', 'update']
pn_vrouter_name:
description:
- Specify a name for the vRouter service.
required: True
pn_neighbor:
description:
- Specify a neighbor IP address to use for BGP.
- Required for vrouter-bgp-add.
pn_remote_as:
description:
- Specify the remote Autonomous System(AS) number. This value is between
1 and 4294967295.
- Required for vrouter-bgp-add.
pn_next_hop_self:
description:
- Specify if the next-hop is the same router or not.
pn_password:
description:
- Specify a password, if desired.
pn_ebgp:
description:
- Specify a value for external BGP to accept or attempt BGP connections
to external peers, not directly connected, on the network. This is a
value between 1 and 255.
pn_prefix_listin:
description:
- Specify the prefix list to filter traffic inbound.
pn_prefix_listout:
description:
- Specify the prefix list to filter traffic outbound.
pn_route_reflector:
description:
- Specify if a route reflector client is used.
pn_override_capability:
description:
- Specify if you want to override capability.
pn_soft_reconfig:
description:
- Specify if you want a soft reconfiguration of inbound traffic.
pn_max_prefix:
description:
- Specify the maximum number of prefixes.
pn_max_prefix_warn:
description:
- Specify if you want a warning message when the maximum number of
prefixes is exceeded.
pn_bfd:
description:
- Specify if you want BFD protocol support for fault detection.
pn_multiprotocol:
description:
- Specify a multi-protocol for BGP.
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_weight:
description:
- Specify a default weight value between 0 and 65535 for the neighbor
routes.
pn_default_originate:
description:
- Specify if you want announce default routes to the neighbor or not.
pn_keepalive:
description:
- Specify BGP neighbor keepalive interval in seconds.
pn_holdtime:
description:
- Specify BGP neighbor holdtime in seconds.
pn_route_mapin:
description:
- Specify inbound route map for neighbor.
pn_route_mapout:
description:
- Specify outbound route map for neighbor.
"""
EXAMPLES = """
- name: add vrouter-bgp
pn_vrouterbgp:
state: 'present'
pn_vrouter_name: 'ansible-vrouter'
pn_neighbor: 104.104.104.1
pn_remote_as: 1800
- name: remove vrouter-bgp
pn_vrouterbgp:
state: 'absent'
pn_name: 'ansible-vrouter'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
stdout:
  description: The set of responses from the vrouterbgp command.
returned: always
type: list
stderr:
description: The set of error responses from the vrouterbgp command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
# Module-level idempotency flags populated by check_cli().
VROUTER_EXISTS = None
NEIGHBOR_EXISTS = None
def pn_cli(module):
    """
    Build the base Netvisor CLI string from the module parameters.

    Adds '--user name:password' when credentials were supplied and targets
    either the local switch or the named remote switch.
    :param module: The Ansible module to fetch username, password and switch
    :return: the cli string for further processing
    """
    user = module.params['pn_cliusername']
    pwd = module.params['pn_clipassword']
    switch = module.params['pn_cliswitch']

    if user and pwd:
        cli = '/usr/bin/cli --quiet --user {0}:{1} '.format(user, pwd)
    else:
        # No credentials: assume we are running as root.
        cli = '/usr/bin/cli --quiet '

    cli += ' switch-local ' if switch == 'local' else ' switch ' + switch
    return cli
def check_cli(module, cli):
    """
    Check vRouter existence and BGP-neighbor idempotency on the target node.

    Runs 'vrouter-show' and 'vrouter-bgp-show' and records the outcome in the
    module-level flags VROUTER_EXISTS and NEIGHBOR_EXISTS.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    """
    global VROUTER_EXISTS, NEIGHBOR_EXISTS

    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']

    # Does the vRouter exist at all?
    vrouter_cmd = shlex.split(cli + ' vrouter-show format name no-show-headers ')
    existing_vrouters = module.run_command(vrouter_cmd)[1].split()
    VROUTER_EXISTS = vrouter_name in existing_vrouters

    # Is the BGP neighbor already configured on that vRouter?
    bgp_show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
    bgp_show += 'format neighbor no-show-headers'
    existing_neighbors = module.run_command(shlex.split(bgp_show))[1].split()
    NEIGHBOR_EXISTS = neighbor in existing_neighbors
def run_cli(module, cli):
    """
    Execute the assembled CLI string on the target node(s) and exit the
    module with a JSON result describing success or failure.
    :param cli: the complete cli string to be executed on the target node(s).
    :param module: The Ansible module to fetch command
    """
    switch = module.params['pn_cliswitch']
    command = get_command_from_state(module.params['state'])

    # rc / out / err from running the command on the target.
    rc, out, err = module.run_command(shlex.split(cli))

    # Report only the part after the switch name, hiding credentials.
    trimmed_cli = cli.split(switch)[1]

    if rc != 0:
        module.exit_json(
            command=trimmed_cli,
            stderr=err.strip(),
            msg="%s operation failed" % command,
            changed=False
        )

    if out:
        module.exit_json(
            command=trimmed_cli,
            stdout=out.strip(),
            msg="%s operation completed" % command,
            changed=True
        )
    else:
        module.exit_json(
            command=trimmed_cli,
            msg="%s operation completed" % command,
            changed=True
        )
def get_command_from_state(state):
    """
    Map an Ansible *state* value to the corresponding nvOS command name.
    Returns None for any unrecognized state.
    :param state: The state for which the respective command name is required.
    """
    state_to_command = {
        'present': 'vrouter-bgp-add',
        'absent': 'vrouter-bgp-remove',
        'update': 'vrouter-bgp-modify',
    }
    return state_to_command.get(state)
def main():
    """ This portion is for arguments parsing """
    # Declare the module interface; required_if enforces the per-state
    # mandatory parameters.
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_vrouter_name=dict(required=True, type='str'),
            pn_neighbor=dict(type='str'),
            pn_remote_as=dict(type='str'),
            pn_next_hop_self=dict(type='bool'),
            pn_password=dict(type='str', no_log=True),
            pn_ebgp=dict(type='int'),
            pn_prefix_listin=dict(type='str'),
            pn_prefix_listout=dict(type='str'),
            pn_route_reflector=dict(type='bool'),
            pn_override_capability=dict(type='bool'),
            pn_soft_reconfig=dict(type='bool'),
            pn_max_prefix=dict(type='int'),
            pn_max_prefix_warn=dict(type='bool'),
            pn_bfd=dict(type='bool'),
            pn_multiprotocol=dict(type='str',
                                  choices=['ipv4-unicast', 'ipv6-unicast']),
            pn_weight=dict(type='int'),
            pn_default_originate=dict(type='bool'),
            pn_keepalive=dict(type='str'),
            pn_holdtime=dict(type='str'),
            pn_route_mapin=dict(type='str'),
            pn_route_mapout=dict(type='str')
        ),
        required_if=(
            ["state", "present",
             ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
            ["state", "absent",
             ["pn_vrouter_name", "pn_neighbor"]],
            ["state", "update",
             ["pn_vrouter_name", "pn_neighbor"]]
        )
    )

    # Accessing the arguments
    state= module.params['state']
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']
    remote_as = module.params['pn_remote_as']
    next_hop_self = module.params['pn_next_hop_self']
    password = module.params['pn_password']
    ebgp = module.params['pn_ebgp']
    prefix_listin = module.params['pn_prefix_listin']
    prefix_listout = module.params['pn_prefix_listout']
    route_reflector = module.params['pn_route_reflector']
    override_capability = module.params['pn_override_capability']
    soft_reconfig = module.params['pn_soft_reconfig']
    max_prefix = module.params['pn_max_prefix']
    max_prefix_warn = module.params['pn_max_prefix_warn']
    bfd = module.params['pn_bfd']
    multiprotocol = module.params['pn_multiprotocol']
    weight = module.params['pn_weight']
    default_originate = module.params['pn_default_originate']
    keepalive = module.params['pn_keepalive']
    holdtime = module.params['pn_holdtime']
    route_mapin = module.params['pn_route_mapin']
    route_mapout = module.params['pn_route_mapout']

    # Building the CLI command string
    cli = pn_cli(module)

    command = get_command_from_state(state)
    if command == 'vrouter-bgp-remove':
        # Removal is a no-op (skipped) unless both the vRouter and the
        # neighbor actually exist.
        check_cli(module, cli)
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )
        if NEIGHBOR_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg=('BGP neighbor with IP %s does not exist on %s'
                     % (neighbor, vrouter_name))
            )
        cli += (' %s vrouter-name %s neighbor %s '
                % (command, vrouter_name, neighbor))

    else:

        if command == 'vrouter-bgp-add':
            # Adding is skipped when the vRouter is missing or the neighbor
            # is already configured (idempotency).
            check_cli(module, cli)
            if VROUTER_EXISTS is False:
                module.exit_json(
                    skipped=True,
                    msg='vRouter %s does not exist' % vrouter_name
                )
            if NEIGHBOR_EXISTS is True:
                module.exit_json(
                    skipped=True,
                    msg=('BGP neighbor with IP %s already exists on %s'
                         % (neighbor, vrouter_name))
                )

        cli += (' %s vrouter-name %s neighbor %s '
                % (command, vrouter_name, neighbor))

        # Append optional parameters. Tri-state booleans deliberately
        # distinguish True / False / None (unset adds nothing).
        if remote_as:
            cli += ' remote-as ' + str(remote_as)

        if next_hop_self is True:
            cli += ' next-hop-self '
        if next_hop_self is False:
            cli += ' no-next-hop-self '

        if password:
            cli += ' password ' + password

        if ebgp:
            cli += ' ebgp-multihop ' + str(ebgp)

        if prefix_listin:
            cli += ' prefix-list-in ' + prefix_listin

        if prefix_listout:
            cli += ' prefix-list-out ' + prefix_listout

        if route_reflector is True:
            cli += ' route-reflector-client '
        if route_reflector is False:
            cli += ' no-route-reflector-client '

        if override_capability is True:
            cli += ' override-capability '
        if override_capability is False:
            cli += ' no-override-capability '

        if soft_reconfig is True:
            cli += ' soft-reconfig-inbound '
        if soft_reconfig is False:
            cli += ' no-soft-reconfig-inbound '

        if max_prefix:
            cli += ' max-prefix ' + str(max_prefix)

        if max_prefix_warn is True:
            cli += ' max-prefix-warn-only '
        if max_prefix_warn is False:
            cli += ' no-max-prefix-warn-only '

        if bfd is True:
            cli += ' bfd '
        if bfd is False:
            cli += ' no-bfd '

        if multiprotocol:
            cli += ' multi-protocol ' + multiprotocol

        if weight:
            cli += ' weight ' + str(weight)

        if default_originate is True:
            cli += ' default-originate '
        if default_originate is False:
            cli += ' no-default-originate '

        if keepalive:
            cli += ' neighbor-keepalive-interval ' + keepalive

        if holdtime:
            cli += ' neighbor-holdtime ' + holdtime

        if route_mapin:
            cli += ' route-map-in ' + route_mapin

        if route_mapout:
            cli += ' route-map-out ' + route_mapout

    # Execute and exit via module.exit_json().
    run_cli(module, cli)
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule

# Entry point when Ansible executes this module as a script.
if __name__ == '__main__':
    main()
| gpl-3.0 |
embeddedarm/linux-3.14-pxa16x | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# location-string -> drop count, filled in by skb__kfree_skb()
drop_log = {}
# sorted list of (address, symbol_name) tuples from /proc/kallsyms
kallsyms = []
def get_kallsyms_table():
	"""Populate the global, sorted kallsyms table from /proc/kallsyms.

	Silently leaves the table empty when the file is unavailable
	(non-Linux host or insufficient permissions).
	"""
	global kallsyms
	try:
		# with-block guarantees the handle is closed (the original
		# opened the file and never closed it)
		with open("/proc/kallsyms", "r") as f:
			for line in f:
				# kallsyms fields: address, type, name[, module]
				fields = line.split()
				loc = int(fields[0], 16)
				name = fields[2]
				kallsyms.append((loc, name))
	except (IOError, OSError):
		# was a bare 'except:'; only file-access errors are expected
		return
	kallsyms.sort()
def get_sym(sloc):
	"""Resolve an address to (symbol_name, offset_into_symbol).

	Binary-searches the sorted kallsyms table for the last symbol whose
	start address is <= the given address. Returns (None, 0) when the
	address precedes every known symbol.
	"""
	addr = int(sloc)
	lo, hi = -1, len(kallsyms)
	# Invariant: kallsyms[i][0] <= addr for all 0 <= i <= lo
	#            kallsyms[i][0] >  addr for all hi <= i < len(kallsyms)
	while hi - lo > 1:
		mid = (lo + hi) // 2
		if kallsyms[mid][0] <= addr:
			lo = mid
		else:
			hi = mid
	if lo < 0:
		return (None, 0)
	sym_start, sym_name = kallsyms[lo]
	return (sym_name, addr - sym_start)
def print_drop_table():
	# Dump one row per drop location: resolved symbol (or the raw
	# address string when unresolved), offset into it, and hit count.
	# NOTE: Python 2 print statements -- this script targets the py2
	# perf-script interpreter.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf-script hook: called once before events are processed.
	print "Starting trace (Ctrl-C to dump results)"

def trace_end():
	# perf-script hook: called after the trace ends; symbol resolution is
	# deferred until here so the table is loaded only once.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	"""Count one dropped-skb event against its kernel drop location."""
	slocation = str(location)
	# dict.get replaces the original try/bare-except counter: the bare
	# 'except:' would also have hidden real errors (e.g. a NameError),
	# not just the expected missing key.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/tests/test_pytree.py | 131 | 17346 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for pytree.py.
NOTE: Please *don't* add doc strings to individual test methods!
In verbose mode, printing of the module, class and method name is much
more helpful than printing of (the first line of) the docstring,
especially when debugging a test.
"""
from __future__ import with_statement
import sys
import warnings
# Testing imports
from . import support
from lib2to3 import pytree
try:
    sorted
except NameError:
    # Compatibility shim for ancient interpreters (< 2.4) that lack the
    # sorted() builtin: emulate it with an in-place list sort.
    def sorted(lst):
        l = list(lst)
        l.sort()
        return l
class TestNodes(support.TestCase):
"""Unit tests for nodes (Base, Leaf, Node)."""
if sys.version_info >= (2,6):
# warnings.catch_warnings is new in 2.6.
def test_deprecated_prefix_methods(self):
l = pytree.Leaf(100, "foo")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DeprecationWarning)
self.assertEqual(l.get_prefix(), "")
l.set_prefix("hi")
self.assertEqual(l.prefix, "hi")
self.assertEqual(len(w), 2)
for warning in w:
self.assertTrue(warning.category is DeprecationWarning)
self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \
"use the prefix property")
self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \
"use the prefix property")
def test_instantiate_base(self):
if __debug__:
# Test that instantiating Base() raises an AssertionError
self.assertRaises(AssertionError, pytree.Base)
def test_leaf(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.type, 100)
self.assertEqual(l1.value, "foo")
def test_leaf_repr(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(repr(l1), "Leaf(100, 'foo')")
def test_leaf_str(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(str(l1), "foo")
l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
self.assertEqual(str(l2), " foo")
def test_leaf_str_numeric_value(self):
# Make sure that the Leaf's value is stringified. Failing to
# do this can cause a TypeError in certain situations.
l1 = pytree.Leaf(2, 5)
l1.prefix = "foo_"
self.assertEqual(str(l1), "foo_5")
def test_leaf_equality(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
self.assertEqual(l1, l2)
l3 = pytree.Leaf(101, "foo")
l4 = pytree.Leaf(100, "bar")
self.assertNotEqual(l1, l3)
self.assertNotEqual(l1, l4)
def test_leaf_prefix(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.prefix, "")
self.assertFalse(l1.was_changed)
l1.prefix = " ##\n\n"
self.assertEqual(l1.prefix, " ##\n\n")
self.assertTrue(l1.was_changed)
def test_node(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(200, "bar")
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(n1.type, 1000)
self.assertEqual(n1.children, [l1, l2])
def test_node_repr(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(repr(n1),
"Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
def test_node_str(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(str(n1), "foo bar")
def test_node_prefix(self):
l1 = pytree.Leaf(100, "foo")
self.assertEqual(l1.prefix, "")
n1 = pytree.Node(1000, [l1])
self.assertEqual(n1.prefix, "")
n1.prefix = " "
self.assertEqual(n1.prefix, " ")
self.assertEqual(l1.prefix, " ")
def test_get_suffix(self):
l1 = pytree.Leaf(100, "foo", prefix="a")
l2 = pytree.Leaf(100, "bar", prefix="b")
n1 = pytree.Node(1000, [l1, l2])
self.assertEqual(l1.get_suffix(), l2.prefix)
self.assertEqual(l2.get_suffix(), "")
self.assertEqual(n1.get_suffix(), "")
l3 = pytree.Leaf(100, "bar", prefix="c")
n2 = pytree.Node(1000, [n1, l3])
self.assertEqual(n1.get_suffix(), l3.prefix)
self.assertEqual(l3.get_suffix(), "")
self.assertEqual(n2.get_suffix(), "")
def test_node_equality(self):
n1 = pytree.Node(1000, ())
n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
self.assertEqual(n1, n2)
n3 = pytree.Node(1001, ())
self.assertNotEqual(n1, n3)
def test_node_recursive_equality(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "foo")
n1 = pytree.Node(1000, [l1])
n2 = pytree.Node(1000, [l2])
self.assertEqual(n1, n2)
l3 = pytree.Leaf(100, "bar")
n3 = pytree.Node(1000, [l3])
self.assertNotEqual(n1, n3)
def test_replace(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "+")
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
self.assertEqual(n1.children, [l1, l2, l3])
self.assertTrue(isinstance(n1.children, list))
self.assertFalse(n1.was_changed)
l2new = pytree.Leaf(100, "-")
l2.replace(l2new)
self.assertEqual(n1.children, [l1, l2new, l3])
self.assertTrue(isinstance(n1.children, list))
self.assertTrue(n1.was_changed)
def test_replace_with_list(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "+")
l3 = pytree.Leaf(100, "bar")
n1 = pytree.Node(1000, [l1, l2, l3])
l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
self.assertEqual(str(n1), "foo**bar")
self.assertTrue(isinstance(n1.children, list))
def test_leaves(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
l3 = pytree.Leaf(100, "fooey")
n2 = pytree.Node(1000, [l1, l2])
n3 = pytree.Node(1000, [l3])
n1 = pytree.Node(1000, [n2, n3])
self.assertEqual(list(n1.leaves()), [l1, l2, l3])
def test_depth(self):
l1 = pytree.Leaf(100, "foo")
l2 = pytree.Leaf(100, "bar")
n2 = pytree.Node(1000, [l1, l2])
n3 = pytree.Node(1000, [])
n1 = pytree.Node(1000, [n2, n3])
self.assertEqual(l1.depth(), 2)
self.assertEqual(n3.depth(), 1)
self.assertEqual(n1.depth(), 0)
def test_post_order(self):
    """post_order() yields children before their parents."""
    leaf_a = pytree.Leaf(100, "foo")
    leaf_b = pytree.Leaf(100, "bar")
    leaf_c = pytree.Leaf(100, "fooey")
    child = pytree.Node(1000, [leaf_a, leaf_b])
    root = pytree.Node(1000, [child, leaf_c])
    expected = [leaf_a, leaf_b, child, leaf_c, root]
    self.assertEqual(list(root.post_order()), expected)
def test_pre_order(self):
    """pre_order() yields each parent before its children."""
    leaf_a = pytree.Leaf(100, "foo")
    leaf_b = pytree.Leaf(100, "bar")
    leaf_c = pytree.Leaf(100, "fooey")
    child = pytree.Node(1000, [leaf_a, leaf_b])
    root = pytree.Node(1000, [child, leaf_c])
    expected = [root, child, leaf_a, leaf_b, leaf_c]
    self.assertEqual(list(root.pre_order()), expected)
def test_changed(self):
    """changed() sets was_changed on a node and all its ancestors,
    but never on its children."""
    l1 = pytree.Leaf(100, "f")
    self.assertFalse(l1.was_changed)
    l1.changed()
    self.assertTrue(l1.was_changed)
    # Fresh objects: a node reports changed() on itself.
    l1 = pytree.Leaf(100, "f")
    n1 = pytree.Node(1000, [l1])
    self.assertFalse(n1.was_changed)
    n1.changed()
    self.assertTrue(n1.was_changed)
    # Propagation goes upward (n1 -> n2), not downward (l1 stays clean).
    l1 = pytree.Leaf(100, "foo")
    l2 = pytree.Leaf(100, "+")
    l3 = pytree.Leaf(100, "bar")
    n1 = pytree.Node(1000, [l1, l2, l3])
    n2 = pytree.Node(1000, [n1])
    self.assertFalse(l1.was_changed)
    self.assertFalse(n1.was_changed)
    self.assertFalse(n2.was_changed)
    n1.changed()
    self.assertTrue(n1.was_changed)
    self.assertTrue(n2.was_changed)
    self.assertFalse(l1.was_changed)
def test_leaf_constructor_prefix(self):
    """The prefix= keyword sets Leaf.prefix and is emitted by str()."""
    for prefix in ("xyz_", ""):
        l1 = pytree.Leaf(100, "self", prefix=prefix)
        # Bug fix: the original used assertTrue(str(l1), prefix + "self"),
        # which treats the expected string as a failure *message* and only
        # checks that str(l1) is truthy.  Compare the values instead.
        self.assertEqual(str(l1), prefix + "self")
        self.assertEqual(l1.prefix, prefix)
def test_node_constructor_prefix(self):
    """A Node's prefix= keyword is forwarded to its first leaf."""
    for prefix in ("xyz_", ""):
        l1 = pytree.Leaf(100, "self")
        l2 = pytree.Leaf(100, "foo", prefix="_")
        n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
        # Bug fix: assertTrue(str(n1), ...) asserted only truthiness
        # (the second argument is the failure message); use assertEqual
        # so the rendered string is actually compared.
        self.assertEqual(str(n1), prefix + "self_foo")
        self.assertEqual(n1.prefix, prefix)
        self.assertEqual(l1.prefix, prefix)
        self.assertEqual(l2.prefix, "_")
def test_remove(self):
    """remove() detaches a node from its parent and returns its old index;
    the removed subtree keeps its own internal parent links."""
    l1 = pytree.Leaf(100, "foo")
    l2 = pytree.Leaf(100, "foo")
    n1 = pytree.Node(1000, [l1, l2])
    n2 = pytree.Node(1000, [n1])
    self.assertEqual(n1.remove(), 0)
    self.assertEqual(n2.children, [])
    # Only n1 was detached from n2; l1 is still a child of n1.
    self.assertEqual(l1.parent, n1)
    self.assertEqual(n1.parent, None)
    self.assertEqual(n2.parent, None)
    # Removal marks the old parent changed, not the removed node itself.
    self.assertFalse(n1.was_changed)
    self.assertTrue(n2.was_changed)
    self.assertEqual(l2.remove(), 1)
    self.assertEqual(l1.remove(), 0)
    self.assertEqual(n1.children, [])
    self.assertEqual(l1.parent, None)
    self.assertEqual(n1.parent, None)
    self.assertEqual(n2.parent, None)
    self.assertTrue(n1.was_changed)
    self.assertTrue(n2.was_changed)
def test_remove_parentless(self):
    """Removing a node that has no parent is a harmless no-op."""
    orphan_node = pytree.Node(1000, [])
    orphan_node.remove()
    self.assertIsNone(orphan_node.parent)
    orphan_leaf = pytree.Leaf(100, "foo")
    orphan_leaf.remove()
    self.assertIsNone(orphan_leaf.parent)
def test_node_set_child(self):
    """set_child() replaces the child at an index and fixes parent links."""
    l1 = pytree.Leaf(100, "foo")
    n1 = pytree.Node(1000, [l1])
    l2 = pytree.Leaf(100, "bar")
    n1.set_child(0, l2)
    self.assertEqual(l1.parent, None)
    self.assertEqual(l2.parent, n1)
    self.assertEqual(n1.children, [l2])
    n2 = pytree.Node(1000, [l1])
    n2.set_child(0, n1)
    self.assertEqual(l1.parent, None)
    self.assertEqual(n1.parent, n2)
    self.assertEqual(n2.parent, None)
    self.assertEqual(n2.children, [n1])
    # Out-of-range index is rejected.
    self.assertRaises(IndexError, n1.set_child, 4, l2)
    # I don't care what it raises, so long as it's an exception
    self.assertRaises(Exception, n1.set_child, 0, list)
def test_node_insert_child(self):
    """insert_child() inserts at an index and sets the child's parent."""
    l1 = pytree.Leaf(100, "foo")
    n1 = pytree.Node(1000, [l1])
    l2 = pytree.Leaf(100, "bar")
    n1.insert_child(0, l2)
    self.assertEqual(l2.parent, n1)
    self.assertEqual(n1.children, [l2, l1])
    l3 = pytree.Leaf(100, "abc")
    n1.insert_child(2, l3)
    self.assertEqual(n1.children, [l2, l1, l3])
    # I don't care what it raises, so long as it's an exception
    self.assertRaises(Exception, n1.insert_child, 0, list)
def test_node_append_child(self):
    """append_child() appends at the end and sets the child's parent."""
    n1 = pytree.Node(1000, [])
    l1 = pytree.Leaf(100, "foo")
    n1.append_child(l1)
    self.assertEqual(l1.parent, n1)
    self.assertEqual(n1.children, [l1])
    l2 = pytree.Leaf(100, "bar")
    n1.append_child(l2)
    self.assertEqual(l2.parent, n1)
    self.assertEqual(n1.children, [l1, l2])
    # I don't care what it raises, so long as it's an exception
    self.assertRaises(Exception, n1.append_child, list)
def test_node_next_sibling(self):
    """next_sibling is the following child of the same parent, else None."""
    first = pytree.Node(1000, [])
    second = pytree.Node(1000, [])
    parent = pytree.Node(1000, [first, second])
    self.assertIs(first.next_sibling, second)
    self.assertIsNone(second.next_sibling)
    self.assertIsNone(parent.next_sibling)
def test_leaf_next_sibling(self):
    """Leaves expose next_sibling exactly like nodes do."""
    first = pytree.Leaf(100, "a")
    second = pytree.Leaf(100, "b")
    parent = pytree.Node(1000, [first, second])
    self.assertIs(first.next_sibling, second)
    self.assertIsNone(second.next_sibling)
    self.assertIsNone(parent.next_sibling)
def test_node_prev_sibling(self):
    """prev_sibling is the preceding child of the same parent, else None."""
    first = pytree.Node(1000, [])
    second = pytree.Node(1000, [])
    parent = pytree.Node(1000, [first, second])
    self.assertIs(second.prev_sibling, first)
    self.assertIsNone(first.prev_sibling)
    self.assertIsNone(parent.prev_sibling)
def test_leaf_prev_sibling(self):
    """Leaves expose prev_sibling exactly like nodes do."""
    first = pytree.Leaf(100, "a")
    second = pytree.Leaf(100, "b")
    parent = pytree.Node(1000, [first, second])
    self.assertIs(second.prev_sibling, first)
    self.assertIsNone(first.prev_sibling)
    self.assertIsNone(parent.prev_sibling)
class TestPatterns(support.TestCase):
    """Unit tests for tree matching patterns."""

    def test_basic_patterns(self):
        """LeafPattern/NodePattern match by type+value and bind names."""
        # Build a tree
        l1 = pytree.Leaf(100, "foo")
        l2 = pytree.Leaf(100, "bar")
        l3 = pytree.Leaf(100, "foo")
        n1 = pytree.Node(1000, [l1, l2])
        n2 = pytree.Node(1000, [l3])
        root = pytree.Node(1000, [n1, n2])
        # Build a pattern matching a leaf
        pl = pytree.LeafPattern(100, "foo", name="pl")
        r = {}
        # A leaf pattern must not match nodes, and leaves no bindings behind
        # on failure.
        self.assertFalse(pl.match(root, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pl.match(n1, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pl.match(n2, results=r))
        self.assertEqual(r, {})
        self.assertTrue(pl.match(l1, results=r))
        self.assertEqual(r, {"pl": l1})
        r = {}
        self.assertFalse(pl.match(l2, results=r))
        self.assertEqual(r, {})
        # Build a pattern matching a node
        pn = pytree.NodePattern(1000, [pl], name="pn")
        self.assertFalse(pn.match(root, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pn.match(n1, results=r))
        self.assertEqual(r, {})
        # Matching binds both the node name and the nested leaf name.
        self.assertTrue(pn.match(n2, results=r))
        self.assertEqual(r, {"pn": n2, "pl": l3})
        r = {}
        self.assertFalse(pn.match(l1, results=r))
        self.assertEqual(r, {})
        self.assertFalse(pn.match(l2, results=r))
        self.assertEqual(r, {})

    def test_wildcard(self):
        """WildcardPattern tries each alternative subsequence in turn."""
        # Build a tree for testing
        l1 = pytree.Leaf(100, "foo")
        l2 = pytree.Leaf(100, "bar")
        l3 = pytree.Leaf(100, "foo")
        n1 = pytree.Node(1000, [l1, l2])
        n2 = pytree.Node(1000, [l3])
        root = pytree.Node(1000, [n1, n2])
        # Build a pattern
        pl = pytree.LeafPattern(100, "foo", name="pl")
        pn = pytree.NodePattern(1000, [pl], name="pn")
        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
        r = {}
        self.assertFalse(pw.match_seq([root], r))
        self.assertEqual(r, {})
        self.assertFalse(pw.match_seq([n1], r))
        self.assertEqual(r, {})
        self.assertTrue(pw.match_seq([n2], r))
        # These are easier to debug
        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
        # Leaf equality is by type/value, so l1 == l3 here.
        self.assertEqual(r["pl"], l1)
        self.assertEqual(r["pn"], n2)
        self.assertEqual(r["pw"], [n2])
        # But this is equivalent
        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
        r = {}
        self.assertTrue(pw.match_seq([l1, l3], r))
        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
        self.assertTrue(r["pl"] is l3)
        r = {}

    def test_generate_matches(self):
        """generate_matches() yields (match-length, bindings) pairs."""
        la = pytree.Leaf(1, "a")
        lb = pytree.Leaf(1, "b")
        lc = pytree.Leaf(1, "c")
        ld = pytree.Leaf(1, "d")
        le = pytree.Leaf(1, "e")
        lf = pytree.Leaf(1, "f")
        leaves = [la, lb, lc, ld, le, lf]
        root = pytree.Node(1000, leaves)
        pa = pytree.LeafPattern(1, "a", "pa")
        pb = pytree.LeafPattern(1, "b", "pb")
        pc = pytree.LeafPattern(1, "c", "pc")
        pd = pytree.LeafPattern(1, "d", "pd")
        pe = pytree.LeafPattern(1, "e", "pe")
        pf = pytree.LeafPattern(1, "f", "pf")
        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
                                     [pa, pb], [pc, pd], [pe, pf]],
                                    min=1, max=4, name="pw")
        # Each alternative can extend a previous partial match, hence the
        # particular sequence of consumed-leaf counts below.
        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
                         [3, 5, 2, 4, 6])
        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
        matches = list(pytree.generate_matches([pr], [root]))
        self.assertEqual(len(matches), 1)
        c, r = matches[0]
        self.assertEqual(c, 1)
        self.assertEqual(str(r["pr"]), "abcdef")
        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
        for c in "abcdef":
            self.assertEqual(r["p" + c], pytree.Leaf(1, c))

    def test_has_key_example(self):
        """Match a parenthesized call-like node, capturing the arguments."""
        pattern = pytree.NodePattern(331,
                                     (pytree.LeafPattern(7),
                                      pytree.WildcardPattern(name="args"),
                                      pytree.LeafPattern(8)))
        l1 = pytree.Leaf(7, "(")
        l2 = pytree.Leaf(3, "x")
        l3 = pytree.Leaf(8, ")")
        node = pytree.Node(331, [l1, l2, l3])
        r = {}
        self.assertTrue(pattern.match(node, r))
        self.assertEqual(r["args"], [l2])
| gpl-2.0 |
cpc26/python-oauth2 | example/client.py | 375 | 6700 | """
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Example consumer. This is not recommended for production.
Instead, you'll want to create your own subclass of OAuthClient
or find one that works with your web framework.
"""
import httplib
import time
import oauth.oauth as oauth
# settings for the local test consumer
SERVER = 'localhost'
PORT = 8080
# fake urls for the test server (matches ones in server.py)
REQUEST_TOKEN_URL = 'https://photos.example.net/request_token'
ACCESS_TOKEN_URL = 'https://photos.example.net/access_token'
AUTHORIZATION_URL = 'https://photos.example.net/authorize'
CALLBACK_URL = 'http://printer.example.com/request_token_ready'
RESOURCE_URL = 'http://photos.example.net/photos'
# key and secret granted by the service provider for this consumer application - same as the MockOAuthDataStore
CONSUMER_KEY = 'key'
CONSUMER_SECRET = 'secret'
# example client using httplib with headers
class SimpleOAuthClient(oauth.OAuthClient):
    """Minimal OAuth 1.0 consumer built directly on httplib (Python 2).

    Demonstration code only: it reuses one HTTPConnection and performs
    no error handling.
    """

    def __init__(self, server, port=httplib.HTTP_PORT, request_token_url='', access_token_url='', authorization_url=''):
        self.server = server
        self.port = port
        self.request_token_url = request_token_url
        self.access_token_url = access_token_url
        self.authorization_url = authorization_url
        # NOTE: a single plain-HTTP connection is reused for every call.
        self.connection = httplib.HTTPConnection("%s:%d" % (self.server, self.port))

    def fetch_request_token(self, oauth_request):
        """Request a temporary (request) token; OAuth params sent in headers."""
        # via headers
        # -> OAuthToken
        self.connection.request(oauth_request.http_method, self.request_token_url, headers=oauth_request.to_header())
        response = self.connection.getresponse()
        return oauth.OAuthToken.from_string(response.read())

    def fetch_access_token(self, oauth_request):
        """Exchange an authorized request token for an access token."""
        # via headers
        # -> OAuthToken
        self.connection.request(oauth_request.http_method, self.access_token_url, headers=oauth_request.to_header())
        response = self.connection.getresponse()
        return oauth.OAuthToken.from_string(response.read())

    def authorize_token(self, oauth_request):
        """Ask the provider to authorize the token; params in the URL."""
        # via url
        # -> typically just some okay response
        self.connection.request(oauth_request.http_method, oauth_request.to_url())
        response = self.connection.getresponse()
        return response.read()

    def access_resource(self, oauth_request):
        """Fetch a protected resource; OAuth params sent in the POST body."""
        # via post body
        # -> some protected resources
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        self.connection.request('POST', RESOURCE_URL, body=oauth_request.to_postdata(), headers=headers)
        response = self.connection.getresponse()
        return response.read()
def run_example():
    """Walk through the full three-legged OAuth 1.0 flow against the
    local test server: request token -> authorization -> access token ->
    protected resource."""
    # setup
    print '** OAuth Python Library Example **'
    client = SimpleOAuthClient(SERVER, PORT, REQUEST_TOKEN_URL, ACCESS_TOKEN_URL, AUTHORIZATION_URL)
    consumer = oauth.OAuthConsumer(CONSUMER_KEY, CONSUMER_SECRET)
    signature_method_plaintext = oauth.OAuthSignatureMethod_PLAINTEXT()
    signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
    pause()
    # get request token
    print '* Obtain a request token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, callback=CALLBACK_URL, http_url=client.request_token_url)
    oauth_request.sign_request(signature_method_plaintext, consumer, None)
    print 'REQUEST (via headers)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    token = client.fetch_request_token(oauth_request)
    print 'GOT'
    print 'key: %s' % str(token.key)
    print 'secret: %s' % str(token.secret)
    print 'callback confirmed? %s' % str(token.callback_confirmed)
    pause()
    print '* Authorize the request token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_token_and_callback(token=token, http_url=client.authorization_url)
    print 'REQUEST (via url query string)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    # this will actually occur only on some callback
    response = client.authorize_token(oauth_request)
    print 'GOT'
    print response
    # sad way to get the verifier
    import urlparse, cgi
    query = urlparse.urlparse(response)[4]
    params = cgi.parse_qs(query, keep_blank_values=False)
    verifier = params['oauth_verifier'][0]
    print 'verifier: %s' % verifier
    pause()
    # get access token
    print '* Obtain an access token ...'
    pause()
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, verifier=verifier, http_url=client.access_token_url)
    oauth_request.sign_request(signature_method_plaintext, consumer, token)
    print 'REQUEST (via headers)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    token = client.fetch_access_token(oauth_request)
    print 'GOT'
    print 'key: %s' % str(token.key)
    print 'secret: %s' % str(token.secret)
    pause()
    # access some protected resources
    print '* Access protected resources ...'
    pause()
    parameters = {'file': 'vacation.jpg', 'size': 'original'} # resource specific params
    # The resource request is signed with HMAC-SHA1 (the token calls above
    # used PLAINTEXT).
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=token, http_method='POST', http_url=RESOURCE_URL, parameters=parameters)
    oauth_request.sign_request(signature_method_hmac_sha1, consumer, token)
    print 'REQUEST (via post body)'
    print 'parameters: %s' % str(oauth_request.parameters)
    pause()
    params = client.access_resource(oauth_request)
    print 'GOT'
    print 'non-oauth parameters: %s' % params
    pause()
def pause():
    """Print a blank line and wait briefly so the console output is readable."""
    print ''
    time.sleep(1)
if __name__ == '__main__':
run_example()
print 'Done.' | mit |
SujaySKumar/django | tests/invalid_models_tests/test_models.py | 144 | 24524 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import unittest
from django.conf import settings
from django.core.checks import Error
from django.db import connections, models
from django.test.utils import override_settings
from .base import IsolatedModelsTestCase
def get_max_column_name_length():
    """Return ``(length, alias)`` for the configured database with the
    smallest column-name length limit.

    Databases that report no limit, or that silently truncate long names,
    are skipped.  Returns ``(None, None)`` when no database enforces a
    limit.
    """
    allowed_len = None
    db_alias = None
    # Iterate the dict directly instead of .keys(); merge the two update
    # branches (first limit seen / smaller limit seen) into one condition.
    for db in settings.DATABASES:
        connection = connections[db]
        max_name_length = connection.ops.max_name_length()
        if max_name_length is None or connection.features.truncates_names:
            continue
        if allowed_len is None or max_name_length < allowed_len:
            allowed_len = max_name_length
            db_alias = db
    return (allowed_len, db_alias)
class IndexTogetherTests(IsolatedModelsTestCase):
    """System-check validation of Meta.index_together
    (error ids models.E008, E009, E012, E013, E016)."""

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = 42
        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A string is iterable but still rejected.
        class Model(models.Model):
            class Meta:
                index_together = 'not-a-list'
        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = [('a', 'b'), 42]
        errors = Model.check()
        expected = [
            Error(
                "All 'index_together' elements must be lists or tuples.",
                hint=None,
                obj=Model,
                id='models.E009',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                index_together = [
                    ["missing_field"],
                ]
        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_non_local_field(self):
        # Fields inherited via multi-table inheritance live on the parent
        # table, so they cannot appear in the child's index_together.
        class Foo(models.Model):
            field1 = models.IntegerField()
        class Bar(Foo):
            field2 = models.IntegerField()
            class Meta:
                index_together = [
                    ["field2", "field1"],
                ]
        errors = Bar.check()
        expected = [
            Error(
                "'index_together' refers to field 'field1' which is not "
                "local to model 'Bar'.",
                hint=("This issue may be caused by multi-table inheritance."),
                obj=Bar,
                id='models.E016',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m_field(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')
            class Meta:
                index_together = [
                    ["m2m"],
                ]
        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'index_together'.",
                hint=None,
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
# unique_together tests are very similar to index_together tests.
class UniqueTogetherTests(IsolatedModelsTestCase):
    """System-check validation of Meta.unique_together
    (error ids models.E010, E011, E012, E013)."""

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                unique_together = 42
        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()
            class Meta:
                unique_together = [('a', 'b'), 42]
        errors = Model.check()
        expected = [
            Error(
                "All 'unique_together' elements must be lists or tuples.",
                hint=None,
                obj=Model,
                id='models.E011',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A string is iterable but still rejected.
        class Model(models.Model):
            class Meta:
                unique_together = 'not-a-list'
        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                hint=None,
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_valid_model(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()
            class Meta:
                # unique_together can be a simple tuple
                unique_together = ('one', 'two')
        errors = Model.check()
        self.assertEqual(errors, [])

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                unique_together = [
                    ["missing_field"],
                ]
        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')
            class Meta:
                unique_together = [
                    ["m2m"],
                ]
        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'unique_together'.",
                hint=None,
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
class FieldNamesTests(IsolatedModelsTestCase):
    """Checks on field names: reserved names, forbidden characters, and
    database column-name length limits (fields.E001-E003, models.E018/E019)."""

    def test_ending_with_underscore(self):
        class Model(models.Model):
            field_ = models.CharField(max_length=10)
            m2m_ = models.ManyToManyField('self')
        errors = Model.check()
        expected = [
            Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=Model._meta.get_field('field_'),
                id='fields.E001',
            ),
            Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=Model._meta.get_field('m2m_'),
                id='fields.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    # Evaluated at class-definition time so the skipIf decorators below can
    # use the values as plain names.
    max_column_name_length, column_limit_db_alias = get_max_column_name_length()

    @unittest.skipIf(max_column_name_length is None,
                     "The database doesn't have a column name length limit.")
    def test_M2M_long_column_name(self):
        """
        #13711 -- Model check for long M2M column names when database has
        column name length limits.
        """
        # NOTE(review): these locals appear unused; the assertions below use
        # the class attributes instead — confirm before removing.
        allowed_len, db_alias = get_max_column_name_length()
        # A model with very long name which will be used to set relations to.
        class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
            title = models.CharField(max_length=11)
        # Main model for which checks will be performed.
        class ModelWithLongField(models.Model):
            m2m_field = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn1"
            )
            m2m_field2 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn2", through='m2msimple'
            )
            m2m_field3 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn3",
                through='m2mcomplex'
            )
            fk = models.ForeignKey(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                models.CASCADE,
                related_name="rn4",
            )
        # Models used for setting `through` in M2M field.
        class m2msimple(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
        class m2mcomplex(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)
        long_field_name = 'a' * (self.max_column_name_length + 1)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
        ).contribute_to_class(m2msimple, long_field_name)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
            db_column=long_field_name
        ).contribute_to_class(m2mcomplex, long_field_name)
        errors = ModelWithLongField.check()
        # First error because of M2M field set on the model with long name.
        m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
        if self.max_column_name_length > len(m2m_long_name):
            # Some databases support names longer than the test name.
            expected = []
        else:
            expected = [
                Error(
                    'Autogenerated column name too long for M2M field "%s". '
                    'Maximum length is "%s" for database "%s".'
                    % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
                    hint=("Use 'through' to create a separate model for "
                          "M2M and then set column_name using 'db_column'."),
                    obj=ModelWithLongField,
                    id='models.E019',
                )
            ]
        # Second error because the FK specified in the `through` model
        # `m2msimple` has auto-genererated name longer than allowed.
        # There will be no check errors in the other M2M because it
        # specifies db_column for the FK in `through` model even if the actual
        # name is longer than the limits of the database.
        expected.append(
            Error(
                'Autogenerated column name too long for M2M field "%s_id". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint=("Use 'through' to create a separate model for "
                      "M2M and then set column_name using 'db_column'."),
                obj=ModelWithLongField,
                id='models.E019',
            )
        )
        self.assertEqual(errors, expected)

    @unittest.skipIf(max_column_name_length is None,
                     "The database doesn't have a column name length limit.")
    def test_local_field_long_column_name(self):
        """
        #13711 -- Model check for long column names
        when database does not support long names.
        """
        # NOTE(review): these locals appear unused; the assertions below use
        # the class attributes instead — confirm before removing.
        allowed_len, db_alias = get_max_column_name_length()
        class ModelWithLongField(models.Model):
            title = models.CharField(max_length=11)
        long_field_name = 'a' * (self.max_column_name_length + 1)
        long_field_name2 = 'b' * (self.max_column_name_length + 1)
        models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
        models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
        errors = ModelWithLongField.check()
        # Error because of the field with long name added to the model
        # without specifying db_column
        expected = [
            Error(
                'Autogenerated column name too long for field "%s". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Set the column name manually using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E018',
            )
        ]
        self.assertEqual(errors, expected)

    def test_including_separator(self):
        # "__" is reserved for lookup traversal in querysets.
        class Model(models.Model):
            some__field = models.IntegerField()
        errors = Model.check()
        expected = [
            Error(
                'Field names must not contain "__".',
                hint=None,
                obj=Model._meta.get_field('some__field'),
                id='fields.E002',
            )
        ]
        self.assertEqual(errors, expected)

    def test_pk(self):
        class Model(models.Model):
            pk = models.IntegerField()
        errors = Model.check()
        expected = [
            Error(
                "'pk' is a reserved word that cannot be used as a field name.",
                hint=None,
                obj=Model._meta.get_field('pk'),
                id='fields.E003',
            )
        ]
        self.assertEqual(errors, expected)
class ShadowingFieldsTests(IsolatedModelsTestCase):
    """Checks for fields that clash with inherited or implicit fields
    (error ids models.E005, E006)."""

    def test_field_name_clash_with_child_accessor(self):
        class Parent(models.Model):
            pass
        class Child(Parent):
            child = models.CharField(max_length=100)
        errors = Child.check()
        expected = [
            Error(
                "The field 'child' clashes with the field "
                "'child' from model 'invalid_models_tests.parent'.",
                hint=None,
                obj=Child._meta.get_field('child'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multiinheritance_clash(self):
        class Mother(models.Model):
            clash = models.IntegerField()
        class Father(models.Model):
            clash = models.IntegerField()
        class Child(Mother, Father):
            # Here we have two clashed: id (automatic field) and clash, because
            # both parents define these fields.
            pass
        errors = Child.check()
        expected = [
            Error(
                "The field 'id' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'id' "
                "from parent model 'invalid_models_tests.father'.",
                hint=None,
                obj=Child,
                id='models.E005',
            ),
            Error(
                "The field 'clash' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'clash' "
                "from parent model 'invalid_models_tests.father'.",
                hint=None,
                obj=Child,
                id='models.E005',
            )
        ]
        self.assertEqual(errors, expected)

    def test_inheritance_clash(self):
        class Parent(models.Model):
            f_id = models.IntegerField()
        class Target(models.Model):
            # This field doesn't result in a clash.
            f_id = models.IntegerField()
        class Child(Parent):
            # This field clashes with parent "f_id" field.
            f = models.ForeignKey(Target, models.CASCADE)
        errors = Child.check()
        expected = [
            Error(
                "The field 'f' clashes with the field 'f_id' "
                "from model 'invalid_models_tests.parent'.",
                hint=None,
                obj=Child._meta.get_field('f'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multigeneration_inheritance(self):
        # The clash is detected across more than one inheritance level.
        class GrandParent(models.Model):
            clash = models.IntegerField()
        class Parent(GrandParent):
            pass
        class Child(Parent):
            pass
        class GrandChild(Child):
            clash = models.IntegerField()
        errors = GrandChild.check()
        expected = [
            Error(
                "The field 'clash' clashes with the field 'clash' "
                "from model 'invalid_models_tests.grandparent'.",
                hint=None,
                obj=GrandChild._meta.get_field('clash'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_id_clash(self):
        # A concrete field clashing with a FK's implicit "<name>_id" column.
        class Target(models.Model):
            pass
        class Model(models.Model):
            fk = models.ForeignKey(Target, models.CASCADE)
            fk_id = models.IntegerField()
        errors = Model.check()
        expected = [
            Error(
                "The field 'fk_id' clashes with the field 'fk' from model "
                "'invalid_models_tests.model'.",
                hint=None,
                obj=Model._meta.get_field('fk_id'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)
class OtherModelTests(IsolatedModelsTestCase):
    """Miscellaneous model-level checks: 'id' usage, Meta.ordering,
    order_with_respect_to, swappable settings, and duplicate M2M
    relations through one intermediate model."""

    def test_unique_primary_key(self):
        invalid_id = models.IntegerField(primary_key=False)
        class Model(models.Model):
            id = invalid_id
        errors = Model.check()
        expected = [
            Error(
                "'id' can only be used as a field name if the field also sets "
                "'primary_key=True'.",
                hint=None,
                obj=Model,
                id='models.E004',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_ordering_non_iterable(self):
        class Model(models.Model):
            class Meta:
                ordering = "missing_field"
        errors = Model.check()
        expected = [
            Error(
                "'ordering' must be a tuple or list "
                "(even if you want to order by only one field).",
                hint=None,
                obj=Model,
                id='models.E014',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_just_ordering_no_errors(self):
        class Model(models.Model):
            order = models.PositiveIntegerField()
            class Meta:
                ordering = ['order']
        self.assertEqual(Model.check(), [])

    def test_just_order_with_respect_to_no_errors(self):
        class Question(models.Model):
            pass
        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)
            class Meta:
                order_with_respect_to = 'question'
        self.assertEqual(Answer.check(), [])

    def test_ordering_with_order_with_respect_to(self):
        # The two options are mutually exclusive.
        class Question(models.Model):
            pass
        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)
            order = models.IntegerField()
            class Meta:
                order_with_respect_to = 'question'
                ordering = ['order']
        errors = Answer.check()
        expected = [
            Error(
                "'ordering' and 'order_with_respect_to' cannot be used together.",
                hint=None,
                obj=Answer,
                id='models.E021',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_valid(self):
        # Ordering by an M2M field is reported as a non-existent field.
        class RelationModel(models.Model):
            pass
        class Model(models.Model):
            relation = models.ManyToManyField(RelationModel)
            class Meta:
                ordering = ['relation']
        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'relation'.",
                hint=None,
                obj=Model,
                id='models.E015',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                ordering = ("missing_field",)
        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'missing_field'.",
                hint=None,
                obj=Model,
                id='models.E015',
            )
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_missing_foreignkey_field(self):
        # refs #22711
        class Model(models.Model):
            missing_fk_field = models.IntegerField()
            class Meta:
                ordering = ("missing_fk_field_id",)
        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'missing_fk_field_id'.",
                hint=None,
                obj=Model,
                id='models.E015',
            )
        ]
        self.assertEqual(errors, expected)

    def test_ordering_pointing_to_existing_foreignkey_field(self):
        # refs #22711
        class Parent(models.Model):
            pass
        class Child(models.Model):
            parent = models.ForeignKey(Parent, models.CASCADE)
            class Meta:
                ordering = ("parent_id",)
        self.assertFalse(Child.check())

    @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
    def test_swappable_missing_app_name(self):
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
        errors = Model.check()
        expected = [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
                hint=None,
                obj=None,
                id='models.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
    def test_swappable_missing_app(self):
        class Model(models.Model):
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
        errors = Model.check()
        expected = [
            Error(
                "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
                'which has not been installed, or is abstract.',
                hint=None,
                obj=None,
                id='models.E002',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_two_m2m_through_same_relationship(self):
        class Person(models.Model):
            pass
        class Group(models.Model):
            primary = models.ManyToManyField(Person,
                through="Membership", related_name="primary")
            secondary = models.ManyToManyField(Person, through="Membership",
                related_name="secondary")
        class Membership(models.Model):
            person = models.ForeignKey(Person, models.CASCADE)
            group = models.ForeignKey(Group, models.CASCADE)
        errors = Group.check()
        expected = [
            Error(
                "The model has two many-to-many relations through "
                "the intermediate model 'invalid_models_tests.Membership'.",
                hint=None,
                obj=Group,
                id='models.E003',
            )
        ]
        self.assertEqual(errors, expected)
| bsd-3-clause |
olemb/mido | mido/backends/portmidi_init.py | 1 | 4274 | """
Low-level wrapper for PortMidi library
Copied straight from Grant Yoshida's portmidizero, with slight
modifications.
"""
import sys
from ctypes import (CDLL, CFUNCTYPE, POINTER, Structure, c_char_p,
c_int, c_long, c_uint, c_void_p, cast,
create_string_buffer)
import ctypes.util
# Pick the platform-specific shared-library name for PortMidi and load it.
dll_name = ''
if sys.platform == 'darwin':
    # find_library resolves the full path on macOS (may return None if the
    # library is not installed, in which case CDLL below fails).
    dll_name = ctypes.util.find_library('libportmidi.dylib')
elif sys.platform in ('win32', 'cygwin'):
    dll_name = 'portmidi.dll'
else:
    dll_name = 'libportmidi.so'

lib = CDLL(dll_name)  # raises OSError if the library cannot be loaded

# C-style aliases used by the rest of this wrapper.
null = None
false = 0
true = 1

# portmidi.h
# From portmidi.h
PM_HOST_ERROR_MSG_LEN = 256  # buffer size expected by Pm_GetHostErrorText
def get_host_error_message():
    """Return the current PortMidi host error message as a Python string.

    Fills a fixed-size buffer via Pm_GetHostErrorText and decodes the
    NUL-terminated C string it contains.
    """
    buf = create_string_buffer(PM_HOST_ERROR_MSG_LEN)
    lib.Pm_GetHostErrorText(buf, PM_HOST_ERROR_MSG_LEN)
    # .value yields exactly the bytes up to the first NUL, which is the
    # idiomatic way to read a C string from a ctypes buffer; the previous
    # .raw.decode().rstrip('\0') decoded the entire buffer and relied on
    # the unused tail being zero-filled.
    return buf.value.decode()
# All PortMidi functions report status through this C int.
PmError = c_int
# PmError enum
pmNoError = 0
pmHostError = -10000
pmInvalidDeviceId = -9999
pmInsufficientMemory = -9989
pmBufferTooSmall = -9979
pmBufferOverflow = -9969
pmBadPtr = -9959
pmBadData = -9994
pmInternalError = -9993
pmBufferMaxSize = -9992

lib.Pm_Initialize.restype = PmError
lib.Pm_Terminate.restype = PmError

# Opaque device-id and stream handle types.
PmDeviceID = c_int
PortMidiStreamPtr = c_void_p
PmStreamPtr = PortMidiStreamPtr
PortMidiStreamPtrPtr = POINTER(PortMidiStreamPtr)

# Declare restype/argtypes so ctypes marshals arguments correctly.
lib.Pm_HasHostError.restype = c_int
lib.Pm_HasHostError.argtypes = [PortMidiStreamPtr]
lib.Pm_GetErrorText.restype = c_char_p
lib.Pm_GetErrorText.argtypes = [PmError]
lib.Pm_GetHostErrorText.argtypes = [c_char_p, c_uint]

pmNoDevice = -1  # returned when no default device exists
class PmDeviceInfo(Structure):
    # Mirrors the PmDeviceInfo struct from portmidi.h.
    _fields_ = [("structVersion", c_int),   # struct version number
                ("interface", c_char_p),    # host MIDI API name
                ("name", c_char_p),         # device name
                ("is_input", c_int),        # non-zero if readable as input
                ("is_output", c_int),       # non-zero if usable as output
                ("opened", c_int)]          # non-zero while the device is open
PmDeviceInfoPtr = POINTER(PmDeviceInfo)

# Device enumeration.
lib.Pm_CountDevices.restype = c_int
lib.Pm_GetDefaultOutputDeviceID.restype = PmDeviceID
lib.Pm_GetDefaultInputDeviceID.restype = PmDeviceID

# Timestamps and the optional user-supplied time callback.
PmTimestamp = c_long
PmTimeProcPtr = CFUNCTYPE(PmTimestamp, c_void_p)
# Passed in place of a callback when no custom time source is wanted.
NullTimeProcPtr = cast(null, PmTimeProcPtr)
# PmBefore is not defined

lib.Pm_GetDeviceInfo.argtypes = [PmDeviceID]
lib.Pm_GetDeviceInfo.restype = PmDeviceInfoPtr

# Stream open/close and filtering.
lib.Pm_OpenInput.restype = PmError
lib.Pm_OpenInput.argtypes = [PortMidiStreamPtrPtr,
                             PmDeviceID,
                             c_void_p,
                             c_long,
                             PmTimeProcPtr,
                             c_void_p]
lib.Pm_OpenOutput.restype = PmError
lib.Pm_OpenOutput.argtypes = [PortMidiStreamPtrPtr,
                              PmDeviceID,
                              c_void_p,
                              c_long,
                              PmTimeProcPtr,
                              c_void_p,
                              c_long]
lib.Pm_SetFilter.restype = PmError
lib.Pm_SetFilter.argtypes = [PortMidiStreamPtr, c_long]
lib.Pm_SetChannelMask.restype = PmError
lib.Pm_SetChannelMask.argtypes = [PortMidiStreamPtr, c_int]
lib.Pm_Abort.restype = PmError
lib.Pm_Abort.argtypes = [PortMidiStreamPtr]
lib.Pm_Close.restype = PmError
lib.Pm_Close.argtypes = [PortMidiStreamPtr]

# A complete MIDI message packed into a single long.
PmMessage = c_long
class PmEvent(Structure):
    # Mirrors PmEvent from portmidi.h: one MIDI message plus its timestamp.
    _fields_ = [("message", PmMessage),
                ("timestamp", PmTimestamp)]
PmEventPtr = POINTER(PmEvent)

# Reading and writing MIDI data.
lib.Pm_Read.restype = PmError
lib.Pm_Read.argtypes = [PortMidiStreamPtr, PmEventPtr, c_long]
lib.Pm_Poll.restype = PmError
lib.Pm_Poll.argtypes = [PortMidiStreamPtr]
lib.Pm_Write.restype = PmError
lib.Pm_Write.argtypes = [PortMidiStreamPtr, PmEventPtr, c_long]
lib.Pm_WriteShort.restype = PmError
lib.Pm_WriteShort.argtypes = [PortMidiStreamPtr, PmTimestamp, c_long]
lib.Pm_WriteSysEx.restype = PmError
lib.Pm_WriteSysEx.argtypes = [PortMidiStreamPtr, PmTimestamp, c_char_p]

# porttime.h
# PtError enum
PtError = c_int
ptNoError = 0
ptHostError = -10000
ptAlreadyStarted = -9999
ptAlreadyStopped = -9998
ptInsufficientMemory = -9997

PtTimestamp = c_long
PtCallback = CFUNCTYPE(PmTimestamp, c_void_p)

# porttime timer control.
lib.Pt_Start.restype = PtError
lib.Pt_Start.argtypes = [c_int, PtCallback, c_void_p]
lib.Pt_Stop.restype = PtError
lib.Pt_Started.restype = c_int
lib.Pt_Time.restype = PtTimestamp
| mit |
godiard/wikipedia-activity | tools2/make_selection.py | 1 | 12925 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# take a list of pages
# select a level default = 1
# prepare a list of links in the pages from the original list
# create a file with the titles of all the selected pages
# create a file with the content of all the selected pages
import codecs
import re
from xml.sax import make_parser, handler
import os
import sys
from operator import itemgetter
import config
from wikitools_utils import normalize_title
from wikitools_utils import FileListReader, RedirectParser, RedirectsUsedWriter
from wikitools_utils import TemplatesCounter, TemplatesCounterWriter
from wikitools_utils import LinksFilter
from wikitools_utils import CountedTemplatesReader, normalize_title
try:
from hashlib import md5
except ImportError:
from md5 import md5
class PagesLinksFilter():
def __init__(self, file_name, redirects_checker):
"""
Read the list of pages from the .links file
"""
self.pages = []
input_links = codecs.open('%s.links' % file_name,
encoding='utf-8', mode='r')
line = input_links.readline()
while line:
words = line.split()
if len(words) > 0:
page = words[0]
print "Adding page %s" % page
redirected = redirects_checker.get_redirected(page)
if redirected is not None:
page = redirected
if not page in self.pages:
self.pages.append(page)
line = input_links.readline()
input_links.close()
class PagesProcessor(handler.ContentHandler):
    """SAX handler that copies selected wiki pages out of an XML dump.

    For an input dump `file_name` it writes two files:
      * file_name.processed:   records framed as \\x01 title / length /
        \\x02 text / \\x03 for every selected, non-blacklisted article.
      * file_name.page_images: one line per page listing the thumbnail
        URLs of the images that page references.
    """

    def __init__(self, file_name, selected_pages_list, pages_blacklist):
        handler.ContentHandler.__init__(self)
        self._page_counter = 0  # number of <page> elements seen so far
        self._page = None       # wikitext of the page currently being read
        self._output = codecs.open('%s.processed' % file_name,
                                   encoding='utf-8', mode='w')
        self._output_page_images = codecs.open('%s.page_images' % file_name,
                                               encoding='utf-8', mode='w')
        # Matches image links such as [[File:...]]; config.FILE_TAG holds
        # the (possibly localized) file-namespace prefix.
        self.image_re = re.compile('\[\[%s.*?\]\]' % config.FILE_TAG)
        self._selected_pages_list = selected_pages_list
        self._pages_blacklist = pages_blacklist

    def startElement(self, name, attrs):
        # A new <page> starts a fresh accumulation buffer.
        # (self._page is set to a dict here only as a placeholder; the
        # <text> handler in endElement overwrites it with the page text.)
        if name == "page":
            self._page = {}
            self._page_counter += 1
            self._text = ""

    def characters(self, content):
        # SAX may deliver element text in several chunks; accumulate them.
        self._text = self._text + content

    def _register_page(self, register, title, content):
        # Record framing: \x01, title, content length, \x02, content, \x03.
        register.write('\01\n')
        register.write('%s\n' % normalize_title(title))
        register.write('%d\n' % len(content))
        register.write('\02\n')
        register.write('%s\n' % content)
        register.write('\03\n')

    def _hashpath(self, name):
        # MediaWiki-style hashed upload path: the md5 of the underscored,
        # first-letter-capitalized file name selects the "d/dd/Name" dirs.
        name = name.replace(' ', '_')
        name = name[:1].upper() + name[1:]
        d = md5(name.encode('utf-8')).hexdigest()
        return "/".join([d[0], d[:2], name])

    def _get_url_image(self, image_wiki):
        """
        Build the upload.wikimedia.org thumbnail URL for a wiki image link
        such as:
        [[Archivo:Johann Sebastian Bach.jpg|thumb|200px|right|[[J. S. Bach]]
        """
        # remove [[ and ]]
        image_wiki = image_wiki[2:-2]
        parts = image_wiki.split('|')
        name = parts[0]
        name = name[len(config.FILE_TAG):]
        image_size = config.MAX_IMAGE_SIZE
        # check if there are a size defined
        for part in parts:
            # this image sizes are copied from server.py
            if part.strip() == 'thumb':
                image_size = 180
                break
            if part.find('px') > -1:
                try:
                    image_size = int(part[:part.find('px')])
                except:
                    # malformed size spec: keep the current default
                    pass
        hashed_name = unicode(self._hashpath(name))  # .encode('utf8')
        url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/' \
            + hashed_name + ('/%dpx-' % image_size) + name.replace(' ', '_')
        # the svg files are requested as png
        if re.match(r'.*\.svg$', url, re.IGNORECASE):
            url = url + '.png'
        return url

    def get_images(self, title):
        # find images used in the pages
        images = self.image_re.findall(unicode(self._page))
        images_list = []
        for image in images:
            url = self._get_url_image(image)
            # only add one time by page
            if not url in images_list:
                images_list.append(url)
        # Emit "title url url ..." only for pages that reference images.
        if len(images_list) > 0:
            self._output_page_images.write('%s ' % title)
            for image in images_list:
                self._output_page_images.write('%s ' % image)
            self._output_page_images.write('\n')

    def endElement(self, name):
        if name == "title":
            self._title = self._text
        elif name == "text":
            self._page = self._text
        elif name == "page":
            # Skip pages in blacklisted namespaces entirely.
            for namespace in config.BLACKLISTED_NAMESPACES:
                if unicode(self._title).startswith(namespace):
                    return
            title = normalize_title(self._title)
            # Template pages: only collect their images here; their text is
            # appended to .processed later (see TemplatesLoader).
            for namespace in config.TEMPLATE_NAMESPACES:
                if unicode(self._title).startswith(namespace):
                    self.get_images(title)
                    return
            # Redirect pages carry no content of their own.
            for tag in config.REDIRECT_TAGS:
                if unicode(self._page).startswith(tag):
                    return
            if (title not in self._pages_blacklist) and \
                    (title in self._selected_pages_list):
                print "%d Page '%s', length %d \r" % \
                    (self._page_counter,
                     title.encode('ascii', 'replace'), len(self._page)),
                # processed
                self._register_page(self._output, title, self._page)
                self.get_images(title)
        elif name == "mediawiki":
            # End of the dump: flush and close both output files.
            self._output.close()
            self._output_page_images.close()
            print "Processed %d pages." % self._page_counter
class TemplatesLoader():
    """Appends wanted template pages to the .processed output file.

    Reads `file_name`.templates, which uses the same \\x01/\\x02/\\x03
    record framing written by PagesProcessor._register_page, and
    re-registers every template whose normalized name appears in
    `templates_used` (or every template when `select_all` is True).
    """

    def __init__(self, file_name, templates_used, select_all=False):
        _file = codecs.open('%s.templates' % file_name,
                            encoding='utf-8', mode='r')
        # Append mode: the article records were already written by
        # PagesProcessor.
        self._output = codecs.open('%s.processed' % file_name,
                                   encoding='utf-8', mode='a')
        line = _file.readline()
        while line:
            # A 2-character line is a control byte plus the newline.
            if len(line) == 2:
                if ord(line[0]) == 1:  # \x01 marks a record header
                    title = _file.readline()
                    size = _file.readline()       # length field; unused here
                    separator = _file.readline()  # the \x02 marker line
                    finish = False
                    template_content = ''
                    # Accumulate content lines until the \x03 end marker.
                    while not finish:
                        line = _file.readline()
                        #print line
                        if len(line) == 2:
                            if ord(line[0]) == 3:
                                finish = True
                                break
                        template_content += line
                    # Split "Namespace:Name" and normalize the name part.
                    template_namespace = title[:title.find(':')]
                    template_name = title[title.find(':') + 1:]
                    template_name = normalize_title(template_name)
                    #print "checking", template_name,
                    if select_all or template_name in templates_used.keys():
                        #print "Adding Template", template_name.encode('ascii', 'replace'), '\r',
                        title = template_namespace + ":" + template_name
                        self._register_page(title, template_content.strip())
            line = _file.readline()

    def _register_page(self, title, content):
        # Same record framing as PagesProcessor._register_page.
        self._output.write('\01\n')
        self._output.write('%s\n' % normalize_title(title))
        self._output.write('%d\n' % len(content))
        self._output.write('\02\n')
        self._output.write('%s\n' % content)
        self._output.write('\03\n')
if __name__ == '__main__':
    # Command-line driver: builds the .pages_selected*, .processed,
    # .templates_counted and .redirects_used artifacts for a wiki dump.
    # Each stage is skipped when its output file already exists.
    select_all = False
    if len(sys.argv) > 1:
        for argn in range(1, len(sys.argv)):
            arg = sys.argv[argn]
            if arg == '--all':
                select_all = True
                print "Selecting all the pages"
    # How many levels of outgoing links to follow from the favorites.
    MAX_LEVELS = 1
    if not select_all:
        fav_reader = FileListReader(config.favorites_file_name)
        print "Loaded %d favorite pages" % len(fav_reader.list)
    if os.path.exists(config.blacklist_file_name):
        pages_blacklisted_reader = FileListReader(config.blacklist_file_name)
        pages_blacklist = pages_blacklisted_reader.list
        print "Loaded %d blacklisted pages" % len(pages_blacklist)
    else:
        pages_blacklist = []
    input_xml_file_name = config.input_xml_file_name
    print "Init redirects checker"
    redirect_checker = RedirectParser(input_xml_file_name)
    level = 1
    if not select_all:
        selected_pages_file_name = '%s.pages_selected-level-%d' % \
            (input_xml_file_name, MAX_LEVELS)
    else:
        selected_pages_file_name = '%s.pages_selected' % input_xml_file_name
    if not os.path.exists(selected_pages_file_name):
        if not select_all:
            # Expand the favorites list by following links MAX_LEVELS deep.
            while level <= MAX_LEVELS:
                print "Processing links level %d" % level
                links_filter = LinksFilter(input_xml_file_name,
                                           redirect_checker, fav_reader.list)
                fav_reader.list.extend(links_filter.links)
                level += 1
            print "Writing pages_selected-level-%d file" % MAX_LEVELS
            output_file = codecs.open(selected_pages_file_name,
                                      encoding='utf-8', mode='w')
            for page in fav_reader.list:
                output_file.write('%s\n' % page)
            output_file.close()
            selected_pages_list = fav_reader.list
        else:
            print "Processing links"
            links_filter = PagesLinksFilter(input_xml_file_name,
                                            redirect_checker)
            print "Writing pages_selected file %d pages" % \
                len(links_filter.pages)
            output_file = codecs.open(selected_pages_file_name,
                                      encoding='utf-8', mode='w')
            for page in links_filter.pages:
                output_file.write('%s\n' % page)
            output_file.close()
            selected_pages_list = links_filter.pages
    else:
        # A previous run already selected the pages; just reload the list.
        print "Loading selected pages"
        pages_selected_reader = FileListReader(selected_pages_file_name)
        selected_pages_list = pages_selected_reader.list
    if not os.path.exists('%s.processed' % input_xml_file_name):
        print "Writing .processed file"
        parser = make_parser()
        parser.setContentHandler(PagesProcessor(input_xml_file_name,
                                                selected_pages_list,
                                                pages_blacklist))
        parser.parse(input_xml_file_name)
        # if there are a .templates_counted file should be removed
        # because we need recalculate it
        if os.path.exists('%s.templates_counted' % input_xml_file_name):
            os.remove('%s.templates_counted' % input_xml_file_name)
    templates_used_reader = None
    if not os.path.exists('%s.templates_counted' % input_xml_file_name):
        if select_all:
            templates_loader = TemplatesLoader(input_xml_file_name, [], True)
        else:
            # Count template usage across the selected pages, persist the
            # counts, then append the used templates to .processed.
            print "Processing templates"
            templates_counter = TemplatesCounter(input_xml_file_name,
                                                 selected_pages_list,
                                                 redirect_checker)
            print "Sorting counted templates"
            items = templates_counter.templates_to_counter.items()
            items.sort(key=itemgetter(1), reverse=True)
            print "Writing templates_counted file"
            _writer = TemplatesCounterWriter(input_xml_file_name, items)
            print "Loading templates used"
            templates_used_reader = CountedTemplatesReader(input_xml_file_name)
            print "Readed %d templates used" % len(
                templates_used_reader.templates)
            print "Adding used templates to .processed file"
            templates_loader = TemplatesLoader(input_xml_file_name,
                                               templates_used_reader.templates)
    if not os.path.exists('%s.redirects_used' % input_xml_file_name):
        if select_all:
            # Every redirect is "used" when everything is selected: just
            # hard-link the full redirects file.
            os.link('%s.redirects' % input_xml_file_name,
                    '%s.redirects_used' % input_xml_file_name)
        else:
            if templates_used_reader is None:
                print "Loading templates used"
                templates_used_reader = \
                    CountedTemplatesReader(input_xml_file_name)
                print "Readed %d templates used" % \
                    len(templates_used_reader.templates)
            redirects_used_writer = RedirectsUsedWriter(input_xml_file_name,
                selected_pages_list, templates_used_reader.templates,
                redirect_checker)
| gpl-2.0 |
DLu/rosbridge_suite | rosbridge_library/test/internal/test_ros_loader.py | 12 | 11569 | #!/usr/bin/env python
import sys
import rospy
import rostest
import unittest
from rosbridge_library.internal import ros_loader
class TestROSLoader(unittest.TestCase):
    """Tests type-string resolution in rosbridge_library.internal.ros_loader.

    The fixture lists below were previously repeated inline in several
    test methods (the "cache" tests duplicated the lookup tests' lists
    verbatim); sharing them as class attributes keeps the pairs of tests
    provably in sync.
    """

    # Malformed type strings; every loader entry point must reject them.
    BAD_TYPE_STRINGS = ["", "/", "//", "///", "////", "/////", "bad",
        "stillbad", "not/better/still", "not//better//still",
        "not///better///still", "better/", "better//", "better///",
        "/better", "//better", "///better", "this\isbad", "\\"]

    # All std_msgs message types; also used to verify the message cache.
    STD_MSGS = ["std_msgs/Bool", "std_msgs/Byte", "std_msgs/ByteMultiArray",
        "std_msgs/ColorRGBA", "std_msgs/Duration", "std_msgs/Empty",
        "std_msgs/Float32", "std_msgs/Float32MultiArray", "std_msgs/Float64",
        "std_msgs/Header", "std_msgs/Int16", "std_msgs/Int16MultiArray",
        "std_msgs/Int32", "std_msgs/Int32MultiArray", "std_msgs/Int64",
        "std_msgs/Int64MultiArray", "std_msgs/Int8", "std_msgs/Int8MultiArray",
        "std_msgs/MultiArrayDimension", "std_msgs/MultiArrayLayout",
        "std_msgs/String", "std_msgs/Time", "std_msgs/UInt16",
        "std_msgs/UInt16MultiArray", "std_msgs/UInt32MultiArray",
        "std_msgs/UInt64MultiArray", "std_msgs/UInt32", "std_msgs/UInt64",
        "std_msgs/UInt8", "std_msgs/UInt8MultiArray"]

    # Common service types; also used to verify the service cache.
    COMMON_SRVS = ["roscpp/GetLoggers", "roscpp/SetLoggerLevel",
        "std_srvs/Empty", "nav_msgs/GetMap", "nav_msgs/GetPlan",
        "sensor_msgs/SetCameraInfo", "topic_tools/MuxAdd",
        "topic_tools/MuxSelect", "tf2_msgs/FrameGraph",
        "rospy_tutorials/BadTwoInts", "rospy_tutorials/AddTwoInts"]

    def setUp(self):
        # NOTE(review): rospy.init_node is called once per test method;
        # this appears to rely on rospy tolerating repeated initialization
        # with the same name in a single process -- confirm.
        rospy.init_node("test_ros_loader")

    def test_bad_msgnames(self):
        for x in self.BAD_TYPE_STRINGS:
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_message_class, x)
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_message_instance, x)

    def test_irregular_msgnames(self):
        # Redundant or trailing slashes must be tolerated.
        irregular = ["std_msgs//String", "//std_msgs/String",
            "/std_msgs//String", "/std_msgs/String", "//std_msgs//String",
            "/std_msgs/String/", "//std_msgs//String//", "std_msgs/String/",
            "std_msgs//String//"]
        for x in irregular:
            self.assertNotEqual(ros_loader.get_message_class(x), None)
            self.assertNotEqual(ros_loader.get_message_instance(x), None)

    def test_std_msgnames(self):
        for x in self.STD_MSGS:
            self.assertNotEqual(ros_loader.get_message_class(x), None)
            inst = ros_loader.get_message_instance(x)
            self.assertNotEqual(inst, None)
            self.assertEqual(x, inst._type)

    def test_msg_cache(self):
        # Same lookups as test_std_msgnames, plus the cache assertion.
        for x in self.STD_MSGS:
            self.assertNotEqual(ros_loader.get_message_class(x), None)
            inst = ros_loader.get_message_instance(x)
            self.assertNotEqual(inst, None)
            self.assertEqual(x, inst._type)
            self.assertTrue(x in ros_loader._loaded_msgs)

    def test_assorted_msgnames(self):
        assortedmsgs = ["geometry_msgs/Pose", "actionlib_msgs/GoalStatus",
            "geometry_msgs/WrenchStamped", "stereo_msgs/DisparityImage",
            "nav_msgs/OccupancyGrid", "geometry_msgs/Point32",
            "std_msgs/String", "trajectory_msgs/JointTrajectoryPoint",
            "diagnostic_msgs/KeyValue",
            "visualization_msgs/InteractiveMarkerUpdate", "nav_msgs/GridCells",
            "sensor_msgs/PointCloud2"]
        for x in assortedmsgs:
            self.assertNotEqual(ros_loader.get_message_class(x), None)
            inst = ros_loader.get_message_instance(x)
            self.assertNotEqual(inst, None)
            self.assertEqual(x, inst._type)

    def test_invalid_msgnames_primitives(self):
        # Built-in field primitives are not message types.
        invalid = ["bool", "int8", "uint8", "int16", "uint16", "int32",
            "uint32", "int64", "uint64", "float32", "float64", "string",
            "time", "duration"]
        for x in invalid:
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_message_class, x)
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_message_instance, x)

    def test_nonexistent_packagenames(self):
        nonexistent = ["wangle_msgs/Jam", "whistleblower_msgs/Document",
            "sexual_harrassment_msgs/UnwantedAdvance", "coercion_msgs/Bribe",
            "airconditioning_msgs/Cold", "pr2thoughts_msgs/Escape"]
        for x in nonexistent:
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_message_class, x)
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_message_instance, x)

    def test_packages_without_msgs(self):
        # Real packages that simply define no messages.
        no_msgs = ["roslib/Time", "roslib/Duration", "roslib/Header",
            "std_srvs/ConflictedMsg", "topic_tools/MessageMessage"]
        for x in no_msgs:
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_message_class, x)
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_message_instance, x)

    def test_nonexistent_msg_classnames(self):
        nonexistent = ["roscpp/Time", "roscpp/Duration", "roscpp/Header",
            "rospy/Time", "rospy/Duration", "rospy/Header", "std_msgs/Spool",
            "geometry_msgs/Tetrahedron", "sensor_msgs/TelepathyUnit"]
        for x in nonexistent:
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_message_class, x)
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_message_instance, x)

    def test_bad_servicenames(self):
        for x in self.BAD_TYPE_STRINGS:
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_service_class, x)
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_service_instance, x)
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_service_request_instance, x)
            self.assertRaises(ros_loader.InvalidTypeStringException,
                              ros_loader.get_service_response_instance, x)

    def test_irregular_servicenames(self):
        irregular = ["roscpp//GetLoggers", "/roscpp/GetLoggers/",
            "/roscpp/GetLoggers", "//roscpp/GetLoggers", "/roscpp//GetLoggers",
            "roscpp/GetLoggers//", "/roscpp/GetLoggers//",
            "roscpp/GetLoggers/", "roscpp//GetLoggers//"]
        for x in irregular:
            self.assertNotEqual(ros_loader.get_service_class(x), None)
            self.assertNotEqual(ros_loader.get_service_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_request_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_response_instance(x), None)

    def test_common_servicenames(self):
        for x in self.COMMON_SRVS:
            self.assertNotEqual(ros_loader.get_service_class(x), None)
            self.assertNotEqual(ros_loader.get_service_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_request_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_response_instance(x), None)
            self.assertEqual(x, ros_loader.get_service_instance(x)._type)

    def test_srv_cache(self):
        # Same lookups as test_common_servicenames, plus the cache check.
        for x in self.COMMON_SRVS:
            self.assertNotEqual(ros_loader.get_service_class(x), None)
            self.assertNotEqual(ros_loader.get_service_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_request_instance(x), None)
            self.assertNotEqual(
                ros_loader.get_service_response_instance(x), None)
            self.assertTrue(x in ros_loader._loaded_srvs)

    def test_packages_without_srvs(self):
        no_msgs = ["roslib/A", "roslib/B", "roslib/C",
            "std_msgs/CuriousSrv"]
        for x in no_msgs:
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_service_class, x)
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_service_instance, x)
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_service_request_instance, x)
            self.assertRaises(ros_loader.InvalidModuleException,
                              ros_loader.get_service_response_instance, x)

    def test_nonexistent_service_packagenames(self):
        nonexistent = ["butler_srvs/FetchDrink", "money_srvs/MoreMoney",
            "snoopdogg_srvs/SipOnGinAndJuice", "revenge_srvs/BackStab"]
        for x in nonexistent:
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_service_class, x)
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_service_instance, x)
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_service_request_instance, x)
            self.assertRaises(ros_loader.InvalidPackageException,
                              ros_loader.get_service_response_instance, x)

    def test_nonexistent_service_classnames(self):
        nonexistent = ["std_srvs/KillAllHumans", "std_srvs/Full",
            "rospy_tutorials/SubtractTwoInts", "nav_msgs/LoseMap",
            "topic_tools/TellMeWhatThisTopicIsActuallyAbout"]
        for x in nonexistent:
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_service_class, x)
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_service_instance, x)
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_service_request_instance, x)
            self.assertRaises(ros_loader.InvalidClassException,
                              ros_loader.get_service_response_instance, x)
PKG = 'rosbridge_library'  # ROS package under test
NAME = 'test_ros_loader'   # rostest test name

if __name__ == '__main__':
    # Run via rostest so the ROS environment needed by the loader exists.
    rostest.unitrun(PKG, NAME, TestROSLoader)
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | tools/perf/page_sets/intl_es_fr_pt-BR.py | 13 | 1640 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class IntlEsFrPtBrPage(page_module.Page):
  """A single benchmark page, replayed on the shared desktop page state."""

  def __init__(self, url, page_set):
    super(IntlEsFrPtBrPage, self).__init__(
        url=url, page_set=page_set,
        shared_page_state_class=shared_page_state.SharedDesktopPageState)
    # All pages in this set replay from the same WPR archive.
    self.archive_data_file = 'data/intl_es_fr_pt-BR.json'
class IntlEsFrPtBrPageSet(story.StorySet):
  """
  Popular pages in Romance languages Spanish, French and Brazilian Portuguese.
  """

  def __init__(self):
    super(IntlEsFrPtBrPageSet, self).__init__(
        archive_data_file='data/intl_es_fr_pt-BR.json',
        cloud_storage_bucket=story.PARTNER_BUCKET)

    urls_list = [
      'http://elmundo.es/',
      'http://terra.es/',
      # pylint: disable=C0301
      'http://www.ebay.es/sch/i.html?_sacat=382&_trkparms=clkid%3D6548971389060485883&_qi=RTM1381637',
      'http://www.eltiempo.es/talavera-de-la-reina.html',
      'http://www.free.fr/adsl/index.html',
      'http://www.voila.fr/',
      'http://www.leboncoin.fr/annonces/offres/limousin/',
      'http://www.orange.fr/',
      # Why: #5 site in Brazil
      'http://www.uol.com.br/',
      # Why: #10 site in Brazil
      # pylint: disable=C0301
      'http://produto.mercadolivre.com.br/MLB-468424957-pelicula-protetora-smartphone-h5500-e-h5300-43-frete-free-_JM'
    ]

    # One story per URL; all stories share this set's replay archive.
    for url in urls_list:
      self.AddStory(IntlEsFrPtBrPage(url, self))
| bsd-3-clause |
weolar/miniblink49 | third_party/skia/tools/builder_name_schema.py | 26 | 6247 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Utilities for dealing with builder names. This module obtains its attributes
dynamically from builder_name_schema.json. """
import json
import os
# All of these global variables are filled in by _LoadSchema().

# The full schema.
BUILDER_NAME_SCHEMA = None

# Character which separates parts of a builder name.
BUILDER_NAME_SEP = None

# Builder roles. These values must match the top-level keys of
# builder_name_schema.json (asserted at the end of _LoadSchema).
BUILDER_ROLE_CANARY = 'Canary'
BUILDER_ROLE_BUILD = 'Build'
BUILDER_ROLE_HOUSEKEEPER = 'Housekeeper'
BUILDER_ROLE_PERF = 'Perf'
BUILDER_ROLE_TEST = 'Test'
BUILDER_ROLES = (BUILDER_ROLE_CANARY,
                 BUILDER_ROLE_BUILD,
                 BUILDER_ROLE_HOUSEKEEPER,
                 BUILDER_ROLE_PERF,
                 BUILDER_ROLE_TEST)

# Suffix which distinguishes trybots from normal bots.
TRYBOT_NAME_SUFFIX = None
def _LoadSchema():
  """ Load the builder naming schema from the JSON file. """

  def _UnicodeToStr(obj):
    """ Convert all unicode strings in obj to Python strings. """
    # Recurses through dicts/lists/tuples; other values pass through.
    if isinstance(obj, unicode):
      return str(obj)
    elif isinstance(obj, dict):
      return dict(map(_UnicodeToStr, obj.iteritems()))
    elif isinstance(obj, list):
      return list(map(_UnicodeToStr, obj))
    elif isinstance(obj, tuple):
      return tuple(map(_UnicodeToStr, obj))
    else:
      return obj

  builder_name_json_filename = os.path.join(
      os.path.dirname(__file__), 'builder_name_schema.json')
  # NOTE(review): the file handle from open() is never closed explicitly.
  builder_name_schema_json = json.load(open(builder_name_json_filename))

  global BUILDER_NAME_SCHEMA
  BUILDER_NAME_SCHEMA = _UnicodeToStr(
      builder_name_schema_json['builder_name_schema'])

  global BUILDER_NAME_SEP
  BUILDER_NAME_SEP = _UnicodeToStr(
      builder_name_schema_json['builder_name_sep'])

  global TRYBOT_NAME_SUFFIX
  TRYBOT_NAME_SUFFIX = _UnicodeToStr(
      builder_name_schema_json['trybot_name_suffix'])

  # Since the builder roles are dictionary keys, just assert that the global
  # variables above account for all of them.
  assert len(BUILDER_ROLES) == len(BUILDER_NAME_SCHEMA)
  for role in BUILDER_ROLES:
    assert role in BUILDER_NAME_SCHEMA


# Populate the module globals at import time.
_LoadSchema()
def MakeBuilderName(role, extra_config=None, is_trybot=False, **kwargs):
  """Assemble a builder name from its component parts.

  Args:
    role: string; the builder role. Must be a key of BUILDER_NAME_SCHEMA.
    extra_config: optional string appended after the schema attributes.
    is_trybot: bool; whether to append the trybot suffix.
    **kwargs: one value for each attribute listed in the schema for `role`.

  Returns:
    The name parts joined with BUILDER_NAME_SEP.

  Raises:
    ValueError: if the role is unrecognized, an attribute is not in the
        schema, or any part contains the separator character.
    KeyError: if kwargs is missing an attribute the schema requires.
  """
  schema = BUILDER_NAME_SCHEMA.get(role)
  if not schema:
    raise ValueError('%s is not a recognized role.' % role)
  # Validate every provided part before assembling anything.
  for k, v in kwargs.iteritems():
    if BUILDER_NAME_SEP in v:
      raise ValueError('%s not allowed in %s.' % (BUILDER_NAME_SEP, v))
    if k not in schema:
      raise ValueError('Schema does not contain "%s": %s' % (k, schema))
  if extra_config and BUILDER_NAME_SEP in extra_config:
    raise ValueError('%s not allowed in %s.' % (BUILDER_NAME_SEP,
                                                extra_config))
  name_parts = [role]
  # Attribute order is dictated by the schema, not by kwargs order.
  name_parts.extend([kwargs[attribute] for attribute in schema])
  if extra_config:
    name_parts.append(extra_config)
  if is_trybot:
    name_parts.append(TRYBOT_NAME_SUFFIX)
  return BUILDER_NAME_SEP.join(name_parts)
def BuilderNameFromObject(obj, is_trybot=False):
  """Create a builder name based on properties of the given object.

  Args:
    obj: the object from which to create the builder name. The object must
        have as properties:
        - A valid builder role, as defined in the JSON file
        - All properties listed in the JSON file for that role
        - Optionally, an extra_config property
    is_trybot: bool; whether or not the builder is a trybot.

  Returns:
    string which combines the properties of the given object into a valid
    builder name.

  Raises:
    ValueError: if obj.role is not a recognized builder role.
    AttributeError: if obj lacks an attribute required by the schema.
  """
  schema = BUILDER_NAME_SCHEMA.get(obj.role)
  if not schema:
    raise ValueError('%s is not a recognized role.' % obj.role)
  name_parts = [obj.role]
  for attr_name in schema:
    attr_val = getattr(obj, attr_name)
    name_parts.append(attr_val)
  # extra_config and the trybot suffix are optional trailing parts.
  extra_config = getattr(obj, 'extra_config', None)
  if extra_config:
    name_parts.append(extra_config)
  if is_trybot:
    name_parts.append(TRYBOT_NAME_SUFFIX)
  return BUILDER_NAME_SEP.join(name_parts)
def IsTrybot(builder_name):
  """ Returns true if builder_name refers to a trybot (as opposed to a
  waterfall bot). """
  # TRYBOT_NAME_SUFFIX is loaded from builder_name_schema.json at import
  # time by _LoadSchema().
  return builder_name.endswith(TRYBOT_NAME_SUFFIX)
def GetWaterfallBot(builder_name):
  """Returns the name of the waterfall bot for this builder.

  Trybot names have the trybot suffix stripped; any other name is
  returned unchanged.
  """
  if IsTrybot(builder_name):
    return _WithoutSuffix(builder_name, BUILDER_NAME_SEP + TRYBOT_NAME_SUFFIX)
  return builder_name
def TrybotName(builder_name):
  """Returns the name of the trybot clone of this builder.

  A name that already carries the trybot suffix is returned unchanged;
  otherwise the separator plus TRYBOT_NAME_SUFFIX is appended.
  """
  if IsTrybot(builder_name):
    return builder_name
  return BUILDER_NAME_SEP.join((builder_name, TRYBOT_NAME_SUFFIX))
def _WithoutSuffix(string, suffix):
""" Returns a copy of string 'string', but with suffix 'suffix' removed.
Raises ValueError if string does not end with suffix. """
if not string.endswith(suffix):
raise ValueError('_WithoutSuffix: string %s does not end with suffix %s' % (
string, suffix))
return string[:-len(suffix)]
def DictForBuilderName(builder_name):
  """Makes a dictionary containing details about the builder from its name.

  The name is split on BUILDER_NAME_SEP; the first part selects the role's
  schema, subsequent parts fill the schema's attributes, and one optional
  trailing part becomes 'extra_config'. 'is_trybot' records (and strips)
  the trybot suffix.

  Raises:
    ValueError: if the name does not match any role's schema.
  """
  split_name = builder_name.split(BUILDER_NAME_SEP)

  def pop_front():
    # Consume the next name part; exhaustion means the name has fewer
    # parts than the schema requires.
    try:
      return split_name.pop(0)
    except IndexError:
      # Was a bare `except:`, which would also have swallowed
      # KeyboardInterrupt/SystemExit.
      raise ValueError('Invalid builder name: %s' % builder_name)

  result = {'is_trybot': False}
  if split_name[-1] == TRYBOT_NAME_SUFFIX:
    result['is_trybot'] = True
    split_name.pop()
  if split_name[0] in BUILDER_NAME_SCHEMA:
    key_list = BUILDER_NAME_SCHEMA[split_name[0]]
    result['role'] = pop_front()
    for key in key_list:
      result[key] = pop_front()
    if split_name:
      result['extra_config'] = pop_front()
    # Anything left over means the name had too many parts.
    if split_name:
      raise ValueError('Invalid builder name: %s' % builder_name)
  else:
    raise ValueError('Invalid builder name: %s' % builder_name)
  return result
| apache-2.0 |
derekjchow/models | research/street/python/nn_ops.py | 20 | 9175 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops and utilities for neural networks.
For now, just an LSTM layer.
"""
import shapes
import tensorflow as tf
rnn = tf.load_op_library("../cc/rnn_ops.so")
def rnn_helper(inp,
               length,
               cell_type=None,
               direction="forward",
               name=None,
               *args,
               **kwargs):
  """Adds ops for a recurrent neural network layer.

  Dispatches to a concrete RNN implementation selected by `cell_type` and
  wires it up in one of three modes selected by `direction`:

    forward: a forward-only RNN.
    backward: a backward-only RNN.
    bidirectional: both, with outputs concatenated on the feature axis.

  Args:
    inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`].
    length: A 1-D int64 tensor of shape [`batch_size`] holding the true
      length of each sequence in `inp`.
    cell_type: RNN cell type; currently only "lstm" is supported.
    direction: One of "forward", "backward", "bidirectional".
    name: Name of the enclosing variable scope.
    *args: Extra positional arguments forwarded to the layer.
    **kwargs: Extra keyword arguments forwarded to the layer.

  Returns:
    A 3-D tensor of shape [`batch_size`, `max_length`, `num_nodes`].
  """
  assert cell_type is not None
  rnn_func = {"lstm": lstm_layer}.get(cell_type)
  assert rnn_func is not None
  assert direction in ["forward", "backward", "bidirectional"]

  def _one_pass(reverse, scope):
    # Build one direction of the RNN.  lstm_layer returns (output, memory);
    # only the output is needed here.
    result = rnn_func(
        inp=inp, length=length, backward=reverse, name=scope, *args, **kwargs)
    if isinstance(result, tuple):
      result = result[0]
    return result

  with tf.variable_scope(name):
    fwd = _one_pass(False, "forward") if direction != "backward" else None
    bwd = _one_pass(True, "backward") if direction != "forward" else None
    if direction == "forward":
      out = fwd
    elif direction == "backward":
      out = bwd
    else:
      out = tf.concat(axis=2, values=[fwd, bwd])
  return out
@tf.RegisterShape("VariableLSTM")
def _variable_lstm_shape(op):
  """Shape function for the VariableLSTM op.

  Merges the statically-known dimensions of the op's inputs to validate
  consistency and derive the output shapes (activation, raw gate
  activation, memory).
  """
  input_shape = op.inputs[0].get_shape().with_rank(4)
  state_shape = op.inputs[1].get_shape().with_rank(2)
  memory_shape = op.inputs[2].get_shape().with_rank(2)
  w_m_m_shape = op.inputs[3].get_shape().with_rank(3)
  # Bug fix: the second merge previously restarted from input_shape[0],
  # discarding the input/state merge result.  Chain the merges so that
  # batch_size reflects all three inputs.
  batch_size = input_shape[0].merge_with(state_shape[0])
  batch_size = batch_size.merge_with(memory_shape[0])
  seq_len = input_shape[1]
  gate_num = input_shape[2].merge_with(w_m_m_shape[1])
  output_dim = input_shape[3].merge_with(state_shape[1])
  output_dim = output_dim.merge_with(memory_shape[1])
  output_dim = output_dim.merge_with(w_m_m_shape[0])
  output_dim = output_dim.merge_with(w_m_m_shape[2])
  return [[batch_size, seq_len, output_dim],
          [batch_size, seq_len, gate_num, output_dim],
          [batch_size, seq_len, output_dim]]
@tf.RegisterGradient("VariableLSTM")
def _variable_lstm_grad(op, act_grad, gate_grad, mem_grad):
  """Gradient function for the VariableLSTM op."""
  # Forward-pass tensors required by the native gradient kernel.
  init_state = op.inputs[1]
  init_memory = op.inputs[2]
  weights = op.inputs[3]
  activation = op.outputs[0]
  raw_gate_activation = op.outputs[1]
  memory_out = op.outputs[2]
  return rnn.variable_lstm_grad(
      init_state, init_memory, weights, activation, raw_gate_activation,
      memory_out, act_grad, gate_grad, mem_grad)
def lstm_layer(inp,
               length=None,
               state=None,
               memory=None,
               num_nodes=None,
               backward=False,
               clip=50.0,
               reg_func=tf.nn.l2_loss,
               weight_reg=False,
               weight_collection="LSTMWeights",
               bias_reg=False,
               stddev=None,
               seed=None,
               decode=False,
               use_native_weights=False,
               name=None):
  """Adds ops for an LSTM layer.

  This adds ops for the following operations:

    input => (forward-LSTM|backward-LSTM) => output

  The direction of the LSTM is determined by `backward`. If it is false, the
  forward LSTM is used, the backward one otherwise.

  Args:
    inp: A 3-D tensor of shape [`batch_size`, `max_length`, `feature_dim`].
    length: A 1-D tensor of shape [`batch_size`] and type int64. Each element
      represents the length of the corresponding sequence in `inp`.
    state: If specified, uses it as the initial state.
    memory: If specified, uses it as the initial memory.
    num_nodes: The number of LSTM cells.
    backward: If true, reverses the `inp` before adding the ops. The output is
      also reversed so that the direction is the same as `inp`.
    clip: Value used to clip the cell values.
    reg_func: Function used for the weight regularization such as
      `tf.nn.l2_loss`.
    weight_reg: If true, regularize the filter weights with `reg_func`.
    weight_collection: Collection to add the weights to for regularization.
    bias_reg: If true, regularize the bias vector with `reg_func`.
    stddev: Standard deviation used to initialize the variables.
    seed: Seed used to initialize the variables.
    decode: If true, does not add ops which are not used for inference.
    use_native_weights: If true, uses weights in the same format as the native
      implementations.
    name: Name of the op.

  Returns:
    A tuple (output, memory): a 3-D tensor of shape
    [`batch_size`, `max_length`, `num_nodes`] and the final memory.
  """
  with tf.variable_scope(name):
    if backward:
      # Reverse the input (respecting per-sequence lengths if given) so the
      # kernel always scans left-to-right.
      if length is None:
        inp = tf.reverse(inp, [1])
      else:
        inp = tf.reverse_sequence(inp, length, 1, 0)
    num_prev = inp.get_shape()[2]
    if stddev:
      initializer = tf.truncated_normal_initializer(stddev=stddev, seed=seed)
    else:
      initializer = tf.uniform_unit_scaling_initializer(seed=seed)
    if use_native_weights:
      # Create one fused weight matrix under "LSTMCell" (matching the naming
      # of TF's native LSTM cell) and slice it into input->gates and
      # memory->gates parts.
      with tf.variable_scope("LSTMCell"):
        w = tf.get_variable(
            "W_0",
            shape=[num_prev + num_nodes, 4 * num_nodes],
            initializer=initializer,
            dtype=tf.float32)
      w_i_m = tf.slice(w, [0, 0], [num_prev, 4 * num_nodes], name="w_i_m")
      w_m_m = tf.reshape(
          tf.slice(w, [num_prev, 0], [num_nodes, 4 * num_nodes]),
          [num_nodes, 4, num_nodes],
          name="w_m_m")
    else:
      w_i_m = tf.get_variable("w_i_m", [num_prev, 4 * num_nodes],
                              initializer=initializer)
      w_m_m = tf.get_variable("w_m_m", [num_nodes, 4, num_nodes],
                              initializer=initializer)
    if not decode and weight_reg:
      # Regularization terms are only needed at training time.
      tf.add_to_collection(weight_collection, reg_func(w_i_m, name="w_i_m_reg"))
      tf.add_to_collection(weight_collection, reg_func(w_m_m, name="w_m_m_reg"))
    batch_size = shapes.tensor_dim(inp, dim=0)
    num_frames = shapes.tensor_dim(inp, dim=1)
    # Collapse (batch, time) so the input projection is one big matmul.
    prev = tf.reshape(inp, tf.stack([batch_size * num_frames, num_prev]))
    if use_native_weights:
      with tf.variable_scope("LSTMCell"):
        b = tf.get_variable(
            "B",
            shape=[4 * num_nodes],
            initializer=tf.zeros_initializer(),
            dtype=tf.float32)
      biases = tf.identity(b, name="biases")
    else:
      biases = tf.get_variable(
          "biases", [4 * num_nodes], initializer=tf.constant_initializer(0.0))
    if not decode and bias_reg:
      tf.add_to_collection(
          weight_collection, reg_func(
              biases, name="biases_reg"))
    prev = tf.nn.xw_plus_b(prev, w_i_m, biases)
    # Back to [batch, time, 4 gates, num_nodes] for the recurrent kernel.
    prev = tf.reshape(prev, tf.stack([batch_size, num_frames, 4, num_nodes]))
    if state is None:
      state = tf.fill(tf.stack([batch_size, num_nodes]), 0.0)
    if memory is None:
      memory = tf.fill(tf.stack([batch_size, num_nodes]), 0.0)
    # Custom op from rnn_ops.so; returns (activation, raw gates, memory).
    out, _, mem = rnn.variable_lstm(prev, state, memory, w_m_m, clip=clip)
    if backward:
      # Undo the earlier reversal so outputs line up with the caller's input.
      if length is None:
        out = tf.reverse(out, [1])
      else:
        out = tf.reverse_sequence(out, length, 1, 0)
    return out, mem
| apache-2.0 |
egabancho/invenio | invenio/modules/archiver/api.py | 2 | 3234 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Archiver API implementation."""
from __future__ import absolute_import
from fs.zipfs import ZipFS
from invenio.modules.documents import api
from .bagit import create_bagit
from .utils import name_generator
def get_archive_package(recid, version=None):
    """Return archive package.

    :param recid: The record id of the archive package.
    :param version: ``-1`` for the latest version, ``None`` for all versions,
        otherwise an index into the version list (sorted by creation date).
    :returns: a single :class:`~invenio.modules.documents.api.Document` when a
        version is requested (``None`` if no package exists yet), otherwise a
        list of all matching documents.
    """
    query = {'title': 'bagit', 'recid': recid}
    documents = sorted(api.Document.storage_engine.search(query),
                       key=lambda x: x['creation_date'])
    if documents:
        if version == -1:
            # Latest version: the list is sorted by ascending creation date.
            return api.Document(documents.pop(), process_model_info=True)
        elif version is not None:
            return api.Document(documents[version], process_model_info=True)
    elif version is not None:
        # A specific version was requested but no package exists yet.
        # Previously an empty ``map(...)`` result leaked out here, so callers
        # such as create_archive_package could never see ``None``.
        return None
    return map(lambda d: api.Document(d, process_model_info=True),
               documents)
def create_archive_package(recid):
    """Create archive package for recid as zipped bagit folder.

    :param recid: The record id to archive.
    """
    # Fetch the latest existing package, if any.
    document = get_archive_package(recid, -1)
    if document is None:
        # NOTE(review): get_archive_package as written returns an empty list
        # (not None) when no package exists, so this branch may be
        # unreachable -- verify against get_archive_package's contract.
        document = api.Document.create({
            'title': 'bagit',
            'recid': recid,
        }, model='archiver')
    else:
        # presumably Document.update() returns the refreshed document --
        # TODO confirm against invenio.modules.documents.api.
        document = document.update()
    # New version index = number of versions recorded so far.
    version = len(document.get('version_history', []))
    bagit = create_bagit(recid, version)
    document.setcontents(bagit, name_generator)
    # FIXME remove old versions if necessary
def delete_archive_package(recid, version=None, force=False):
    """Delete archive package for given record id.

    :note: This will delete **ALL** files in the storage for the given record
        when ``version`` is ``None``.
    :param recid: The record archive to delete.
    :param version: Specific version to delete, or ``None`` for all versions.
    :param force: Passed through to ``Document.delete``.
    """
    documents = get_archive_package(recid, version=version)
    # get_archive_package returns a single Document when a version is given
    # and a list otherwise; the previous code mapped over the return value
    # unconditionally, iterating *inside* a single Document.
    if not isinstance(documents, list):
        documents = [documents] if documents is not None else []
    for document in documents:
        document.delete(force=force)
def mount_archive_package(recid, version=-1, mode='r'):
    """Mount an archive package and expose it for file operations.

    When the lookup matches more than one package, inspect the returned
    documents' ``_id`` fields and mount the specific package you need.

    :param recid: ID of the record to mount.
    :param version: The record's archive version to mount (default: latest).
    :param mode: ``'r'`` to read, ``'w'`` to write.
    :returns: :class:`fs.zipfs.ZipFS` object wrapping the archive file.
    """
    archive = get_archive_package(recid, version=version)
    return ZipFS(archive['uri'], mode=mode)
| gpl-2.0 |
brschneidE3/LegalNetworks | python_code/download_data_batch.py | 1 | 5208 | __author__ = 'brendan'
import helper_functions
import os
import tarfile
proj_cwd = os.path.dirname(os.getcwd())
data_dir = proj_cwd + r'/data'
def download_url(url, destination_path, curl_path=r'C:/Users/brendan/Downloads/curl-7.38.0-win64/bin/curl'):
"""
This is a quick and easy function that simulates clicking a link in your browser that initiates a download.
It requires downloading the program CURL. Then the curl_path argument must point to whever your curl.exe executable
is located.
url:: the url from which data is to be downloaded.
destination_path:: the downloaded file to be created.
"""
os_string = '%s "%s" > %s' % (curl_path, url, destination_path)
print os_string
os.system(os_string)
def download_court_data(court_name, curl_path):
"""
This function proceeds as follows:
1) Given court_name, a string representing a CourtListener court, download_court_data first checks that there
exists a subdirectory for court_name. This directory should contain within it a 'clusters' and 'opinions'
sub-subdirectory. If these don't exist, they are created.
2) download_court_data then compares how many files are in the 'clusters' sub-subdirectory to what is on the
CourtListener server. If these numbers are not the same, all locally-saved files are deleted and re-downloaded
and extracted to the 'clusters' sub-subdirectory.
3) This process is then repeated for 'opinions'.
"""
court_data_dir = data_dir + r'/%s' % court_name
court_clusters_data_dir = court_data_dir + r'/clusters'
court_opinions_data_dir = court_data_dir + r'/opinions'
# Make a court data directory if we don't have one already
if not os.path.exists(court_data_dir):
os.makedirs(court_data_dir)
if not os.path.exists(court_clusters_data_dir):
os.makedirs(court_clusters_data_dir)
if not os.path.exists(court_opinions_data_dir):
os.makedirs(court_opinions_data_dir)
###################
# FOR CLUSTERS DATA
###################
court_metadata_url = 'https://www.courtlistener.com/api/rest/v3/clusters/?docket__court=%s' % court_name
court_metadata = helper_functions.url_to_dict(court_metadata_url)
num_files_on_server = court_metadata['count']
files_in_dir = os.listdir(court_data_dir + r'/clusters')
num_files_in_dir = len(files_in_dir)
# If the number of files downloaded isn't the same as the number on the server
if num_files_on_server != num_files_in_dir:
print 'Re-downloading cluster data for court %s...' % court_name.upper()
# Delete the files we currently have
print '...deleting files...'
for filename in files_in_dir:
os.remove(r'%s/%s' % (court_clusters_data_dir, filename))
# Download the .tar.gz file
print '...downloading new .tar.gz file...'
download_url(url='https://www.courtlistener.com/api/bulk-data/clusters/%s.tar.gz' % court_name,
destination_path=court_clusters_data_dir + r'/%s.tar.gz' % court_name,
curl_path=curl_path)
# Extract it
print '...extracting files...'
with tarfile.open(court_clusters_data_dir + r'/%s.tar.gz' % court_name) as TarFile:
TarFile.extractall(path=court_clusters_data_dir)
# And delete .tar.gz file
os.remove(r'%s/%s.tar.gz' % (court_clusters_data_dir, court_name))
print '...done.'
else:
print "All server (cluster) files accounted for."
###################
# FOR OPINIONS DATA
###################
court_metadata_url = 'https://www.courtlistener.com/api/rest/v3/opinions/?docket__court=%s' % court_name
court_metadata = helper_functions.url_to_dict(court_metadata_url)
num_files_on_server = court_metadata['count']
files_in_dir = os.listdir(court_data_dir + r'/opinions')
num_files_in_dir = len(files_in_dir)
# If the number of files downloaded isn't the same as the number on the server
if num_files_on_server != num_files_in_dir:
print 'Re-downloading opinions data for court %s...' % court_name.upper()
# Delete the files we currently have
print '...deleting files...'
for filename in files_in_dir:
os.remove(r'%s/%s' % (court_opinions_data_dir, filename))
# Download the .tar.gz file
print '...downloading new .tar.gz file...'
download_url(url='https://www.courtlistener.com/api/bulk-data/opinions/%s.tar.gz' % court_name,
destination_path=court_opinions_data_dir + r'/%s.tar.gz' % court_name,
curl_path=curl_path)
# Extract it
print '...extracting files...'
with tarfile.open(court_opinions_data_dir + r'/%s.tar.gz' % court_name) as TarFile:
TarFile.extractall(path=court_opinions_data_dir)
# And delete .tar.gz file
os.remove(r'%s/%s.tar.gz' % (court_opinions_data_dir, court_name))
print '...done.'
else:
print "All server (opinion) files accounted for."
# download_court_data('scotus', r'C:/Users/brendan/Downloads/curl-7.38.0-win64/bin/curl')
| mit |
chainer/chainer | chainermn/links/n_step_rnn.py | 6 | 3181 | import chainer
import chainer.links.rnn as rnn
import chainermn.functions
class _MultiNodeNStepRNN(chainer.Chain):
    """Model-parallel wrapper around a Chainer NStepRNN-style link.

    Receives initial hidden states from ``rank_in`` (if given), runs the
    wrapped RNN, then sends the resulting states to ``rank_out`` (if given).
    """

    def __init__(self, link, communicator, rank_in, rank_out):
        super(_MultiNodeNStepRNN, self).__init__(actual_rnn=link)
        self.communicator = communicator
        self.rank_in = rank_in
        self.rank_out = rank_out

        check_lstm = isinstance(link, rnn.n_step_rnn.NStepRNNBase)
        if not check_lstm:
            raise ValueError('link must be NStepRNN and its inherited link')
        else:
            # Number of state tensors per layer stack (e.g. LSTM carries two:
            # hidden state and cell state).
            self.n_cells = link.n_cells

    def __call__(self, *inputs):
        # Default: no incoming states; the wrapped link initialises them.
        cells = [None for _ in range(self.n_cells)]

        if self.rank_in is not None:
            # Receive one state tensor per cell from the upstream process.
            cells = [chainermn.functions.recv(
                self.communicator,
                rank=self.rank_in)
                for _ in range(self.n_cells)]

        outputs = self.actual_rnn(*(tuple(cells) + inputs))
        # All but the last output are the resulting state tensors.
        cells = outputs[:-1]

        delegate_variable = None
        if self.rank_out is not None:
            cell = cells[0]
            for i in range(self.n_cells):
                delegate_variable = chainermn.functions.send(
                    cell, self.communicator, rank=self.rank_out)
                if i < self.n_cells - 1:
                    # pseudo_connect chains this send into the backward graph
                    # before moving on to the next cell.
                    cell, = chainermn.functions.pseudo_connect(
                        delegate_variable, cells[i + 1])

        # Callers begin backward computation from delegate_variable when
        # states were sent downstream (None otherwise).
        return outputs + tuple([delegate_variable])
def create_multi_node_n_step_rnn(
        actual_link, communicator, rank_in=None, rank_out=None):
    """Create a multi node stacked RNN link from a Chainer stacked RNN link.

    The returned link is used for model parallelism: it receives initial
    hidden states from process ``rank_in`` (skipped when ``None``), runs the
    wrapped stacked RNN, and forwards the resulting hidden states to process
    ``rank_out``.  In addition to the wrapped link's outputs it returns a
    ``delegate_variable``; when ``rank_out`` is not ``None``, backward
    computation is expected to start from it (see
    ``chainermn.functions.pseudo_connect`` for details).

    Any of Chainer's stacked RNN links can be wrapped:
    ``NStepBiGRU``, ``NStepBiLSTM``, ``NStepBiRNNReLU``, ``NStepBiRNNTanh``,
    ``NStepGRU``, ``NStepLSTM``, ``NStepRNNReLU``, ``NStepRNNTanh``.

    Args:
        actual_link (chainer.Link): Chainer stacked RNN link.
        communicator: ChainerMN communicator.
        rank_in (int, or None): Rank of the process which sends hidden RNN
            states to this process.
        rank_out (int, or None): Rank of the process to which this process
            sends hidden RNN states.

    Returns:
        The multi node stacked RNN link based on ``actual_link``.
    """
    chainer.utils.experimental('chainermn.links.create_multi_node_n_step_rnn')
    return _MultiNodeNStepRNN(actual_link, communicator, rank_in, rank_out)
| mit |
psyke83/ace2europa_kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one argument: the object file whose unwind info to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# Allow overriding the readelf binary via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches the per-function header emitted by 'readelf -u':
#   <name>: [0xstart-0xend]
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches unwind-region lines carrying an rlen=N field.
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
# Running totals across the whole object file.
num_funcs = 0
num_errors = 0
# Parser state for the function currently being scanned.
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: first validate the previous function.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # Each 16-byte IA-64 bundle holds 3 instruction slots.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the last function (no following header triggers it in the loop).
check_func(func, slots, rlen_sum)

if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err = "errors"
    else:
        err = "error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
Codlydodly/python-client | venv/lib/python2.7/site-packages/pyglet/text/formats/attributed.py | 9 | 5145 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Extensible attributed text format for representing pyglet formatted
documents.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import operator
import parser
import re
import token
import pyglet
_pattern = re.compile(r'''
(?P<escape_hex>\{\#x(?P<escape_hex_val>[0-9a-fA-F]+)\})
| (?P<escape_dec>\{\#(?P<escape_dec_val>[0-9]+)\})
| (?P<escape_lbrace>\{\{)
| (?P<escape_rbrace>\}\})
| (?P<attr>\{
(?P<attr_name>[^ \{\}]+)\s+
(?P<attr_val>[^\}]+)\})
| (?P<nl_hard1>\n(?=[ \t]))
| (?P<nl_hard2>\{\}\n)
| (?P<nl_soft>\n(?=\S))
| (?P<nl_para>\n\n+)
| (?P<text>[^\{\}\n]+)
''', re.VERBOSE | re.DOTALL)
class AttributedTextDecoder(pyglet.text.DocumentDecoder):
    """Decode attributed text markup into a FormattedDocument.

    Tokens matched by ``_pattern`` drive a small state machine tracking
    trailing whitespace/newlines so that soft line breaks collapse into a
    single space and paragraph-level styles apply at the right position.
    """

    def decode(self, text, location=None):
        self.doc = pyglet.text.document.FormattedDocument()

        self.length = 0
        # Character-style attributes to apply to the next appended text run.
        self.attributes = {}
        next_trailing_space = True
        trailing_newline = True

        for m in _pattern.finditer(text):
            group = m.lastgroup
            trailing_space = True
            if group == 'text':
                t = m.group('text')
                self.append(t)
                trailing_space = t.endswith(' ')
                trailing_newline = False
            elif group == 'nl_soft':
                # Soft break: becomes a single space unless one was already
                # emitted at the end of the previous run.
                if not next_trailing_space:
                    self.append(' ')
                trailing_newline = False
            elif group in ('nl_hard1', 'nl_hard2'):
                self.append('\n')
                trailing_newline = True
            elif group == 'nl_para':
                self.append(m.group('nl_para'))
                trailing_newline = True
            elif group == 'attr':
                # Attribute value is a Python literal, evaluated only after
                # the safe() whitelist check (True/False/None names only).
                try:
                    ast = parser.expr(m.group('attr_val'))
                    if self.safe(ast):
                        val = eval(ast.compile())
                    else:
                        val = None
                except (parser.ParserError, SyntaxError):
                    val = None
                name = m.group('attr_name')
                if name[0] == '.':
                    # Leading '.' marks a paragraph-level attribute.
                    if trailing_newline:
                        self.attributes[name[1:]] = val
                    else:
                        self.doc.set_paragraph_style(self.length, self.length,
                                                     {name[1:]: val})
                else:
                    self.attributes[name] = val
            elif group == 'escape_dec':
                self.append(unichr(int(m.group('escape_dec_val'))))
            elif group == 'escape_hex':
                self.append(unichr(int(m.group('escape_hex_val'), 16)))
            elif group == 'escape_lbrace':
                self.append('{')
            elif group == 'escape_rbrace':
                self.append('}')
            next_trailing_space = trailing_space

        return self.doc

    def append(self, text):
        # Insert the run with the pending attributes, then reset them; each
        # attribute tag only affects the immediately following run.
        self.doc.insert_text(self.length, text, self.attributes)
        self.length += len(text)
        self.attributes.clear()

    # Names allowed to appear in attribute-value expressions.
    _safe_names = ('True', 'False', 'None')

    def safe(self, ast):
        # True if the parsed expression only references whitelisted names.
        tree = ast.totuple()
        return self.safe_node(tree)

    def safe_node(self, node):
        if token.ISNONTERMINAL(node[0]):
            # All children must themselves be safe.
            return reduce(operator.and_, map(self.safe_node, node[1:]))
        elif node[0] == token.NAME:
            return node[1] in self._safe_names
        else:
            return True
| mit |
sticksnleaves/ghost-blog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/lexers/dotnet.py | 94 | 26751 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.web import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
    """
    For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
    source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    *New in Pygments 0.8.*
    """

    name = 'C#'
    aliases = ['csharp', 'c#']
    filenames = ['*.cs']
    mimetypes = ['text/x-csharp']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = {
        'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
        'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                  '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                  uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        'full': ('@?(?:_|[^' +
                 uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
                 + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                        'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    }

    tokens = {}
    token_variants = True

    # One full token table is generated per unicode level; __init__ picks
    # the variant requested by the 'unicodelevel' option.
    for levelname, cs_ident in list(levels.items()):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
                 r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|as|async|await|base|break|case|catch|'
                 r'checked|const|continue|default|delegate|'
                 r'do|else|enum|event|explicit|extern|false|finally|'
                 r'fixed|for|foreach|goto|if|implicit|in|interface|'
                 r'internal|is|lock|new|null|operator|'
                 r'out|override|params|private|protected|public|readonly|'
                 r'ref|return|sealed|sizeof|stackalloc|static|'
                 r'switch|this|throw|true|try|typeof|'
                 r'unchecked|unsafe|virtual|void|while|'
                 r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
                 r'descending|from|group|into|orderby|select|where|'
                 r'join|equals)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
                 r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
                (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ]
        }

    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()), 'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
    """
    For `Nemerle <http://nemerle.org>`_ source code.

    Additional options accepted:

    `unicodelevel`
      Determines which Unicode characters this lexer allows for identifiers.
      The possible values are:

      * ``none`` -- only the ASCII letters and numbers are allowed. This
        is the fastest selection.
      * ``basic`` -- all Unicode characters from the specification except
        category ``Lo`` are allowed.
      * ``full`` -- all Unicode characters as specified in the C# specs
        are allowed.  Note that this means a considerable slowdown since the
        ``Lo`` category has more than 40,000 characters in it!

      The default value is ``basic``.

    *New in Pygments 1.5.*
    """

    name = 'Nemerle'
    aliases = ['nemerle']
    filenames = ['*.n']
    mimetypes = ['text/x-nemerle']  # inferred

    flags = re.MULTILINE | re.DOTALL | re.UNICODE

    # for the range of allowed unicode characters in identifiers, see
    # http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
    levels = dict(
        none = '@?[_a-zA-Z][a-zA-Z0-9_]*',
        basic = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
                 '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
                 uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
        full = ('@?(?:_|[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo',
                                            'Nl') + '])'
                + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
                                       'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
    )

    tokens = {}
    token_variants = True

    # One full token table per unicode level, mirroring CSharpLexer; the
    # extra states handle Nemerle's splice strings and quasiquotation.
    for levelname, cs_ident in list(levels.items()):
        tokens[levelname] = {
            'root': [
                # method names
                (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)'  # return type
                 r'(' + cs_ident + ')'                            # method name
                 r'(\s*)(\()',                                    # signature start
                 bygroups(using(this), Name.Function, Text, Punctuation)),
                (r'^\s*\[.*?\]', Name.Attribute),
                (r'[^\S\n]+', Text),
                (r'\\\n', Text),  # line continuation
                (r'//.*?\n', Comment.Single),
                (r'/[*].*?[*]/', Comment.Multiline),
                (r'\n', Text),
                (r'\$\s*"', String, 'splice-string'),
                (r'\$\s*<#', String, 'splice-string2'),
                (r'<#', String, 'recursive-string'),

                (r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
                (r'\]\>', Keyword),

                # quasiquotation only
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),

                (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
                (r'[{}]', Punctuation),
                (r'@"(""|[^"])*"', String),
                (r'"(\\\\|\\"|[^"\n])*["\n]', String),
                (r"'\\.'|'[^\\]'", String.Char),
                (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
                (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
                (r'#[ \t]*(if|endif|else|elif|define|undef|'
                 r'line|error|warning|region|endregion|pragma)\b.*?\n',
                 Comment.Preproc),
                (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
                 Keyword)),
                (r'(abstract|and|as|base|catch|def|delegate|'
                 r'enum|event|extern|false|finally|'
                 r'fun|implements|interface|internal|'
                 r'is|macro|match|matches|module|mutable|new|'
                 r'null|out|override|params|partial|private|'
                 r'protected|public|ref|sealed|static|'
                 r'syntax|this|throw|true|try|type|typeof|'
                 r'virtual|volatile|when|where|with|'
                 r'assert|assert2|async|break|checked|continue|do|else|'
                 r'ensures|for|foreach|if|late|lock|new|nolate|'
                 r'otherwise|regexp|repeat|requires|return|surroundwith|'
                 r'unchecked|unless|using|while|yield)\b', Keyword),
                (r'(global)(::)', bygroups(Keyword, Punctuation)),
                (r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
                 r'short|string|uint|ulong|ushort|void|array|list)\b\??',
                 Keyword.Type),
                (r'(:>?)\s*(' + cs_ident + r'\??)',
                 bygroups(Punctuation, Keyword.Type)),
                (r'(class|struct|variant|module)(\s+)',
                 bygroups(Keyword, Text), 'class'),
                (r'(namespace|using)(\s+)', bygroups(Keyword, Text),
                 'namespace'),
                (cs_ident, Name),
            ],
            'class': [
                (cs_ident, Name.Class, '#pop')
            ],
            'namespace': [
                (r'(?=\()', Text, '#pop'),  # using (resource)
                ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
            ],
            'splice-string': [
                (r'[^"$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'\\"', String),
                (r'"', String, '#pop')
            ],
            'splice-string2': [
                (r'[^#<>$]', String),
                (r'\$' + cs_ident, Name),
                (r'(\$)(\()', bygroups(Name, Punctuation),
                 'splice-string-content'),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'recursive-string': [
                (r'[^#<>]', String),
                (r'<#', String, '#push'),
                (r'#>', String, '#pop')
            ],
            'splice-string-content': [
                (r'if|match', Keyword),
                (r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
                (cs_ident, Name),
                (r'\d+', Number),
                (r'\(', Punctuation, '#push'),
                (r'\)', Punctuation, '#pop')
            ]
        }

    def __init__(self, **options):
        level = get_choice_opt(options, 'unicodelevel', list(self.tokens.keys()),
                               'basic')
        if level not in self._all_tokens:
            # compile the regexes now
            self._tokens = self.__class__.process_tokendef(level)
        else:
            self._tokens = self._all_tokens[level]

        RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
    """
    For `Boo <http://boo.codehaus.org/>`_ source code.
    """
    name = 'Boo'
    aliases = ['boo']
    filenames = ['*.boo']
    mimetypes = ['text/x-boo']
    # State machine: 'root' covers the language; the auxiliary states handle
    # nested /* */ comments and the identifier following def/class/namespace.
    # NOTE: rule order within a state is significant -- earlier rules win.
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#|//).*$', Comment.Single),
            # /* ... */ comments can nest, so they get a dedicated state
            (r'/[*]', Comment.Multiline, 'comment'),
            (r'[]{}:(),.;[]', Punctuation),
            (r'\\\n', Text),
            (r'\\', Text),
            (r'(in|is|and|or|not)\b', Operator.Word),
            # regex literals: plain /.../ (no whitespace) and @/.../ forms
            (r'/(\\\\|\\/|[^/\s])/', String.Regex),
            (r'@/(\\\\|\\/|[^/])*/', String.Regex),
            (r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
            (r'(as|abstract|callable|constructor|destructor|do|import|'
             r'enum|event|final|get|interface|internal|of|override|'
             r'partial|private|protected|public|return|set|static|'
             r'struct|transient|virtual|yield|super|and|break|cast|'
             r'continue|elif|else|ensure|except|for|given|goto|if|in|'
             r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
             r'while|from|as)\b', Keyword),
            # anonymous 'def (args)' -- keep it a bare keyword, no funcname
            (r'def(?=\s+\(.*?\))', Keyword),
            (r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
            (r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
            # builtins only when not preceded by a dot (not attribute access)
            (r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
             r'assert|checked|enumerate|filter|getter|len|lock|map|'
             r'matrix|max|min|normalArrayIndexing|print|property|range|'
             r'rawArrayIndexing|required|typeof|unchecked|using|'
             r'yieldAll|zip)\b', Name.Builtin),
            (r'"""(\\\\|\\"|.*?)"""', String.Double),
            (r'"(\\\\|\\"|[^"]*?)"', String.Double),
            (r"'(\\\\|\\'|[^']*?)'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            # timespan literals: 10ms, 5d, 1h, 30s
            (r'[0-9][0-9\.]*(ms?|d|h|s)', Number),
            (r'0\d+', Number.Oct),
            (r'0x[a-fA-F0-9]+', Number.Hex),
            (r'\d+L', Number.Integer.Long),
            (r'\d+', Number.Integer),
        ],
        'comment': [
            ('/[*]', Comment.Multiline, '#push'),
            ('[*]/', Comment.Multiline, '#pop'),
            ('[^/*]', Comment.Multiline),
            ('[*/]', Comment.Multiline)
        ],
        'funcname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
        ],
        'classname': [
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
        ],
        'namespace': [
            # dotted names allowed for namespaces
            ('[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace, '#pop')
        ]
    }
class VbNetLexer(RegexLexer):
    """
    For
    `Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
    source code.
    """
    name = 'VB.net'
    aliases = ['vb.net', 'vbnet']
    filenames = ['*.vb', '*.bas']
    mimetypes = ['text/x-vbnet', 'text/x-vba']  # (?)
    # VB is case-insensitive, hence re.IGNORECASE for the whole table
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # attributes like <Serializable()> at the start of a line
            (r'^\s*<.*?>', Name.Attribute),
            (r'\s+', Text),
            (r'\n', Text),
            (r'rem\b.*?\n', Comment),
            (r"'.*?\n", Comment),
            # conditional-compilation / region directives
            (r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#End\s+If|#Const|'
             r'#ExternalSource.*?\n|#End\s+ExternalSource|'
             r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
             Comment.Preproc),
            (r'[\(\){}!#,.:]', Punctuation),
            (r'Option\s+(Strict|Explicit|Compare)\s+'
             r'(On|Off|Binary|Text)', Keyword.Declaration),
            # keywords only when not preceded by a dot (not member access)
            (r'(?<!\.)(AddHandler|Alias|'
             r'ByRef|ByVal|Call|Case|Catch|CBool|CByte|CChar|CDate|'
             r'CDec|CDbl|CInt|CLng|CObj|Continue|CSByte|CShort|'
             r'CSng|CStr|CType|CUInt|CULng|CUShort|Declare|'
             r'Default|Delegate|DirectCast|Do|Each|Else|ElseIf|'
             r'EndIf|Erase|Error|Event|Exit|False|Finally|For|'
             r'Friend|Get|Global|GoSub|GoTo|Handles|If|'
             r'Implements|Inherits|Interface|'
             r'Let|Lib|Loop|Me|MustInherit|'
             r'MustOverride|MyBase|MyClass|Narrowing|New|Next|'
             r'Not|Nothing|NotInheritable|NotOverridable|Of|On|'
             r'Operator|Option|Optional|Overloads|Overridable|'
             r'Overrides|ParamArray|Partial|Private|Protected|'
             r'Public|RaiseEvent|ReadOnly|ReDim|RemoveHandler|Resume|'
             r'Return|Select|Set|Shadows|Shared|Single|'
             r'Static|Step|Stop|SyncLock|Then|'
             r'Throw|To|True|Try|TryCast|Wend|'
             r'Using|When|While|Widening|With|WithEvents|'
             r'WriteOnly)\b', Keyword),
            # 'End', 'Dim'/'Const' and declarations push follow-up states
            # to classify the identifier that comes next
            (r'(?<!\.)End\b', Keyword, 'end'),
            (r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
            (r'(?<!\.)(Function|Sub|Property)(\s+)',
             bygroups(Keyword, Text), 'funcname'),
            (r'(?<!\.)(Class|Structure|Enum)(\s+)',
             bygroups(Keyword, Text), 'classname'),
            (r'(?<!\.)(Module|Namespace|Imports)(\s+)',
             bygroups(Keyword, Text), 'namespace'),
            (r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
             r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
             r'UShort)\b', Keyword.Type),
            (r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
             r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
            (r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
             r'<=|>=|<>|[-&*/\\^+=<>]',
             Operator),
            ('"', String, 'string'),
            # identifiers may carry a legacy type-suffix character
            ('[a-zA-Z_][a-zA-Z0-9_]*[%&@!#$]?', Name),
            # date literals: #8/23/1970#
            ('#.*?#', Literal.Date),
            (r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
            (r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
            (r'_\n', Text),  # Line continuation
        ],
        'string': [
            # '""' is an escaped quote inside a VB string
            (r'""', String),
            # closing quote, optionally followed by the Char suffix 'C'
            (r'"C?', String, '#pop'),
            (r'[^"]+', String),
        ],
        'dim': [
            (r'[a-z_][a-z0-9_]*', Name.Variable, '#pop'),
            (r'', Text, '#pop'),  # any other syntax
        ],
        'funcname': [
            (r'[a-z_][a-z0-9_]*', Name.Function, '#pop'),
        ],
        'classname': [
            (r'[a-z_][a-z0-9_]*', Name.Class, '#pop'),
        ],
        'namespace': [
            (r'[a-z_][a-z0-9_.]*', Name.Namespace, '#pop'),
        ],
        'end': [
            (r'\s+', Text),
            (r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
             Keyword, '#pop'),
            # a bare 'End' statement -- pop without consuming anything
            (r'', Text, '#pop'),
        ]
    }
class GenericAspxLexer(RegexLexer):
    """
    Lexer for ASP.NET pages.
    """
    name = 'aspx-gen'
    filenames = []
    mimetypes = []
    flags = re.DOTALL
    # Emits only the ASP.NET-specific parts (<%...%> blocks and <script>
    # bodies) as Other; everything else is delegated to XmlLexer.  Meant to
    # be combined with a language lexer through DelegatingLexer, which fills
    # in the Other tokens -- hence the empty filenames/mimetypes.
    tokens = {
        'root': [
            (r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
            (r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
                                                        Other,
                                                        using(XmlLexer))),
            (r'(.+?)(?=<)', using(XmlLexer)),
            (r'.+', using(XmlLexer)),
        ],
    }
#TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
    """
    Lexer for highligting C# within ASP.NET pages.
    """
    name = 'aspx-cs'
    aliases = ['aspx-cs']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    def __init__(self, **options):
        # delegate: C# fills the Other tokens produced by the generic lexer
        super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
                                              **options)
    def analyse_text(text):
        # a page directive is stronger evidence than a script tag
        if re.search(r'Page\s*Language="C#"', text, re.I):
            return 0.2
        if re.search(r'script[^>]+language=["\']C#', text, re.I):
            return 0.15
class VbNetAspxLexer(DelegatingLexer):
    """
    Lexer for highligting Visual Basic.net within ASP.NET pages.
    """
    name = 'aspx-vb'
    aliases = ['aspx-vb']
    filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
    mimetypes = []
    def __init__(self, **options):
        # delegate: VB.NET fills the Other tokens produced by the generic lexer
        super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
                                             **options)
    def analyse_text(text):
        # a page directive is stronger evidence than a script tag
        if re.search(r'Page\s*Language="Vb"', text, re.I):
            return 0.2
        if re.search(r'script[^>]+language=["\']vb', text, re.I):
            return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
    """
    For the F# language (version 3.0).
    *New in Pygments 1.5.*
    """
    name = 'FSharp'
    aliases = ['fsharp']
    filenames = ['*.fs', '*.fsi']
    mimetypes = ['text/x-fsharp']
    keywords = [
        'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
        'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
        'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
        'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
        'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
        'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
        'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
        'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
        'while', 'with', 'yield!', 'yield',
    ]
    # Reserved words; cannot hurt to color them as keywords too.
    keywords += [
        'atomic', 'break', 'checked', 'component', 'const', 'constraint',
        'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
        'functor', 'include', 'method', 'mixin', 'object', 'parallel',
        'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
        'virtual', 'volatile',
    ]
    # punctuation/operator tokens, pre-escaped for use in a regex alternation
    keyopts = [
        '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
        '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
        '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
        '_', '`', '{', '\|\]', '\|', '}', '~', '<@@', '<@', '=', '@>', '@@>',
    ]
    operators = r'[!$%&*+\./:<=>?@^|~-]'
    word_operators = ['and', 'or', 'not']
    prefix_syms = r'[!?~]'
    infix_syms = r'[=<>@^|&+\*/$%-]'
    primitives = [
        'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
        'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
        'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
        'list', 'exn', 'obj', 'enum',
    ]
    # See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
    # http://fsharp.org/about/files/spec.pdf for reference.  Good luck.
    # States: 'root' drives the lexing; 'dotted' consumes qualified names,
    # 'comment' handles nesting (* *) comments, and the three string states
    # cover plain, verbatim (@"...") and triple-quoted ("""...""") strings.
    tokens = {
        'escape-sequence': [
            (r'\\[\\\"\'ntbrafv]', String.Escape),
            (r'\\[0-9]{3}', String.Escape),
            (r'\\u[0-9a-fA-F]{4}', String.Escape),
            (r'\\U[0-9a-fA-F]{8}', String.Escape),
        ],
        'root': [
            (r'\s+', Text),
            (r'\(\)|\[\]', Name.Builtin.Pseudo),
            # a capitalized identifier followed by '.' starts a dotted path
            (r'\b(?<!\.)([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
             Name.Namespace, 'dotted'),
            (r'\b([A-Z][A-Za-z0-9_\']*)', Name),
            (r'///.*?\n', String.Doc),
            (r'//.*?\n', Comment.Single),
            # '(*' opens a (possibly nested) comment; '(*)' is an operator
            (r'\(\*(?!\))', Comment, 'comment'),
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'\b(open|module)(\s+)([a-zA-Z0-9_.]+)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'\b(let!?)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Variable)),
            (r'\b(type)(\s+)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name.Class)),
            (r'\b(member|override)(\s+)([a-zA-Z0-9_]+)(\.)([a-zA-Z0-9_]+)',
             bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
            (r'(%s)' % '|'.join(keyopts), Operator),
            (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
            (r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
            (r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
            (r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
             Comment.Preproc),
            (r"[^\W\d][\w']*", Name),
            (r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
            (r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
            (r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
            (r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Binary),
            # FIX: the fractional-part dot must be escaped (a bare '.'
            # matched *any* character) and the exponent is optional in the
            # F# float grammar.
            # NOTE(review): this rule comes after the Number.Integer rule,
            # which already matches any leading digit; confirm the intended
            # rule ordering for floats without a sign.
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)?[fFmM]?',
             Number.Float),
            (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
             String.Char),
            (r"'.'", String.Char),
            (r"'", Keyword),  # a stray quote is another syntax element
            # optional/named argument labels: ?name: / ~name:
            (r'[~?][a-z][\w\']*:', Name.Variable),
        ],
        'dotted': [
            (r'\s+', Text),
            (r'\.', Punctuation),
            (r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
            (r'[A-Z][A-Za-z0-9_\']*', Name, '#pop'),
            (r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
        ],
        'comment': [
            (r'[^(*)@"]+', Comment),
            (r'\(\*', Comment, '#push'),
            (r'\*\)', Comment, '#pop'),
            # comments cannot be closed within strings in comments
            (r'@"', String, 'lstring'),
            (r'"""', String, 'tqs'),
            (r'"', String, 'string'),
            (r'[(*)@]', Comment),
        ],
        'string': [
            (r'[^\\"]+', String),
            include('escape-sequence'),
            (r'\\\n', String),
            (r'\n', String),  # newlines are allowed in any string
            (r'"B?', String, '#pop'),
        ],
        'lstring': [
            (r'[^"]+', String),
            (r'\n', String),
            # '""' is the escaped quote inside a verbatim string
            (r'""', String),
            (r'"B?', String, '#pop'),
        ],
        'tqs': [
            (r'[^"]+', String),
            (r'\n', String),
            (r'"""B?', String, '#pop'),
            (r'"', String),
        ],
    }
| mit |
runjmc/maraschino | lib/werkzeug/testsuite/wsgi.py | 11 | 8402 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os import path
from cStringIO import StringIO
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
class WSGIUtilsTestCase(WerkzeugTestCase):
    # Exercises the helpers in werkzeug.wsgi: SharedDataMiddleware,
    # host/path-info helpers, LimitedStream and make_line_iter.
    def test_shareddatamiddleware_get_file_loader(self):
        # get_file_loader must return a callable even for unknown paths
        app = wsgi.SharedDataMiddleware(None, {})
        assert callable(app.get_file_loader('foo'))
    def test_shared_data_middleware(self):
        # the wrapped app always answers 404, so any 200 below proves the
        # middleware served the file itself
        def null_application(environ, start_response):
            start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
            yield 'NOT FOUND'
        app = wsgi.SharedDataMiddleware(null_application, {
            '/': path.join(path.dirname(__file__), 'res'),
            '/sources': path.join(path.dirname(__file__), 'res'),
            '/pkg': ('werkzeug.debug', 'shared')
        })
        # files served from the filesystem exports
        for p in '/test.txt', '/sources/test.txt':
            app_iter, status, headers = run_wsgi_app(app, create_environ(p))
            assert status == '200 OK'
            assert ''.join(app_iter).strip() == 'FOUND'
        # files served from a package-data export
        app_iter, status, headers = run_wsgi_app(app, create_environ('/pkg/debugger.js'))
        contents = ''.join(app_iter)
        assert '$(function() {' in contents
        # unknown paths fall through to the wrapped application
        app_iter, status, headers = run_wsgi_app(app, create_environ('/missing'))
        assert status == '404 NOT FOUND'
        assert ''.join(app_iter).strip() == 'NOT FOUND'
    def test_get_host(self):
        # X-Forwarded-Host wins over SERVER_NAME
        env = {'HTTP_X_FORWARDED_HOST': 'example.org',
               'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
        assert wsgi.get_host(env) == 'example.org'
        assert wsgi.get_host(create_environ('/', 'http://example.org')) \
            == 'example.org'
    def test_responder(self):
        # wsgi.responder turns a response-returning function into a WSGI app
        def foo(environ, start_response):
            return BaseResponse('Test')
        client = Client(wsgi.responder(foo), BaseResponse)
        response = client.get('/')
        assert response.status_code == 200
        assert response.data == 'Test'
    def test_pop_path_info(self):
        original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
        # regular path info popping
        def assert_tuple(script_name, path_info):
            assert env.get('SCRIPT_NAME') == script_name
            assert env.get('PATH_INFO') == path_info
        env = original_env.copy()
        pop = lambda: wsgi.pop_path_info(env)
        # each pop moves one segment from PATH_INFO onto SCRIPT_NAME,
        # preserving repeated slashes, and returns None once exhausted
        assert_tuple('/foo', '/a/b///c')
        assert pop() == 'a'
        assert_tuple('/foo/a', '/b///c')
        assert pop() == 'b'
        assert_tuple('/foo/a/b', '///c')
        assert pop() == 'c'
        assert_tuple('/foo/a/b///c', '')
        assert pop() is None
    def test_peek_path_info(self):
        # peeking must not consume the segment (same answer twice)
        env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/aaa/b///c'}
        assert wsgi.peek_path_info(env) == 'aaa'
        assert wsgi.peek_path_info(env) == 'aaa'
    def test_limited_stream(self):
        class RaisingLimitedStream(wsgi.LimitedStream):
            def on_exhausted(self):
                raise BadRequest('input stream exhausted')
        # reading past the limit triggers on_exhausted
        io = StringIO('123456')
        stream = RaisingLimitedStream(io, 3)
        assert stream.read() == '123'
        self.assert_raises(BadRequest, stream.read)
        io = StringIO('123456')
        stream = RaisingLimitedStream(io, 3)
        assert stream.read(1) == '1'
        assert stream.read(1) == '2'
        assert stream.read(1) == '3'
        self.assert_raises(BadRequest, stream.read)
        # readline/readlines honour the byte limit (9 of 14 bytes here)
        io = StringIO('123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        assert stream.readline() == '123456\n'
        assert stream.readline() == 'ab'
        io = StringIO('123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        assert stream.readlines() == ['123456\n', 'ab']
        # readlines with a size hint reads in chunks of that many bytes
        io = StringIO('123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        assert stream.readlines(2) == ['12']
        assert stream.readlines(2) == ['34']
        assert stream.readlines() == ['56\n', 'ab']
        io = StringIO('123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        assert stream.readline(100) == '123456\n'
        io = StringIO('123456\nabcdefg')
        stream = wsgi.LimitedStream(io, 9)
        assert stream.readlines(100) == ['123456\n', 'ab']
        # the default stream returns '' silently once exhausted
        io = StringIO('123456')
        stream = wsgi.LimitedStream(io, 3)
        assert stream.read(1) == '1'
        assert stream.read(1) == '2'
        assert stream.read() == '3'
        assert stream.read() == ''
        # read(-1) behaves like read() and stops at the limit
        io = StringIO('123456')
        stream = wsgi.LimitedStream(io, 3)
        assert stream.read(-1) == '123'
    def test_limited_stream_disconnection(self):
        io = StringIO('A bit of content')
        # disconnect detection on out of bytes
        stream = wsgi.LimitedStream(io, 255)
        with self.assert_raises(ClientDisconnected):
            stream.read()
        # disconnect detection because file close
        io = StringIO('x' * 255)
        io.close()
        stream = wsgi.LimitedStream(io, 255)
        with self.assert_raises(ClientDisconnected):
            stream.read()
    def test_path_info_extraction(self):
        # extract_path_info strips the script root from an absolute URL
        x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
        assert x == u'/hello'
        # http vs https is ignored by default (collapse_http_schemes=True)
        x = wsgi.extract_path_info('http://example.com/app',
                                   'https://example.com/app/hello')
        assert x == u'/hello'
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello')
        assert x == u'/hello'
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app')
        assert x == u'/'
        # IRIs: non-ASCII hosts and paths work too
        x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
        assert x == u'/fööbär'
        x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
        assert x == u'/fööbär'
        # a WSGI environ works as the base as well
        env = create_environ(u'/fööbär', u'http://☃.net/x/')
        x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
        assert x == u'/fööbär'
        # None is returned when the URL is outside the script root
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/a/hello')
        assert x is None
        # ... or when the schemes differ and collapsing is disabled
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello',
                                   collapse_http_schemes=False)
        assert x is None
    def test_get_host_fallback(self):
        # without forwarding headers SERVER_NAME/SERVER_PORT are used;
        # the scheme's default port is omitted from the result
        assert wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '80'
        }) == 'foobar.example.com'
        assert wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '81'
        }) == 'foobar.example.com:81'
    def test_multi_part_line_breaks(self):
        data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = StringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
        assert lines == ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK']
        # a line longer than buffer_size must still be yielded in one piece
        data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
        test_stream = StringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
        assert lines == ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz']
    def test_multi_part_line_breaks_problematic(self):
        # a bare '\r' inside the data must not be merged with the '\r\n'
        data = 'abc\rdef\r\nghi'
        # NOTE(review): the loop variable x is unused and buffer_size is
        # fixed at 4 -- possibly buffer_size=x was intended; confirm.
        for x in xrange(1, 10):
            test_stream = StringIO(data)
            lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
            assert lines == ['abc\r', 'def\r\n', 'ghi']
def suite():
    """Build and return the TestSuite for the WSGI utility tests."""
    wsgi_suite = unittest.TestSuite()
    wsgi_suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
    return wsgi_suite
| mit |
Maspear/odoo | addons/product_email_template/models/invoice.py | 321 | 1969 | # -*- coding: utf-8 -*-
from openerp.osv import osv
class account_invoice(osv.Model):
    # Extends account.invoice so that validating a customer invoice sends
    # the e-mail template attached to each invoiced product, if any.
    _inherit = 'account.invoice'
    def invoice_validate_send_email(self, cr, uid, ids, context=None):
        """For every customer invoice in ``ids``, send the e-mail template
        configured on each invoice line's product
        (``product_id.email_template_id``).  Always returns True.
        """
        Composer = self.pool['mail.compose.message']
        for invoice in self.browse(cr, uid, ids, context=context):
            # send template only on customer invoice
            if invoice.type != 'out_invoice':
                continue
            # subscribe the partner to the invoice
            if invoice.partner_id not in invoice.message_follower_ids:
                self.message_subscribe(cr, uid, [invoice.id], [invoice.partner_id.id], context=context)
            for line in invoice.invoice_line:
                if line.product_id.email_template_id:
                    # CLEANME: should define and use a clean API: message_post with a template
                    # build a mail composer bound to this invoice and template
                    composer_id = Composer.create(cr, uid, {
                        'model': 'account.invoice',
                        'res_id': invoice.id,
                        'template_id': line.product_id.email_template_id.id,
                        'composition_mode': 'comment',
                    }, context=context)
                    # render the template values for this invoice
                    template_values = Composer.onchange_template_id(
                        cr, uid, composer_id, line.product_id.email_template_id.id, 'comment', 'account.invoice', invoice.id
                    )['value']
                    # link ((4, id)) the rendered attachments instead of copying them
                    template_values['attachment_ids'] = [(4, id) for id in template_values.get('attachment_ids', [])]
                    Composer.write(cr, uid, [composer_id], template_values, context=context)
                    Composer.send_mail(cr, uid, [composer_id], context=context)
        return True
    def invoice_validate(self, cr, uid, ids, context=None):
        # hook the e-mail sending into the standard validation workflow
        res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
        self.invoice_validate_send_email(cr, uid, ids, context=context)
        return res
| agpl-3.0 |
messense/wechatpy | tests/test_create_reply.py | 1 | 4694 | # -*- coding: utf-8 -*-
import unittest
from wechatpy.replies import TextReply, create_reply
class CreateReplyTestCase(unittest.TestCase):
    """Tests for :func:`wechatpy.replies.create_reply`."""

    @staticmethod
    def _build_articles(count):
        # Produce article dicts identical to the hand-written fixtures:
        # title/description "test N", image ".../N.png", url ".../N".
        return [
            {
                'title': 'test %d' % index,
                'description': 'test %d' % index,
                'image': 'http://www.qq.com/%d.png' % index,
                'url': 'http://www.qq.com/%d' % index,
            }
            for index in range(1, count + 1)
        ]

    def test_create_reply_with_text_not_render(self):
        content = 'test'
        reply = create_reply(content, render=False)
        self.assertEqual('text', reply.type)
        self.assertEqual(content, reply.content)
        reply.render()

    def test_create_reply_with_text_render(self):
        rendered = create_reply('test', render=True)
        self.assertIsInstance(rendered, str)

    def test_create_reply_with_message(self):
        from wechatpy.messages import TextMessage
        incoming = TextMessage({
            'FromUserName': 'user1',
            'ToUserName': 'user2',
        })
        # sender and receiver must be swapped on the reply
        reply = create_reply('test', incoming, render=False)
        self.assertEqual('user1', reply.target)
        self.assertEqual('user2', reply.source)
        reply.render()

    def test_create_reply_with_reply(self):
        # passing an existing reply object returns it unchanged
        original = TextReply(content='test')
        reply = create_reply(original, render=False)
        self.assertEqual(original, reply)
        reply.render()

    def test_create_reply_with_articles(self):
        # a list of article dicts yields a news reply
        reply = create_reply(self._build_articles(3), render=False)
        self.assertEqual('news', reply.type)
        reply.render()

    def test_create_reply_with_more_than_ten_articles(self):
        # more than ten articles is rejected
        articles = self._build_articles(11)
        self.assertRaises(AttributeError, create_reply, articles)

    def test_create_empty_reply(self):
        from wechatpy.replies import EmptyReply
        # every falsy payload maps to an EmptyReply
        for value in ('', None, False):
            self.assertTrue(isinstance(create_reply(value), EmptyReply))
| mit |
sachintyagi22/spark | python/pyspark/sql/session.py | 7 | 25220 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a ``toDF`` convenience method to the RDD class, closed over
    # the given SparkSession so the conversion uses that session.
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`
        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame
        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)
    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session._jsparkSession.sessionState().conf().setConfString(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
@since(2.0)
def newSession(self):
"""
Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
    def _inferSchema(self, rdd, samplingRatio=None):
        """
        Infer schema from an RDD of Row or tuple.

        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if the RDD's first element is empty, or if types
            cannot be determined from the first 100 rows.
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")

        if samplingRatio is None:
            # No sampling: start from the first row, then pull in up to 99
            # more rows only while some column type is still undetermined.
            schema = _infer_schema(first)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row))
                    if not _has_nulltype(schema):
                        break
                else:
                    # for/else: loop exhausted without resolving all types.
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # Sample the RDD (ratios >= 0.99 just scan everything) and merge
            # the per-row schemas in a distributed reduce.
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(_infer_schema).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD; returns the RDD
        (converted to internal SQL representation) and its schema.

        :param rdd: source RDD of Row/tuple/dict
        :param schema: ``None``, a list of column names, or a StructType
        :param samplingRatio: forwarded to :meth:`_inferSchema`
        :return: tuple of (converted RDD, :class:`StructType`)
        """
        if schema is None or isinstance(schema, (list, tuple)):
            # Infer the struct, then normalize each row to match it.
            struct = self._inferSchema(rdd, samplingRatio)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                # Caller supplied only column names: rename the inferred
                # fields in place to match.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct

        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from a list or pandas.DataFrame; returns
        the parallelized RDD and the schema.

        :param data: a list (or any iterable) of rows
        :param schema: ``None``, a list of column names, or a StructType
        :return: tuple of (RDD, :class:`StructType`)
        """
        # make sure data can be consumed multiple times
        if not isinstance(data, list):
            data = list(data)

        if schema is None or isinstance(schema, (list, tuple)):
            # Infer the struct from the local rows, then normalize each row.
            struct = self._inferSchemaFromList(data)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                # Caller supplied only column names: rename inferred fields.
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct

        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)

        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.

        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.

        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.

        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.

        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.

        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``.  The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`

        .. versionchanged:: 2.1
           Added verifySchema.

        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")

        # A datatype string (e.g. "a: string, b: int") is parsed up front.
        if isinstance(schema, basestring):
            schema = _parse_datatype_string(schema)

        # pandas is an optional dependency; probe for it lazily.
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            # Flatten the pandas frame to plain Python rows; column labels
            # become the schema when none was given.
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]

        # When verification is off, substitute a no-op with the same arity.
        verify_func = _verify_type if verifySchema else lambda _, t: True
        if isinstance(schema, StructType):
            def prepare(obj):
                verify_func(obj, schema)
                return obj
        elif isinstance(schema, DataType):
            # Non-struct type: wrap into a single-field struct named "value"
            # and wrap each record in a 1-tuple to match.
            dataType = schema
            schema = StructType().add("value", schema)

            def prepare(obj):
                verify_func(obj, dataType)
                return obj,
        else:
            if isinstance(schema, list):
                # Normalize column names to str (Python 2 unicode handling).
                schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
            prepare = lambda obj: obj

        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        # Hand the rows to the JVM and let it apply the schema.
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        # Cache the schema locally so df.schema avoids a JVM round trip.
        df._schema = schema
        return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext`.

        Also clears the cached instantiated session so a subsequent
        ``Builder.getOrCreate`` builds a fresh one instead of reusing a
        session bound to a stopped context.
        """
        self._sc.stop()
        SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        :return: this session, unchanged (entering the block has no side effect).
        """
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.

        Specifically stop the SparkSession on exit of the with block.
        Exceptions are not suppressed (implicitly returns None).
        """
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext.

    Requires the ``SPARK_HOME`` environment variable to point at a Spark
    installation; exits the process with -1 on any doctest failure.
    """
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session

    # Doctests assume relative paths resolve against the Spark install root.
    os.chdir(os.environ["SPARK_HOME"])

    # Seed the doctest globals with the fixtures the examples reference
    # (sc, spark, rdd, df).
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    # Always release the context before reporting the result.
    globs['sc'].stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    _test()
| apache-2.0 |
JamesGuthrie/libcloud | libcloud/dns/drivers/route53.py | 14 | 20413 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Route53DNSDriver'
]
import base64
import hmac
import datetime
import uuid
import copy
from libcloud.utils.py3 import httplib
from hashlib import sha1
try:
from lxml import etree as ET
except ImportError:
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b, urlencode
from libcloud.utils.xml import findtext, findall, fixxpath
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.common.types import LibcloudError
from libcloud.common.aws import AWSGenericResponse
from libcloud.common.base import ConnectionUserAndKey
# Route53 REST API version and endpoint; all request paths are built from
# API_ROOT, and NAMESPACE is the XML namespace used in request/response bodies.
API_VERSION = '2012-02-29'
API_HOST = 'route53.amazonaws.com'
API_ROOT = '/%s/' % (API_VERSION)

NAMESPACE = 'https://%s/doc%s' % (API_HOST, API_ROOT)
class InvalidChangeBatch(LibcloudError):
    """Raised when Route53 rejects a ChangeResourceRecordSets batch
    (mapped from the API's ``InvalidChangeBatch`` error code)."""
    pass
class Route53DNSResponse(AWSGenericResponse):
    """
    Amazon Route53 response class.
    """
    # XML namespace and XPath used to locate error elements in responses.
    namespace = NAMESPACE
    xpath = 'Error'

    # Map of Route53 error codes to the libcloud exceptions raised for them.
    exceptions = {
        'NoSuchHostedZone': ZoneDoesNotExistError,
        'InvalidChangeBatch': InvalidChangeBatch,
    }
class Route53Connection(ConnectionUserAndKey):
    """Connection that authenticates requests with the AWS3-HTTPS scheme:
    the ``Date`` header value is HMAC-SHA1 signed with the secret key."""

    host = API_HOST
    responseCls = Route53DNSResponse

    def pre_connect_hook(self, params, headers):
        """Add ``Date`` and ``X-Amzn-Authorization`` headers to each request."""
        # The signed string is exactly the RFC 1123 date sent in the header.
        time_string = datetime.datetime.utcnow() \
                              .strftime('%a, %d %b %Y %H:%M:%S GMT')
        headers['Date'] = time_string
        tmp = []

        signature = self._get_aws_auth_b64(self.key, time_string)
        auth = {'AWSAccessKeyId': self.user_id, 'Signature': signature,
                'Algorithm': 'HmacSHA1'}
        for k, v in auth.items():
            tmp.append('%s=%s' % (k, v))

        headers['X-Amzn-Authorization'] = 'AWS3-HTTPS ' + ','.join(tmp)

        return params, headers

    def _get_aws_auth_b64(self, secret_key, time_string):
        """Return the base64-encoded HMAC-SHA1 of *time_string* keyed with
        *secret_key*, as required by AWS3-HTTPS."""
        b64_hmac = base64.b64encode(
            hmac.new(b(secret_key), b(time_string), digestmod=sha1).digest()
        )

        return b64_hmac.decode('utf-8')
class Route53DNSDriver(DNSDriver):
    """libcloud DNS driver for Amazon Route53.

    All mutations go through the ChangeResourceRecordSets API: changes are
    expressed as (action, name, type, data, extra) tuples, serialized to an
    XML change batch and POSTed to ``.../rrset``.
    """

    type = Provider.ROUTE53
    name = 'Route53 DNS'
    website = 'http://aws.amazon.com/route53/'
    connectionCls = Route53Connection

    # libcloud RecordType constants -> Route53 wire names.
    RECORD_TYPE_MAP = {
        RecordType.A: 'A',
        RecordType.AAAA: 'AAAA',
        RecordType.CNAME: 'CNAME',
        RecordType.MX: 'MX',
        RecordType.NS: 'NS',
        RecordType.PTR: 'PTR',
        RecordType.SOA: 'SOA',
        RecordType.SPF: 'SPF',
        RecordType.SRV: 'SRV',
        RecordType.TXT: 'TXT',
    }

    def iterate_zones(self):
        # Lazily pages through all hosted zones (see _get_more).
        return self._get_more('zones')

    def iterate_records(self, zone):
        # Lazily pages through all record sets of the given zone.
        return self._get_more('records', zone=zone)

    def get_zone(self, zone_id):
        """Fetch a single hosted zone by id."""
        # set_context ties the zone id to any error the response raises.
        self.connection.set_context({'zone_id': zone_id})
        uri = API_ROOT + 'hostedzone/' + zone_id
        data = self.connection.request(uri).object
        elem = findall(element=data, xpath='HostedZone',
                       namespace=NAMESPACE)[0]
        return self._to_zone(elem)

    def get_record(self, zone_id, record_id):
        """Fetch a single record.

        ``record_id`` has the form ``"<TYPE>:<name>"`` (an empty name means
        the zone apex).
        """
        zone = self.get_zone(zone_id=zone_id)
        record_type, name = record_id.split(':', 1)
        if name:
            full_name = ".".join((name, zone.domain))
        else:
            full_name = zone.domain
        self.connection.set_context({'zone_id': zone_id})
        params = urlencode({
            'name': full_name,
            'type': record_type,
            'maxitems': '1'
        })
        uri = API_ROOT + 'hostedzone/' + zone_id + '/rrset?' + params
        data = self.connection.request(uri).object
        record = self._to_records(data=data, zone=zone)[0]

        # A cute aspect of the /rrset filters is that they are more
        # pagination hints than filters: the API returns the next record in
        # order even when it is not the one asked for, so verify the match.
        record_type_num = self._string_to_record_type(record_type)
        if record.name != name or record.type != record_type_num:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=record_id)
        return record

    def create_zone(self, domain, type='master', ttl=None, extra=None):
        """Create a hosted zone for *domain*.

        ``type`` and ``ttl`` are accepted for interface compatibility but
        ignored; ``extra['Comment']`` is forwarded if present.
        """
        zone = ET.Element('CreateHostedZoneRequest', {'xmlns': NAMESPACE})
        ET.SubElement(zone, 'Name').text = domain
        # CallerReference must be unique per create request.
        ET.SubElement(zone, 'CallerReference').text = str(uuid.uuid4())

        if extra and 'Comment' in extra:
            hzg = ET.SubElement(zone, 'HostedZoneConfig')
            ET.SubElement(hzg, 'Comment').text = extra['Comment']

        uri = API_ROOT + 'hostedzone'
        data = ET.tostring(zone)
        rsp = self.connection.request(uri, method='POST', data=data).object

        elem = findall(element=rsp, xpath='HostedZone', namespace=NAMESPACE)[0]
        return self._to_zone(elem=elem)

    def delete_zone(self, zone, ex_delete_records=False):
        """Delete *zone*; optionally delete its records first (Route53
        refuses to delete a zone that still holds non-NS/SOA records)."""
        self.connection.set_context({'zone_id': zone.id})

        if ex_delete_records:
            self.ex_delete_all_records(zone=zone)

        uri = API_ROOT + 'hostedzone/%s' % (zone.id)
        response = self.connection.request(uri, method='DELETE')
        return response.status in [httplib.OK]

    def create_record(self, name, zone, type, data, extra=None):
        """Create a record via a single-CREATE change batch."""
        extra = extra or {}
        batch = [('CREATE', name, type, data, extra)]
        self._post_changeset(zone, batch)
        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data, zone=zone,
                      driver=self, extra=extra)

    def update_record(self, record, name=None, type=None, data=None,
                      extra=None):
        """Update a record; omitted arguments default to the record's
        current values."""
        name = name or record.name
        type = type or record.type
        extra = extra or record.extra

        # NOTE(review): this branch is unreachable — after the ``or`` above
        # ``extra`` can only be falsy if ``record.extra`` is too.
        if not extra:
            extra = record.extra

        # Multiple value records need to be handled specially - we need to
        # pass values for other records as well
        multiple_value_record = record.extra.get('_multi_value', False)
        other_records = record.extra.get('_other_records', [])

        if multiple_value_record and other_records:
            self._update_multi_value_record(record=record, name=name,
                                            type=type, data=data,
                                            extra=extra)
        else:
            self._update_single_value_record(record=record, name=name,
                                             type=type, data=data,
                                             extra=extra)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        return Record(id=id, name=name, type=type, data=data, zone=record.zone,
                      driver=self, extra=extra)

    def delete_record(self, record):
        """Delete *record*; raises RecordDoesNotExistError if Route53
        rejects the change batch (record not present)."""
        try:
            r = record
            batch = [('DELETE', r.name, r.type, r.data, r.extra)]
            self._post_changeset(record.zone, batch)
        except InvalidChangeBatch:
            raise RecordDoesNotExistError(value='', driver=self,
                                          record_id=r.id)
        return True

    def ex_create_multi_value_record(self, name, zone, type, data, extra=None):
        """
        Create a record with multiple values with a single call.

        :return: A list of created records.
        :rtype: ``list`` of :class:`libcloud.dns.base.Record`
        """
        extra = extra or {}

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')
        ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        # Value is provided as a multi line string
        values = [value.strip() for value in data.split('\n') if
                  value.strip()]

        for value in values:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = value

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        self.connection.request(uri, method='POST', data=data)

        # All values share the same Route53 record-set id.
        id = ':'.join((self.RECORD_TYPE_MAP[type], name))

        records = []
        for value in values:
            record = Record(id=id, name=name, type=type, data=value, zone=zone,
                            driver=self, extra=extra)
            records.append(record)

        return records

    def ex_delete_all_records(self, zone):
        """
        Remove all the records for the provided zone.

        :param zone: Zone to delete records for.
        :type  zone: :class:`Zone`
        """
        deletions = []
        for r in zone.list_records():
            # NS and SOA records cannot be deleted from a hosted zone.
            if r.type in (RecordType.NS, RecordType.SOA):
                continue
            deletions.append(('DELETE', r.name, r.type, r.data, r.extra))

        if deletions:
            self._post_changeset(zone, deletions)

    def _update_single_value_record(self, record, name=None, type=None,
                                    data=None, extra=None):
        # Route53 has no atomic "replace"; a DELETE+CREATE pair in one
        # change batch is the documented equivalent.
        batch = [
            ('DELETE', record.name, record.type, record.data, record.extra),
            ('CREATE', name, type, data, extra)
        ]

        return self._post_changeset(record.zone, batch)

    def _update_multi_value_record(self, record, name=None, type=None,
                                   data=None, extra=None):
        # When a record set has several values, the whole set must be
        # deleted and re-created: only the targeted value changes, the
        # sibling values (stashed in extra['_other_records']) are re-sent.
        other_records = record.extra.get('_other_records', [])

        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        # Delete existing records
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'DELETE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')

        if record.name:
            record_name = record.name + '.' + record.zone.domain
        else:
            record_name = record.zone.domain

        ET.SubElement(rrs, 'Name').text = record_name
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[record.type]
        ET.SubElement(rrs, 'TTL').text = str(record.extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = record.data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        # Re-create new (updated) records. Since we are updating a multi
        # value record, only a single record is updated and others are left
        # as is.
        change = ET.SubElement(changes, 'Change')
        ET.SubElement(change, 'Action').text = 'CREATE'

        rrs = ET.SubElement(change, 'ResourceRecordSet')

        if name:
            record_name = name + '.' + record.zone.domain
        else:
            record_name = record.zone.domain

        ET.SubElement(rrs, 'Name').text = record_name
        ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type]
        ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

        rrecs = ET.SubElement(rrs, 'ResourceRecords')

        rrec = ET.SubElement(rrecs, 'ResourceRecord')
        ET.SubElement(rrec, 'Value').text = data

        for other_record in other_records:
            rrec = ET.SubElement(rrecs, 'ResourceRecord')
            ET.SubElement(rrec, 'Value').text = other_record['data']

        uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': record.zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _post_changeset(self, zone, changes_list):
        # Serialize (action, name, type, data, extra) tuples into one
        # ChangeResourceRecordSets request and POST it.
        attrs = {'xmlns': NAMESPACE}
        changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs)
        batch = ET.SubElement(changeset, 'ChangeBatch')
        changes = ET.SubElement(batch, 'Changes')

        for action, name, type_, data, extra in changes_list:
            change = ET.SubElement(changes, 'Change')
            ET.SubElement(change, 'Action').text = action

            rrs = ET.SubElement(change, 'ResourceRecordSet')

            if name:
                record_name = name + '.' + zone.domain
            else:
                # Empty name addresses the zone apex.
                record_name = zone.domain

            ET.SubElement(rrs, 'Name').text = record_name
            ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_]
            ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0'))

            rrecs = ET.SubElement(rrs, 'ResourceRecords')
            rrec = ET.SubElement(rrecs, 'ResourceRecord')

            if 'priority' in extra:
                # MX/SRV priority is encoded as a prefix of the value.
                data = '%s %s' % (extra['priority'], data)

            ET.SubElement(rrec, 'Value').text = data

        uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset'
        data = ET.tostring(changeset)
        self.connection.set_context({'zone_id': zone.id})
        response = self.connection.request(uri, method='POST', data=data)

        return response.status == httplib.OK

    def _to_zones(self, data):
        # Parse a ListHostedZones response body into Zone objects.
        zones = []
        for element in data.findall(fixxpath(xpath='HostedZones/HostedZone',
                                             namespace=NAMESPACE)):
            zones.append(self._to_zone(element))

        return zones

    def _to_zone(self, elem):
        # Parse a single HostedZone element into a Zone.
        name = findtext(element=elem, xpath='Name', namespace=NAMESPACE)
        id = findtext(element=elem, xpath='Id',
                      namespace=NAMESPACE).replace('/hostedzone/', '')
        comment = findtext(element=elem, xpath='Config/Comment',
                           namespace=NAMESPACE)
        resource_record_count = int(findtext(element=elem,
                                             xpath='ResourceRecordSetCount',
                                             namespace=NAMESPACE))

        extra = {'Comment': comment, 'ResourceRecordSetCount':
                 resource_record_count}

        zone = Zone(id=id, domain=name, type='master', ttl=0, driver=self,
                    extra=extra)
        return zone

    def _to_records(self, data, zone):
        # Parse a ListResourceRecordSets response; a record set with N
        # values is expanded into N Record objects that cross-reference
        # each other through extra['_other_records'].
        records = []
        elems = data.findall(
            fixxpath(xpath='ResourceRecordSets/ResourceRecordSet',
                     namespace=NAMESPACE))
        for elem in elems:
            record_set = elem.findall(fixxpath(
                xpath='ResourceRecords/ResourceRecord',
                namespace=NAMESPACE))
            record_count = len(record_set)
            multiple_value_record = (record_count > 1)

            record_set_records = []

            for index, record in enumerate(record_set):
                # Need to special handling for records with multiple values
                # for update to work correctly
                record = self._to_record(elem=elem, zone=zone, index=index)
                record.extra['_multi_value'] = multiple_value_record

                if multiple_value_record:
                    record.extra['_other_records'] = []

                record_set_records.append(record)

            # Store reference to other records so update works correctly
            if multiple_value_record:
                for index in range(0, len(record_set_records)):
                    record = record_set_records[index]

                    for other_index, other_record in \
                            enumerate(record_set_records):
                        if index == other_index:
                            # Skip current record
                            continue

                        # Deep-copy so sibling snapshots don't share the
                        # mutable extra dicts.
                        extra = copy.deepcopy(other_record.extra)
                        extra.pop('_multi_value')
                        extra.pop('_other_records')

                        item = {'name': other_record.name,
                                'data': other_record.data,
                                'type': other_record.type,
                                'extra': extra}
                        record.extra['_other_records'].append(item)

            records.extend(record_set_records)

        return records

    def _to_record(self, elem, zone, index=0):
        # Parse one value (at ``index``) of a ResourceRecordSet element.
        name = findtext(element=elem, xpath='Name',
                        namespace=NAMESPACE)
        # Strip the trailing ".<zone domain>." suffix to get the bare name.
        name = name[:-len(zone.domain) - 1]

        type = self._string_to_record_type(findtext(element=elem, xpath='Type',
                                                    namespace=NAMESPACE))
        ttl = int(findtext(element=elem, xpath='TTL', namespace=NAMESPACE))

        value_elem = elem.findall(
            fixxpath(xpath='ResourceRecords/ResourceRecord',
                     namespace=NAMESPACE))[index]
        data = findtext(element=(value_elem), xpath='Value',
                        namespace=NAMESPACE)

        extra = {'ttl': ttl}

        # NOTE(review): these comparisons assume RecordType constants are
        # the plain strings 'MX'/'SRV' — confirm against libcloud.dns.types.
        if type == 'MX':
            # Split the "priority data" prefix encoding back apart.
            split = data.split()
            priority, data = split
            extra['priority'] = int(priority)
        elif type == 'SRV':
            split = data.split()
            priority, weight, port, data = split
            extra['priority'] = int(priority)
            extra['weight'] = int(weight)
            extra['port'] = int(port)

        id = ':'.join((self.RECORD_TYPE_MAP[type], name))
        record = Record(id=id, name=name, type=type, data=data, zone=zone,
                        driver=self, extra=extra)
        return record

    def _get_more(self, rtype, **kwargs):
        # Generator that follows Route53 pagination until IsTruncated is
        # no longer 'true'.
        exhausted = False
        last_key = None
        while not exhausted:
            items, last_key, exhausted = self._get_data(rtype, last_key,
                                                        **kwargs)
            for item in items:
                yield item

    def _get_data(self, rtype, last_key, **kwargs):
        """Fetch one page of 'zones' or 'records'.

        :return: tuple of (items, next-page key, exhausted flag).
        """
        params = {}
        if last_key:
            params['name'] = last_key

        path = API_ROOT + 'hostedzone'

        if rtype == 'zones':
            response = self.connection.request(path, params=params)
            transform_func = self._to_zones
        elif rtype == 'records':
            zone = kwargs['zone']
            path += '/%s/rrset' % (zone.id)
            self.connection.set_context({'zone_id': zone.id})
            response = self.connection.request(path, params=params)
            transform_func = self._to_records

        if response.status == httplib.OK:
            is_truncated = findtext(element=response.object,
                                    xpath='IsTruncated',
                                    namespace=NAMESPACE)
            exhausted = is_truncated != 'true'
            last_key = findtext(element=response.object,
                                xpath='NextRecordName',
                                namespace=NAMESPACE)
            items = transform_func(data=response.object, **kwargs)
            return items, last_key, exhausted
        else:
            return [], None, True
| apache-2.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/urllib3/util/ssl_.py | 4 | 13257 | from __future__ import absolute_import
import errno
import warnings
import hmac
import socket
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
from ..packages import six
# Defaults for environments without a usable ssl module; the real values are
# filled in by the feature probes below (or by the pyopenssl/securetransport
# contrib modules, which monkey-patch these flags).
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False

# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {
    32: md5,
    40: sha1,
    64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
_const_compare_digest = getattr(hmac, 'compare_digest',
_const_compare_digest_backport)
try:  # Test for SSL features
    import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    # No ssl module at all — the module-level defaults above remain in force.
    pass


try:
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Hard-coded OpenSSL option values for Pythons whose ssl module does
    # not expose these constants.
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000


# Python 2.7 doesn't have inet_pton on non-Linux so we fallback on inet_aton in
# those cases. This means that we can only detect IPv4 addresses in this case.
if hasattr(socket, 'inet_pton'):
    inet_pton = socket.inet_pton
else:
    # Maybe we can use ipaddress if the user has urllib3[secure]?
    try:
        import ipaddress

        def inet_pton(_, host):
            # ip_address accepts text only; tolerate bytes callers.
            if isinstance(host, bytes):
                host = host.decode('ascii')
            return ipaddress.ip_address(host)
    except ImportError:  # Platform-specific: Non-Linux
        def inet_pton(_, host):
            # Last resort: IPv4-only validation via inet_aton.
            return socket.inet_aton(host)


# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer TLS 1.3 cipher suites
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
#   security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
DEFAULT_CIPHERS = ':'.join([
    'TLS13-AES-256-GCM-SHA384',
    'TLS13-CHACHA20-POLY1305-SHA256',
    'TLS13-AES-128-GCM-SHA256',
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',
    '!eNULL',
    '!MD5',
])
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2
        """Minimal stand-in for :class:`ssl.SSLContext` on Pythons whose
        ssl module predates it.  It only records the settings and replays
        them through :func:`ssl.wrap_socket`."""

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stored and forwarded to wrap_socket later.
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile

            if capath is not None:
                # wrap_socket has no capath equivalent, so fail loudly.
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # server_hostname is accepted for API compatibility but cannot
            # be honored (no SNI without a real SSLContext) — warn the user.
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError:
        If the fingerprint length matches no known hash, or the digests
        do not match.
    """
    # Normalize: drop colon separators and force lower case hex.
    fingerprint = fingerprint.replace(':', '').lower()

    # The digest length selects the hash algorithm (md5/sha1/sha256).
    hashfunc = HASHFUNC_MAP.get(len(fingerprint))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(fingerprint))

    # We need encode() here for py32; works on py2 and p33.
    expected_digest = unhexlify(fingerprint.encode())
    actual_digest = hashfunc(cert).digest()

    # Constant-time comparison to avoid timing side channels.
    if not _const_compare_digest(actual_digest, expected_digest):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(fingerprint, hexlify(actual_digest)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE
    if not isinstance(candidate, str):
        # Already a numeric constant; pass it through untouched.
        return candidate
    # Try the exact name first, then the 'CERT_'-prefixed abbreviation.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    like resolve_cert_reqs
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        # Assume it is already a numeric ssl.PROTOCOL_* constant.
        return candidate
    # Accept both the full constant name and the 'PROTOCOL_'-less short form.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def create_urllib3_context(ssl_version=None, cert_reqs=None,
                           options=None, ciphers=None):
    """All arguments have the same meaning as ``ssl_wrap_socket``.
    By default, this function does a lot of the same work that
    ``ssl.create_default_context`` does on Python 3.4+. It:
    - Disables SSLv2, SSLv3, and compression
    - Sets a restricted set of server ciphers
    If you wish to enable SSLv3, you can do::
        from urllib3.util import ssl_
        context = ssl_.create_urllib3_context()
        context.options &= ~ssl_.OP_NO_SSLv3
    You can do the same to enable compression (substituting ``COMPRESSION``
    for ``SSLv3`` in the last line above).
    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
    :param ciphers:
        Which cipher suites to allow the server to select.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    # SSLContext here may be the stdlib class or the shim defined above.
    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
    context.set_ciphers(ciphers or DEFAULT_CIPHERS)
    # Setting the default here, as we may have no ssl module on import
    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
    # Only apply the hardened defaults when the caller supplied no options;
    # an explicit 'options' argument is taken verbatim.
    if options is None:
        options = 0
        # SSLv2 is easily broken and is considered harmful and dangerous
        options |= OP_NO_SSLv2
        # SSLv3 has several problems and is now dangerous
        options |= OP_NO_SSLv3
        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
        # (issue #309)
        options |= OP_NO_COMPRESSION
    context.options |= options
    context.verify_mode = cert_reqs
    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
        # We do our own verification, including fingerprints and alternative
        # hostnames. So disable it here
        context.check_hostname = False
    return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.
    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)
    if ca_certs or ca_cert_dir:
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.7
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise
    # Don't load system certs unless there were no CA certs or
    # SSLContext object specified manually.
    elif ssl_context is None and hasattr(context, 'load_default_certs'):
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()
    if certfile:
        context.load_cert_chain(certfile, keyfile)
    # If we detect server_hostname is an IP address then the SNI
    # extension should not be used according to RFC3546 Section 3.1
    # We shouldn't warn the user if SNI isn't available but we would
    # not be using SNI anyways due to IP address for server_hostname.
    if ((server_hostname is not None and not is_ipaddress(server_hostname))
            or IS_SECURETRANSPORT):
        if HAS_SNI and server_hostname is not None:
            return context.wrap_socket(sock, server_hostname=server_hostname)
        # SNI is needed but unavailable: warn, then fall through to a plain
        # wrap without a hostname, which may fail certificate validation.
        warnings.warn(
            'An HTTPS request has been made, but the SNI (Server Name '
            'Indication) extension to TLS is not available on this platform. '
            'This may cause the server to present an incorrect TLS '
            'certificate, which can cause validation failures. You can upgrade to '
            'a newer version of Python to solve this. For more information, see '
            'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
            '#ssl-warnings',
            SNIMissingWarning
        )
    return context.wrap_socket(sock)
def is_ipaddress(hostname):
    """Detects whether the hostname given is an IP address.
    :param str hostname: Hostname to examine.
    :return: True if the hostname is an IP address, False otherwise.
    """
    if six.PY3 and isinstance(hostname, bytes):
        # IDN A-label bytes are ASCII compatible.
        hostname = hostname.decode('ascii')
    # Check IPv4 first, then IPv6 where the platform supports it.
    candidate_families = [socket.AF_INET]
    if hasattr(socket, 'AF_INET6'):
        candidate_families.append(socket.AF_INET6)
    for family in candidate_families:
        try:
            inet_pton(family, hostname)
        except (socket.error, ValueError, OSError):
            # Not a valid address for this family; try the next one.
            continue
        return True
    return False
| apache-2.0 |
broferek/ansible | lib/ansible/modules/network/fortios/fortios_ips_settings.py | 14 | 9349 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_ips_settings
short_description: Configure IPS VDOM parameter in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify ips feature and settings category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
ips_settings:
description:
- Configure IPS VDOM parameter.
default: null
type: dict
suboptions:
ips_packet_quota:
description:
- Maximum amount of disk space in MB for logged packets when logging to disk. Range depends on disk size.
type: int
packet_log_history:
description:
- Number of packets to capture before and including the one in which the IPS signature is detected (1 - 255).
type: int
packet_log_memory:
description:
- Maximum memory can be used by packet log (64 - 8192 kB).
type: int
packet_log_post_attack:
description:
- Number of packets to log after the IPS signature is detected (0 - 255).
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure IPS VDOM parameter.
fortios_ips_settings:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
ips_settings:
ips_packet_quota: "3"
packet_log_history: "4"
packet_log_memory: "5"
packet_log_post_attack: "6"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate against the FortiGate/FortiOS device described in *data*.

    Enables API debugging, selects HTTP vs HTTPS transport, then logs in
    with the supplied credentials.
    """
    fos.debug('on')
    # HTTPS is the default; only an explicit False in the params disables it.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_ips_settings_data(json):
    """Return only the supported ips_settings options from *json*.

    Keys outside the supported set, and keys whose value is None, are
    dropped so they are never sent to the device.
    """
    option_list = ['ips_packet_quota', 'packet_log_history', 'packet_log_memory',
                   'packet_log_post_attack']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively convert snake_case dict keys to hyphen-case.

    FortiOS expects hyphenated attribute names while Ansible argument
    specs use underscores. Only dict keys are rewritten; values and
    non-container data are returned unchanged. Lists and nested dicts
    are converted recursively.
    """
    if isinstance(data, list):
        # Bug fix: the previous loop rebound only the loop variable
        # ('elem = underscore_to_hyphen(elem)'), so converted elements
        # were discarded and lists of dicts kept their underscore keys.
        # Build a new list from the converted elements instead.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def ips_settings(data, fos):
    """Push the ips_settings configuration to the device.

    Filters out unsupported/None options, converts key names to the
    hyphenated form FortiOS expects, and issues the set call against the
    'ips'/'settings' path in the requested vdom.
    """
    filtered_data = underscore_to_hyphen(
        filter_ips_settings_data(data['ips_settings']))
    return fos.set('ips', 'settings', data=filtered_data, vdom=data['vdom'])
def is_successful_status(status):
    """Return True when a FortiOS API response indicates success.

    A DELETE that comes back HTTP 404 is also treated as success: the
    object is already absent, which is the desired end state.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_ips(data, fos):
    """Dispatch the requested ips configuration.

    Returns a (is_error, has_changed, response) triple for the module
    exit handling in main().
    """
    if data['ips_settings']:
        resp = ips_settings(data, fos)
    # NOTE(review): when 'ips_settings' is falsy, 'resp' is unbound and the
    # return below raises UnboundLocalError — same as the original behavior.
    return (not is_successful_status(resp),
            resp['status'] == "success",
            resp)
def main():
    """Module entry point: parse arguments, connect, apply configuration."""
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "ips_settings": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "ips_packet_quota": {"required": False, "type": "int"},
                "packet_log_history": {"required": False, "type": "int"},
                "packet_log_memory": {"required": False, "type": "int"},
                "packet_log_post_attack": {"required": False, "type": "int"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    # (all three credentials must be supplied explicitly for legacy mode).
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: reuse the persistent connection Ansible provides.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_ips(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: direct connection via the optional fortiosapi library.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_ips(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
chen0031/nupic | nupic/regions/PictureSensorExplorers/rotate.py | 17 | 3479 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines RotatePictureExplorer, an explorer for
PictureSensor.
"""
from nupic.regions.PictureSensor import PictureSensor
class RotatePictureExplorer(PictureSensor.PictureExplorer):
  """Explorer that sweeps a pattern through a range of rotation angles.

  Position and linear velocity stay fixed at zero; only the angular
  position changes between iterations, stepping down from maxAngularPosn
  by a constant angular velocity.
  """

  @classmethod
  def queryRelevantParams(klass):
    """
    Returns a sequence of parameter names that are relevant to
    the operation of the explorer.
    May be extended or overridden by sub-classes as appropriate.
    """
    return ( 'numRepetitions',
             'minAngularPosn', 'maxAngularPosn',
             'minAngularVelocity', 'maxAngularVelocity',
             )

  def notifyParamUpdate(self, params):
    """
    A callback that will be invoked if/when any of the explorer's
    relevant parameters have their values changed.
    @param params: a dict containing the new values of all parameters
                   that are relevant to the explorer's operation
                   (as specified by a call to queryRelevantParams()).
    """
    # Parameter checks
    # Only a constant angular velocity is implemented, so min must equal max.
    if params['minAngularVelocity'] != params['maxAngularVelocity']:
      raise NotImplementedError("'rotate' explorer currently supports " \
            "only a fixed angular velocity; i.e., 'minAngularVelocity' (%d) " \
            "must be identical to 'maxAngularVelocity' (%d)" \
            % (params['minAngularVelocity'], params['maxAngularVelocity']))
    super(RotatePictureExplorer, self).notifyParamUpdate(params)

  def initSequence(self, state, params):
    # First presentation of a sequence uses the same stepping logic.
    self._presentNextRotation(state, params)

  def updateSequence(self, state, params):
    self._presentNextRotation(state, params)

  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Internal helper method(s)

  def _presentNextRotation(self, state, params):
    """
    Compute the appropriate category and rotational angle
    deterministically based on the current iteration count.
    """
    # These don't change
    state['posnX'] = 0
    state['posnY'] = 0
    state['velocityX'] = 0
    state['velocityY'] = 0
    state['angularVelocity'] = params['minAngularVelocity']
    # These do change
    # One sequence covers the full angular range at the fixed velocity.
    sequenceLength = 1 + int((params['maxAngularPosn'] - params['minAngularPosn'])
                     / params['minAngularVelocity'])
    # Integer division: each category gets sequenceLength * numRepetitions iterations.
    state['catIndex'] = self._getIterCount() / (sequenceLength * params['numRepetitions'])
    seqIndex = self._getIterCount() % (sequenceLength * params['numRepetitions'])
    # Sweep downward from the maximum angle.
    state['angularPosn'] = params['maxAngularPosn'] \
                         - state['angularVelocity'] * seqIndex
| agpl-3.0 |
axinging/chromium-crosswalk | tools/perf/metrics/speedindex_unittest.py | 21 | 4002 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# These tests access private methods in the speedindex module.
# pylint: disable=protected-access
import unittest
from telemetry.util import color_histogram
from telemetry.util import rgba_color
from metrics import speedindex
class FakeImageUtil(object):
  """Stand-in for the image-util dependency used by VideoSpeedIndexImpl."""
  # pylint: disable=unused-argument
  def GetColorHistogram(self, image, ignore_color=None, tolerance=None):
    # Delegate to the fake bitmap, which carries a precomputed histogram;
    # ignore_color and tolerance are accepted for interface parity only.
    return image.ColorHistogram()
class FakeVideo(object):
  """Minimal video stub that replays a canned sequence of frames."""

  def __init__(self, frames):
    # Each frame is a (timestamp, bitmap) pair supplied by the test.
    self._frames = frames

  def GetVideoFrameIter(self):
    """Return an iterator over the preset frames, in order."""
    return iter(self._frames)
class FakeBitmap(object):
  """Fake frame bitmap exposing a precomputed color histogram.

  r, g, b are per-channel histogram bucket lists; the ignore color is
  fixed to white, matching what the real capture pipeline uses.
  """
  def __init__(self, r, g, b):
    self._histogram = color_histogram.ColorHistogram(r, g, b, rgba_color.WHITE)
  # pylint: disable=unused-argument
  def ColorHistogram(self, ignore_color=None, tolerance=None):
    # Arguments accepted for interface parity with real bitmaps but unused.
    return self._histogram
class FakeTab(object):
  """Test double for a browser tab: canned JS results and video capture."""

  def __init__(self, video_capture_result=None):
    self._javascript_result = None
    # Wrap the supplied frames so StopVideoCapture returns a video object.
    self._video_capture_result = FakeVideo(video_capture_result)

  @property
  def video_capture_supported(self):
    return self._video_capture_result is not None

  def SetEvaluateJavaScriptResult(self, result):
    # Fix the value returned by every subsequent EvaluateJavaScript call.
    self._javascript_result = result

  def EvaluateJavaScript(self, _):
    return self._javascript_result

  def StartVideoCapture(self, min_bitrate_mbps=1):
    assert self.video_capture_supported
    assert min_bitrate_mbps > 0

  def StopVideoCapture(self):
    assert self.video_capture_supported
    return self._video_capture_result

  def Highlight(self, _):
    # No-op: highlighting has no observable effect in these tests.
    pass
class SpeedIndexImplTest(unittest.TestCase):
  """Tests for VideoSpeedIndexImpl's completeness computation."""

  def testVideoCompleteness(self):
    # Each frame: (timestamp, histogram). Completeness is derived from the
    # histogram distance to the final frame; the last frame is 1.0 by
    # definition and the first is forced to 0.
    frames = [
        (0.0, FakeBitmap([0, 0, 0, 10], [0, 0, 0, 10], [0, 0, 0, 10])),
        (0.1, FakeBitmap([10, 0, 0, 0], [10, 0, 0, 0], [10, 0, 0, 0])),
        (0.2, FakeBitmap([0, 0, 2, 8], [0, 0, 4, 6], [0, 0, 1, 9])),
        (0.3, FakeBitmap([0, 3, 2, 5], [2, 1, 0, 7], [0, 3, 0, 7])),
        (0.4, FakeBitmap([0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0])),
        (0.5, FakeBitmap([0, 4, 6, 0], [0, 4, 6, 0], [0, 4, 6, 0])),
    ]
    # Maximum possible histogram distance used to normalize completeness.
    max_distance = 42.
    tab = FakeTab(frames)
    impl = speedindex.VideoSpeedIndexImpl(FakeImageUtil())
    impl.Start(tab)
    impl.Stop(tab)
    time_completeness = impl.GetTimeCompletenessList(tab)
    self.assertEqual(len(time_completeness), 6)
    self.assertEqual(time_completeness[0], (0.0, 0))
    # Expected values: 1 - (per-channel distance sum) / max_distance.
    self.assertTimeCompleteness(
        time_completeness[1], 0.1, 1 - (16 + 16 + 16) / max_distance)
    self.assertTimeCompleteness(
        time_completeness[2], 0.2, 1 - (12 + 10 + 13) / max_distance)
    self.assertTimeCompleteness(
        time_completeness[3], 0.3, 1 - (6 + 10 + 8) / max_distance)
    self.assertTimeCompleteness(
        time_completeness[4], 0.4, 1 - (4 + 4 + 4) / max_distance)
    self.assertEqual(time_completeness[5], (0.5, 1))

  def testBlankPage(self):
    # A page identical to the final frame counts as complete (1.0) even at
    # time 0; only the differing frame at 0.2 drops to 0.
    frames = [
        (0.0, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
        (0.1, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
        (0.2, FakeBitmap([1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 0, 1])),
        (0.3, FakeBitmap([0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1])),
    ]
    tab = FakeTab(frames)
    impl = speedindex.VideoSpeedIndexImpl(FakeImageUtil())
    impl.Start(tab)
    impl.Stop(tab)
    time_completeness = impl.GetTimeCompletenessList(tab)
    self.assertEqual(len(time_completeness), 4)
    self.assertEqual(time_completeness[0], (0.0, 1.0))
    self.assertEqual(time_completeness[1], (0.1, 1.0))
    self.assertEqual(time_completeness[2], (0.2, 0.0))
    self.assertEqual(time_completeness[3], (0.3, 1.0))

  def assertTimeCompleteness(self, time_completeness, time, completeness):
    # Helper: exact timestamp match, approximate (float) completeness match.
    self.assertEqual(time_completeness[0], time)
    self.assertAlmostEqual(time_completeness[1], completeness)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
partofthething/home-assistant | script/hassfest/coverage.py | 9 | 3811 | """Validate coverage files."""
from pathlib import Path
from typing import Dict
from .model import Config, Integration
DONT_IGNORE = (
"config_flow.py",
"device_action.py",
"device_condition.py",
"device_trigger.py",
"group.py",
"intent.py",
"logbook.py",
"media_source.py",
"scene.py",
)
# They were violating when we introduced this check
# Need to be fixed in a future PR.
ALLOWED_IGNORE_VIOLATIONS = {
("ambient_station", "config_flow.py"),
("cast", "config_flow.py"),
("daikin", "config_flow.py"),
("doorbird", "config_flow.py"),
("doorbird", "logbook.py"),
("elkm1", "config_flow.py"),
("elkm1", "scene.py"),
("fibaro", "scene.py"),
("flume", "config_flow.py"),
("hangouts", "config_flow.py"),
("harmony", "config_flow.py"),
("hisense_aehw4a1", "config_flow.py"),
("home_connect", "config_flow.py"),
("huawei_lte", "config_flow.py"),
("ifttt", "config_flow.py"),
("ios", "config_flow.py"),
("iqvia", "config_flow.py"),
("knx", "scene.py"),
("konnected", "config_flow.py"),
("lcn", "scene.py"),
("life360", "config_flow.py"),
("lifx", "config_flow.py"),
("lutron", "scene.py"),
("mobile_app", "config_flow.py"),
("nest", "config_flow.py"),
("plaato", "config_flow.py"),
("point", "config_flow.py"),
("rachio", "config_flow.py"),
("sense", "config_flow.py"),
("sms", "config_flow.py"),
("solarlog", "config_flow.py"),
("somfy", "config_flow.py"),
("sonos", "config_flow.py"),
("speedtestdotnet", "config_flow.py"),
("spider", "config_flow.py"),
("starline", "config_flow.py"),
("tado", "config_flow.py"),
("tahoma", "scene.py"),
("totalconnect", "config_flow.py"),
("tradfri", "config_flow.py"),
("tuya", "config_flow.py"),
("tuya", "scene.py"),
("upnp", "config_flow.py"),
("velux", "scene.py"),
("wemo", "config_flow.py"),
("wiffi", "config_flow.py"),
("wink", "scene.py"),
}
def validate(integrations: Dict[str, Integration], config: Config):
    """Validate coverage.

    Parses the 'omit =' section of .coveragerc, checking that every listed
    path exists and that no integration ignores files that must be covered
    (config flows, device automations, etc. — see DONT_IGNORE).
    """
    coverage_path = config.root / ".coveragerc"
    not_found = []
    # 'checking' becomes True once we are inside the 'omit =' block.
    checking = False
    with coverage_path.open("rt") as fp:
        for line in fp:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if not checking:
                if line == "omit =":
                    checking = True
                continue
            # Finished
            if line == "[report]":
                break
            path = Path(line)
            # Discard wildcard
            # Climb up until the path component contains no '*' so we can
            # existence-check the concrete part of the pattern.
            path_exists = path
            while "*" in path_exists.name:
                path_exists = path_exists.parent
            if not path_exists.exists():
                not_found.append(line)
                continue
            # Only whole-integration omissions (homeassistant/components/x/*)
            # are subject to the DONT_IGNORE rules below.
            if (
                not line.startswith("homeassistant/components/")
                or not len(path.parts) == 4
                or not path.parts[-1] == "*"
            ):
                continue
            integration_path = path.parent
            integration = integrations[integration_path.name]
            for check in DONT_IGNORE:
                if (integration_path.name, check) in ALLOWED_IGNORE_VIOLATIONS:
                    # Grandfathered violation; see ALLOWED_IGNORE_VIOLATIONS.
                    continue
                if (integration_path / check).exists():
                    integration.add_error(
                        "coverage",
                        f"{check} must not be ignored by the .coveragerc file",
                    )
    if not not_found:
        return
    errors = []
    if not_found:
        errors.append(
            f".coveragerc references files that don't exist: {', '.join(not_found)}."
        )
    raise RuntimeError(" ".join(errors))
| mit |
NEricN/RobotCSimulator | Python/App/Lib/test/test_mimetypes.py | 44 | 6242 | # -*- coding: utf-8 -*-
import mimetypes
import StringIO
import unittest
import sys
from test import test_support
# Tell it we don't know about external files:
mimetypes.knownfiles = []
mimetypes.inited = False
mimetypes._default_mime_types()
class MimeTypesTestCase(unittest.TestCase):
    """Tests for the mimetypes.MimeTypes database (Python 2)."""

    def setUp(self):
        # Fresh database per test so registered types don't leak between tests.
        self.db = mimetypes.MimeTypes()

    def test_default_data(self):
        eq = self.assertEqual
        eq(self.db.guess_type("foo.html"), ("text/html", None))
        # Compound suffixes report the archive type plus an encoding.
        eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
        eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
        eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))
        eq(self.db.guess_type("foo.tar.bz2"), ("application/x-tar", "bzip2"))
        eq(self.db.guess_type("foo.tar.xz"), ("application/x-tar", "xz"))

    def test_data_urls(self):
        eq = self.assertEqual
        guess_type = self.db.guess_type
        # data: URLs default to text/plain when no media type is given.
        eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None))
        eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None))
        eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None))

    def test_file_parsing(self):
        eq = self.assertEqual
        # readfp() should register both directions: extension -> type and back.
        sio = StringIO.StringIO("x-application/x-unittest pyunit\n")
        self.db.readfp(sio)
        eq(self.db.guess_type("foo.pyunit"),
           ("x-application/x-unittest", None))
        eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit")

    def test_non_standard_types(self):
        eq = self.assertEqual
        # First try strict
        eq(self.db.guess_type('foo.xul', strict=True), (None, None))
        eq(self.db.guess_extension('image/jpg', strict=True), None)
        # And then non-strict
        eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None))
        eq(self.db.guess_extension('image/jpg', strict=False), '.jpg')

    def test_guess_all_types(self):
        eq = self.assertEqual
        unless = self.assertTrue
        # First try strict.  Use a set here for testing the results because if
        # test_urllib2 is run before test_mimetypes, global state is modified
        # such that the 'all' set will have more items in it.
        all = set(self.db.guess_all_extensions('text/plain', strict=True))
        unless(all >= set(['.bat', '.c', '.h', '.ksh', '.pl', '.txt']))
        # And now non-strict
        all = self.db.guess_all_extensions('image/jpg', strict=False)
        all.sort()
        eq(all, ['.jpg'])
        # And now for no hits
        all = self.db.guess_all_extensions('image/jpg', strict=True)
        eq(all, [])
@unittest.skipUnless(sys.platform.startswith("win"), "Windows only")
class Win32MimeTypesTestCase(unittest.TestCase):
    """Tests for the Windows-registry-backed MIME database (Python 2).

    Several tests monkeypatch mimetypes._winreg with a mock that simulates
    problematic registry contents (non-Latin data, access errors); each one
    restores the real _winreg in a finally block.
    """

    def setUp(self):
        # ensure all entries actually come from the Windows registry
        self.original_types_map = mimetypes.types_map.copy()
        mimetypes.types_map.clear()

    def tearDown(self):
        # restore default settings
        mimetypes.types_map.clear()
        mimetypes.types_map.update(self.original_types_map)

    def test_registry_parsing(self):
        # the original, minimum contents of the MIME database in the
        # Windows registry is undocumented AFAIK.
        # Use file types that should *always* exist:
        eq = self.assertEqual
        mimetypes.init()
        db = mimetypes.MimeTypes()
        eq(db.guess_type("foo.txt"), ("text/plain", None))
        eq(db.guess_type("image.jpg"), ("image/jpeg", None))
        eq(db.guess_type("image.png"), ("image/png", None))

    def test_non_latin_extension(self):
        import _winreg

        class MockWinreg(object):
            # Append a non-ASCII byte to every enumerated key name and strip
            # it again on open, simulating non-Latin extensions in the registry.
            def __getattr__(self, name):
                if name == 'EnumKey':
                    return lambda key, i: _winreg.EnumKey(key, i) + "\xa3"
                elif name == 'OpenKey':
                    return lambda key, name: _winreg.OpenKey(key, name.rstrip("\xa3"))
                elif name == 'QueryValueEx':
                    return lambda subkey, label: (u'текст/простой', _winreg.REG_SZ)
                return getattr(_winreg, name)

        mimetypes._winreg = MockWinreg()
        try:
            # this used to throw an exception if registry contained non-Latin
            # characters in extensions (issue #9291)
            mimetypes.init()
        finally:
            mimetypes._winreg = _winreg

    def test_non_latin_type(self):
        import _winreg

        class MockWinreg(object):
            def __getattr__(self, name):
                if name == 'QueryValueEx':
                    return lambda subkey, label: (u'текст/простой', _winreg.REG_SZ)
                return getattr(_winreg, name)

        mimetypes._winreg = MockWinreg()
        try:
            # this used to throw an exception if registry contained non-Latin
            # characters in content types (issue #9291)
            mimetypes.init()
        finally:
            mimetypes._winreg = _winreg

    def test_type_map_values(self):
        import _winreg

        class MockWinreg(object):
            def __getattr__(self, name):
                if name == 'QueryValueEx':
                    return lambda subkey, label: (u'text/plain', _winreg.REG_SZ)
                return getattr(_winreg, name)

        mimetypes._winreg = MockWinreg()
        try:
            mimetypes.init()
            # Even with unicode registry values, types_map must hold str.
            self.assertTrue(isinstance(mimetypes.types_map.values()[0], str))
        finally:
            mimetypes._winreg = _winreg

    def test_registry_read_error(self):
        import _winreg

        class MockWinreg(object):
            # Deny access to everything except HKEY_CLASSES_ROOT, simulating
            # a locked-down registry; init() must survive this.
            def OpenKey(self, key, name):
                if key != _winreg.HKEY_CLASSES_ROOT:
                    raise WindowsError(5, "Access is denied")
                return _winreg.OpenKey(key, name)
            def __getattr__(self, name):
                return getattr(_winreg, name)

        mimetypes._winreg = MockWinreg()
        try:
            mimetypes.init()
        finally:
            mimetypes._winreg = _winreg
def test_main():
    # Run both suites; the Win32 case skips itself on non-Windows platforms.
    test_support.run_unittest(MimeTypesTestCase,
        Win32MimeTypesTestCase
        )
if __name__ == "__main__":
test_main()
| apache-2.0 |
idea4bsd/idea4bsd | python/lib/Lib/distutils/filelist.py | 85 | 12810 | """distutils.filelist
Provides the FileList class, used for poking about the filesystem
and building lists of files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: filelist.py 37828 2004-11-10 22:23:15Z loewis $"
import os, string, re
import fnmatch
from types import *
from glob import glob
from distutils.util import convert_path
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
from distutils import log
class FileList:
"""A list of files built by on exploring the filesystem and filtered by
applying various patterns to what we find there.
Instance attributes:
dir
directory from which files will be taken -- only used if
'allfiles' not supplied to constructor
files
list of filenames currently being built/filtered/manipulated
allfiles
complete list of files under consideration (ie. without any
filtering applied)
"""
    def __init__(self,
                 warn=None,
                 debug_print=None):
        # ignore argument to FileList, but keep them for backwards
        # compatibility
        self.allfiles = None  # full file universe; populated lazily by findall()
        self.files = []       # working list of selected filenames
    def set_allfiles (self, allfiles):
        # Inject a precomputed universe of files (skips the directory scan).
        self.allfiles = allfiles
    def findall (self, dir=os.curdir):
        # Populate 'allfiles' by recursively scanning 'dir' with the
        # module-level findall() helper.
        self.allfiles = findall(dir)
    def debug_print (self, msg):
        """Print 'msg' to stdout if the global DEBUG (taken from the
        DISTUTILS_DEBUG environment variable) flag is true.
        """
        # Imported here so merely constructing a FileList never pulls in
        # the debug module.
        from distutils.debug import DEBUG
        if DEBUG:
            print msg
# -- List-like methods ---------------------------------------------
    def append (self, item):
        # List-like convenience: add one filename to the working list.
        self.files.append(item)
    def extend (self, items):
        # List-like convenience: add several filenames to the working list.
        self.files.extend(items)
    def sort (self):
        # Not a strict lexical sort!
        # Sort on (dirname, basename) pairs so entries group by directory.
        sortable_files = map(os.path.split, self.files)
        sortable_files.sort()
        self.files = []
        for sort_tuple in sortable_files:
            self.files.append(apply(os.path.join, sort_tuple))
# -- Other miscellaneous utility methods ---------------------------
    def remove_duplicates (self):
        # Assumes list has been sorted!
        # Walk backwards so deletions never shift indices we have yet to visit.
        for i in range(len(self.files) - 1, 0, -1):
            if self.files[i] == self.files[i - 1]:
                del self.files[i]
# -- "File template" methods ---------------------------------------
    def _parse_template_line (self, line):
        """Split a MANIFEST.in template line into its components.

        Returns (action, patterns, dir, dir_pattern); depending on the
        action exactly one of the last three is populated and the others
        stay None. Raises DistutilsTemplateError for a malformed line or
        an unknown action.
        """
        words = string.split(line)
        action = words[0]
        patterns = dir = dir_pattern = None
        if action in ('include', 'exclude',
                      'global-include', 'global-exclude'):
            if len(words) < 2:
                raise DistutilsTemplateError, \
                      "'%s' expects <pattern1> <pattern2> ..." % action
            patterns = map(convert_path, words[1:])
        elif action in ('recursive-include', 'recursive-exclude'):
            if len(words) < 3:
                raise DistutilsTemplateError, \
                      "'%s' expects <dir> <pattern1> <pattern2> ..." % action
            dir = convert_path(words[1])
            patterns = map(convert_path, words[2:])
        elif action in ('graft', 'prune'):
            if len(words) != 2:
                raise DistutilsTemplateError, \
                      "'%s' expects a single <dir_pattern>" % action
            dir_pattern = convert_path(words[1])
        else:
            raise DistutilsTemplateError, "unknown action '%s'" % action
        return (action, patterns, dir, dir_pattern)
# _parse_template_line ()
    def process_template_line (self, line):
        """Parse one template line and apply its action to 'self.files'.

        Each action maps to an include_pattern()/exclude_pattern() call;
        a warning is logged whenever a pattern matches nothing.
        """
        # Parse the line: split it up, make sure the right number of words
        # is there, and return the relevant words.  'action' is always
        # defined: it's the first word of the line.  Which of the other
        # three are defined depends on the action; it'll be either
        # patterns, (dir and patterns), or (dir_pattern).
        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
        # OK, now we know that the action is valid and we have the
        # right number of words on the line for that action -- so we
        # can proceed with minimal error-checking.
        if action == 'include':
            self.debug_print("include " + string.join(patterns))
            for pattern in patterns:
                if not self.include_pattern(pattern, anchor=1):
                    log.warn("warning: no files found matching '%s'",
                             pattern)
        elif action == 'exclude':
            self.debug_print("exclude " + string.join(patterns))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, anchor=1):
                    log.warn(("warning: no previously-included files "
                              "found matching '%s'"), pattern)
        elif action == 'global-include':
            self.debug_print("global-include " + string.join(patterns))
            for pattern in patterns:
                if not self.include_pattern(pattern, anchor=0):
                    log.warn(("warning: no files found matching '%s' " +
                              "anywhere in distribution"), pattern)
        elif action == 'global-exclude':
            self.debug_print("global-exclude " + string.join(patterns))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, anchor=0):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found anywhere in distribution"),
                             pattern)
        elif action == 'recursive-include':
            self.debug_print("recursive-include %s %s" %
                             (dir, string.join(patterns)))
            for pattern in patterns:
                if not self.include_pattern(pattern, prefix=dir):
                    log.warn(("warning: no files found matching '%s' " +
                              "under directory '%s'"),
                             pattern, dir)
        elif action == 'recursive-exclude':
            self.debug_print("recursive-exclude %s %s" %
                             (dir, string.join(patterns)))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, prefix=dir):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found under directory '%s'"),
                             pattern, dir)
        elif action == 'graft':
            self.debug_print("graft " + dir_pattern)
            if not self.include_pattern(None, prefix=dir_pattern):
                log.warn("warning: no directories found matching '%s'",
                         dir_pattern)
        elif action == 'prune':
            self.debug_print("prune " + dir_pattern)
            if not self.exclude_pattern(None, prefix=dir_pattern):
                log.warn(("no previously-included directories found " +
                          "matching '%s'"), dir_pattern)
        else:
            # _parse_template_line() already rejected unknown actions, so
            # this branch is unreachable in practice.
            raise DistutilsInternalError, \
                  "this cannot happen: invalid action '%s'" % action
    # process_template_line ()
# -- Filtering/selection methods -----------------------------------
    def include_pattern (self, pattern,
                         anchor=1, prefix=None, is_regex=0):
        """Select strings (presumably filenames) from 'self.files' that
        match 'pattern', a Unix-style wildcard (glob) pattern.  Patterns
        are not quite the same as implemented by the 'fnmatch' module: '*'
        and '?' match non-special characters, where "special" is platform-
        dependent: slash on Unix; colon, slash, and backslash on
        DOS/Windows; and colon on Mac OS.
        If 'anchor' is true (the default), then the pattern match is more
        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
        'anchor' is false, both of these will match.
        If 'prefix' is supplied, then only filenames starting with 'prefix'
        (itself a pattern) and ending with 'pattern', with anything in between
        them, will match.  'anchor' is ignored in this case.
        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
        'pattern' is assumed to be either a string containing a regex or a
        regex object -- no translation is done, the regex is just compiled
        and used as-is.
        Selected strings will be added to self.files.
        Return 1 if files are found.
        """
        files_found = 0
        # NOTE: 'pattern' may be None when only 'prefix' matters -- the
        # 'graft' action in process_template_line() calls it that way.
        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
        self.debug_print("include_pattern: applying regex r'%s'" %
                         pattern_re.pattern)
        # delayed loading of allfiles list
        if self.allfiles is None:
            self.findall()
        for name in self.allfiles:
            if pattern_re.search(name):
                self.debug_print(" adding " + name)
                self.files.append(name)
                files_found = 1
        return files_found
    # include_pattern ()
    def exclude_pattern (self, pattern,
                         anchor=1, prefix=None, is_regex=0):
        """Remove strings (presumably filenames) from 'files' that match
        'pattern'.  Other parameters are the same as for
        'include_pattern()', above.
        The list 'self.files' is modified in place.
        Return 1 if files are found.
        """
        files_found = 0
        # NOTE: 'pattern' may be None when only 'prefix' matters -- the
        # 'prune' action in process_template_line() calls it that way.
        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
        self.debug_print("exclude_pattern: applying regex r'%s'" %
                         pattern_re.pattern)
        # Iterate backwards so the deletions don't shift indexes that
        # haven't been visited yet.
        for i in range(len(self.files)-1, -1, -1):
            if pattern_re.search(self.files[i]):
                self.debug_print(" removing " + self.files[i])
                del self.files[i]
                files_found = 1
        return files_found
    # exclude_pattern ()
# class FileList
# ----------------------------------------------------------------------
# Utility functions
def findall (dir = os.curdir):
    """Find all files under 'dir' and return the list of full filenames
    (relative to 'dir').

    Walks the tree iteratively with an explicit worklist: regular files
    are collected, directories are queued for a later pass.  Names found
    directly under os.curdir itself are left unprefixed to avoid the
    dreaded "./" syndrome.
    """
    from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK
    found = []
    pending = [dir]
    while pending:
        current = pending.pop()
        for entry in os.listdir(current):
            if current == os.curdir:
                fullname = entry
            else:
                fullname = os.path.join(current, entry)
            # Avoid excess stat calls -- one per entry, classified by mode.
            mode = os.stat(fullname)[ST_MODE]
            if S_ISREG(mode):
                found.append(fullname)
            elif S_ISDIR(mode) and not S_ISLNK(mode):
                pending.append(fullname)
    return found
def glob_to_re (pattern):
    """Translate a shell-like glob pattern to a regular expression; return
    a string containing the regex.  Differs from 'fnmatch.translate()' in
    that '*' does not match "special characters" (which are
    platform-specific).
    """
    translated = fnmatch.translate(pattern)
    # fnmatch turns '?' and '*' into '.' and '.*', which would also match
    # a slash.  Shell globs aren't supposed to cross path separators, so
    # rewrite every non-escaped '.' to match anything *except* the special
    # characters.
    # XXX currently the "special characters" are just slash -- i.e. this is
    # Unix-only.
    return re.sub(r'(^|[^\\])\.', r'\1[^/]', translated)
# glob_to_re ()
# glob_to_re ()
def translate_pattern (pattern, anchor=1, prefix=None, is_regex=0):
    """Translate a shell-like wildcard pattern to a compiled regular
    expression.  Return the compiled regex.  If 'is_regex' true,
    then 'pattern' is directly compiled to a regex (if it's a string)
    or just returned as-is (assumes it's a regex object).
    """
    if is_regex:
        # StringType is presumably Python 2's types.StringType -- confirm
        # the import at the top of the file.
        if type(pattern) is StringType:
            return re.compile(pattern)
        else:
            return pattern
    if pattern:
        pattern_re = glob_to_re(pattern)
    else:
        # No pattern: match anything under the prefix (graft/prune case).
        pattern_re = ''
    if prefix is not None:
        prefix_re = (glob_to_re(prefix))[0:-1] # ditch trailing $
        # NOTE(review): joining regex fragments with os.path.join uses the
        # platform path separator; on Windows the backslash would act as a
        # regex escape -- verify behaviour on non-Unix platforms.
        pattern_re = "^" + os.path.join(prefix_re, ".*" + pattern_re)
    else:               # no prefix -- respect anchor flag
        if anchor:
            pattern_re = "^" + pattern_re
    return re.compile(pattern_re)
# translate_pattern ()
| apache-2.0 |
mstriemer/zamboni | mkt/stats/views.py | 9 | 13102 | from django import http
import commonware
import requests
from rest_framework.exceptions import ParseError
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny, BasePermission
from rest_framework.response import Response
from rest_framework.views import APIView
import mkt
from lib.metrics import get_monolith_client
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.authorization import AllowAppOwner, AnyOf, GroupPermission
from mkt.api.base import CORSMixin, SlugOrIdMixin
from mkt.api.exceptions import ServiceUnavailable
from mkt.purchase.models import Contribution
from mkt.webapps.models import Webapp
from .forms import StatsForm
log = commonware.log.getLogger('z.stats')
class PublicStats(BasePermission):
    """
    Allow access for apps with `public_stats` set to True.
    """
    def has_permission(self, request, view):
        # Anonymous is allowed if app.public_stats is True.
        # The object-level check below does the real gating.
        return True
    def has_object_permission(self, request, view, obj):
        # `obj` is the app; only expose stats the developer made public.
        return obj.public_stats
# Map of URL metric name to monolith metric name.
#
# The 'dimensions' key is optional query string arguments with defaults that is
# passed to the monolith client and used in the facet filters. If the default
# is `None`, the dimension is excluded unless specified via the API.
#
# The 'lines' key is optional and used for multi-line charts. The format is:
# {'<name>': {'<dimension-key>': '<dimension-value>'}}
# where <name> is what's returned in the JSON output and the dimension
# key/value is what's sent to Monolith similar to the 'dimensions' above.
#
# The 'coerce' key is optional and used to coerce data types returned from
# monolith to other types. Provide the name of the key in the data you want to
# coerce with a callback for how you want the data coerced. E.g.:
# {'count': str}
def lines(name, vals):
    """Build the 'lines' mapping for a multi-line chart stat.

    Returns {val: {name: val}} for each value -- the per-line dimension
    kwargs passed through to the Monolith client.
    """
    return {val: {name: val} for val in vals}
# Marketplace-wide metrics served by GlobalStats.
STATS = {
    'apps_added_by_package': {
        'metric': 'apps_added_package_count',
        'dimensions': {'region': 'us'},
        'lines': lines('package_type', mkt.ADDON_WEBAPP_TYPES.values()),
    },
    'apps_added_by_premium': {
        'metric': 'apps_added_premium_count',
        'dimensions': {'region': 'us'},
        'lines': lines('premium_type', mkt.ADDON_PREMIUM_API.values()),
    },
    'apps_available_by_package': {
        'metric': 'apps_available_package_count',
        'dimensions': {'region': 'us'},
        'lines': lines('package_type', mkt.ADDON_WEBAPP_TYPES.values()),
    },
    'apps_available_by_premium': {
        'metric': 'apps_available_premium_count',
        'dimensions': {'region': 'us'},
        'lines': lines('premium_type', mkt.ADDON_PREMIUM_API.values()),
    },
    'apps_installed': {
        'metric': 'app_installs',
        'dimensions': {'region': None},
    },
    'total_developers': {
        'metric': 'total_dev_count',
    },
    'total_visits': {
        'metric': 'visits',
    },
    'ratings': {
        'metric': 'apps_ratings',
    },
    'abuse_reports': {
        'metric': 'apps_abuse_reports',
    },
    'revenue': {
        'metric': 'gross_revenue',
        # Counts are floats. Let's convert them to strings with 2 decimals.
        'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
    },
}
# Per-app metrics served by AppStats (queried with an app-id facet filter).
APP_STATS = {
    'installs': {
        'metric': 'app_installs',
        'dimensions': {'region': None},
    },
    'visits': {
        'metric': 'app_visits',
    },
    'ratings': {
        'metric': 'apps_ratings',
    },
    'average_rating': {
        'metric': 'apps_average_rating',
    },
    'abuse_reports': {
        'metric': 'apps_abuse_reports',
    },
    'revenue': {
        'metric': 'gross_revenue',
        # Counts are floats. Let's convert them to strings with 2 decimals.
        'coerce': {'count': lambda d: '{0:.2f}'.format(d)},
    },
}
# The total API will iterate over each key and return statistical totals
# information on them all.
STATS_TOTAL = {
    'installs': {
        'metric': 'app_installs',
    },
    'ratings': {
        'metric': 'apps_ratings',
    },
    'abuse_reports': {
        'metric': 'apps_abuse_reports',
    },
}
# NOTE(review): currently identical to STATS_TOTAL; presumably kept separate
# so the global and per-app totals endpoints can diverge independently.
APP_STATS_TOTAL = {
    'installs': {
        'metric': 'app_installs',
    },
    'ratings': {
        'metric': 'apps_ratings',
    },
    'abuse_reports': {
        'metric': 'apps_abuse_reports',
    },
}
def _get_monolith_data(stat, start, end, interval, dimensions):
    """Fetch time-series data for 'stat' from Monolith.

    Returns {'objects': [...]} for a single series, or
    {line_name: [...], ...} when the stat defines 'lines'.
    NOTE: 'dimensions' is mutated in place in the multi-line case.
    Raises ServiceUnavailable when Monolith is unreachable and ParseError
    when it rejects the metric.
    """
    # If stat has a 'lines' attribute, it's a multi-line graph. Do a
    # request for each item in 'lines' and compose them in a single
    # response.
    try:
        client = get_monolith_client()
    except requests.ConnectionError as e:
        log.info('Monolith connection error: {0}'.format(e))
        raise ServiceUnavailable
    def _coerce(data):
        # Apply the stat's optional 'coerce' callbacks to matching keys.
        for key, coerce in stat.get('coerce', {}).items():
            if data.get(key):
                data[key] = coerce(data[key])
        return data
    try:
        data = {}
        if 'lines' in stat:
            for line_name, line_dimension in stat['lines'].items():
                dimensions.update(line_dimension)
                data[line_name] = map(_coerce,
                                      client(stat['metric'], start, end,
                                             interval, **dimensions))
        else:
            data['objects'] = map(_coerce,
                                  client(stat['metric'], start, end, interval,
                                         **dimensions))
    except ValueError as e:
        # This occurs if monolith doesn't have our metric and we get an
        # elasticsearch SearchPhaseExecutionException error.
        log.info('Monolith ValueError for metric {0}: {1}'.format(
            stat['metric'], e))
        raise ParseError('Invalid metric at this time. Try again later.')
    return data
class GlobalStats(CORSMixin, APIView):
    """Time-series data for a single marketplace-wide metric (see STATS)."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    cors_allowed_methods = ['get']
    permission_classes = [AllowAny]
    def get(self, request, metric):
        """Return time-series data for `metric` over the validated range."""
        if metric not in STATS:
            raise http.Http404('No metric by that name.')
        stat = STATS[metric]
        # Perform form validation.
        form = StatsForm(request.GET)
        if not form.is_valid():
            raise ParseError(dict(form.errors.items()))
        qs = form.cleaned_data
        dimensions = {}
        if 'dimensions' in stat:
            for key, default in stat['dimensions'].items():
                val = request.GET.get(key, default)
                if val is not None:
                    # Avoid passing kwargs to the monolith client when the
                    # dimension is None to avoid facet filters being applied.
                    dimensions[key] = request.GET.get(key, default)
        return Response(_get_monolith_data(stat, qs.get('start'),
                                           qs.get('end'), qs.get('interval'),
                                           dimensions))
class AppStats(CORSMixin, SlugOrIdMixin, ListAPIView):
    """Per-app time-series data for a single metric (see APP_STATS)."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    cors_allowed_methods = ['get']
    permission_classes = [AnyOf(PublicStats, AllowAppOwner,
                                GroupPermission('Stats', 'View'))]
    queryset = Webapp.objects.all()
    slug_field = 'app_slug'
    def get(self, request, pk, metric):
        """Return time-series data for `metric`, faceted to this app."""
        if metric not in APP_STATS:
            raise http.Http404('No metric by that name.')
        app = self.get_object()
        stat = APP_STATS[metric]
        # Perform form validation.
        form = StatsForm(request.GET)
        if not form.is_valid():
            raise ParseError(dict(form.errors.items()))
        qs = form.cleaned_data
        # NOTE(review): the dimension handling below duplicates
        # GlobalStats.get -- candidate for a shared helper.
        dimensions = {'app-id': app.id}
        if 'dimensions' in stat:
            for key, default in stat['dimensions'].items():
                val = request.GET.get(key, default)
                if val is not None:
                    # Avoid passing kwargs to the monolith client when the
                    # dimension is None to avoid facet filters being applied.
                    dimensions[key] = request.GET.get(key, default)
        return Response(_get_monolith_data(stat, qs.get('start'),
                                           qs.get('end'), qs.get('interval'),
                                           dimensions))
class StatsTotalBase(object):
    """
    A place for a few helper methods for totals stats API.
    """
    def get_client(self):
        """Return a Monolith client, or raise ServiceUnavailable."""
        try:
            client = get_monolith_client()
        except requests.ConnectionError as e:
            log.info('Monolith connection error: {0}'.format(e))
            raise ServiceUnavailable
        return client
    def get_query(self, metric, field, app_id=None):
        """Build an Elasticsearch statistical-facet query for 'field',
        optionally filtered down to a single app."""
        query = {
            'query': {
                'match_all': {}
            },
            'facets': {
                metric: {
                    'statistical': {
                        'field': field
                    }
                }
            },
            'size': 0
        }
        # If this is per-app, add the facet_filter.
        if app_id:
            query['facets'][metric]['facet_filter'] = {
                'term': {
                    'app-id': app_id
                }
            }
        return query
    def process_response(self, resp, data):
        """Copy the statistical facet values out of 'resp' into 'data'
        (mutated in place), keyed by metric name."""
        for metric, facet in resp.get('facets', {}).items():
            count = facet.get('count', 0)
            # We filter out facets with count=0 to avoid returning things
            # like `'max': u'-Infinity'`.
            if count > 0:
                for field in ('max', 'mean', 'min', 'std_deviation',
                              'sum_of_squares', 'total', 'variance'):
                    value = facet.get(field)
                    if value is not None:
                        data[metric][field] = value
class GlobalStatsTotal(CORSMixin, APIView, StatsTotalBase):
    """Marketplace-wide statistical totals for every STATS_TOTAL metric."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    cors_allowed_methods = ['get']
    permission_classes = [AllowAny]
    # NOTE(review): slug_field looks unused here -- this view does not mix
    # in SlugOrIdMixin (unlike AppStatsTotal); confirm before removing.
    slug_field = 'app_slug'
    def get(self, request):
        client = self.get_client()
        # Note: We have to do this as separate requests so that if one fails
        # the rest can still be returned.
        data = {}
        for metric, stat in STATS_TOTAL.items():
            data[metric] = {}
            query = self.get_query(metric, stat['metric'])
            try:
                resp = client.raw(query)
            except ValueError as e:
                log.info('Received value error from monolith client: %s' % e)
                continue
            self.process_response(resp, data)
        return Response(data)
class AppStatsTotal(CORSMixin, SlugOrIdMixin, ListAPIView, StatsTotalBase):
    """Per-app statistical totals for every APP_STATS_TOTAL metric."""
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    cors_allowed_methods = ['get']
    permission_classes = [AnyOf(PublicStats, AllowAppOwner,
                                GroupPermission('Stats', 'View'))]
    queryset = Webapp.objects.all()
    slug_field = 'app_slug'
    def get(self, request, pk):
        """Return per-metric statistical totals for the requested app."""
        app = self.get_object()
        client = self.get_client()
        # Note: We have to do this as separate requests so that if one fails
        # the rest can still be returned.
        data = {}
        for metric, stat in APP_STATS_TOTAL.items():
            data[metric] = {}
            query = self.get_query(metric, stat['metric'], app.id)
            try:
                resp = client.raw(query)
            except ValueError as e:
                log.info('Received value error from monolith client: %s' % e)
                continue
            self.process_response(resp, data)
        return Response(data)
class TransactionAPI(CORSMixin, APIView):
    """
    API to query by transaction ID.
    Note: This is intended for Monolith to be able to associate a Solitude
    transaction with an app and price tier amount in USD.
    """
    authentication_classes = (RestOAuthAuthentication,
                              RestSharedSecretAuthentication)
    cors_allowed_methods = ['get']
    permission_classes = [GroupPermission('RevenueStats', 'View')]
    def get(self, request, transaction_id):
        """Return app id, USD amount, and contribution type for one
        transaction; 404 if the transaction is unknown."""
        try:
            contrib = (Contribution.objects.select_related('price_tier').
                       get(transaction_id=transaction_id))
        except Contribution.DoesNotExist:
            raise http.Http404('No transaction by that ID.')
        data = {
            'id': transaction_id,
            'app_id': contrib.addon_id,
            'amount_USD': contrib.price_tier.price,
            'type': mkt.CONTRIB_TYPES[contrib.type],
        }
        return Response(data)
| bsd-3-clause |
joopert/home-assistant | tests/components/group/test_reproduce_state.py | 3 | 1559 | """The tests for reproduction of state."""
from asyncio import Future
from unittest.mock import patch
from homeassistant.components.group.reproduce_state import async_reproduce_states
from homeassistant.core import Context, State
async def test_reproduce_group(hass):
    """Test reproduce_state with group."""
    context = Context()
    def clone_state(state, entity_id):
        """Return a cloned state with different entity_id."""
        return State(
            entity_id,
            state.state,
            state.attributes,
            last_changed=state.last_changed,
            last_updated=state.last_updated,
            context=state.context,
        )
    with patch("homeassistant.helpers.state.async_reproduce_state") as fun:
        fun.return_value = Future()
        fun.return_value.set_result(None)
        # Register a group with three member entities, all currently off.
        hass.states.async_set(
            "group.test",
            "off",
            {"entity_id": ["light.test1", "light.test2", "switch.test1"]},
        )
        hass.states.async_set("light.test1", "off")
        hass.states.async_set("light.test2", "off")
        hass.states.async_set("switch.test1", "off")
        state = State("group.test", "on")
        await async_reproduce_states(hass, [state], context)
        # The group state should be fanned out to one cloned state per
        # member entity, forwarded in a single blocking call.
        fun.assert_called_once_with(
            hass,
            [
                clone_state(state, "light.test1"),
                clone_state(state, "light.test2"),
                clone_state(state, "switch.test1"),
            ],
            blocking=True,
            context=context,
        )
| apache-2.0 |
ralphiee22/kolibri | kolibri/auth/filters.py | 7 | 10962 | from django.db import models
from django.db.models.query import F
from six import string_types
from .constants import collection_kinds
from .errors import InvalidHierarchyRelationsArgument
class HierarchyRelationsFilter(object):
    """
    Helper class for efficiently making queries based on relations between models in the Collection hierarchy via Roles/Memberships.
    To use, instantiate an instance of `HierarchyRelationsFilter`, passing in a queryset. Then, to perform hierarchy-based queries
    on the queryset, call the `filter_by_hierarchy` method on the `HierarchyRelationsFilter` instance, passing arguments fixing values
    for models in the hierarchy structure, or linking them to fields on the base model being filtered (via F expressions).
    """
    # SQL fragments tying a source user's Role to the ancestor collection.
    _role_extra = {
        "tables": [
            '"{facilityuser_table}" AS "source_user"',
            '"{role_table}" AS "role"',
        ],
        "where": [
            "role.user_id = source_user.id",
            "role.collection_id = ancestor_collection.id",
        ]
    }
    # SQL fragments relating the two collections: the descendant's lft/rght
    # values must fall within the ancestor's interval, within the same tree.
    _collection_extra = {
        "tables": [
            '"{collection_table}" AS "ancestor_collection"',
            '"{collection_table}" AS "descendant_collection"',
        ],
        "where": [
            "descendant_collection.lft BETWEEN ancestor_collection.lft AND ancestor_collection.rght",
            "descendant_collection.tree_id = ancestor_collection.tree_id",
        ]
    }
    _facilityuser_table = [
        '"{facilityuser_table}" AS "target_user"',
    ]
    _membership_table = [
        '"{membership_table}" AS "membership"',
    ]
    def __init__(self, queryset):
        """Wrap 'queryset' (a QuerySet, or a Model class) for filtering."""
        # convert the provided argument from a Model class into a QuerySet as needed
        if isinstance(queryset, type) and issubclass(queryset, models.Model):
            queryset = queryset.objects.all()
        self.queryset = queryset
        self.tables = []
        self.left_join_tables = []
        self.where = []
        # import auth models here to avoid circular imports
        from .models import Role, Collection, Membership, FacilityUser
        # retrieve the table names that will be used as context for building queries
        self._table_names = {
            "role_table": Role._meta.db_table,
            "collection_table": Collection._meta.db_table,
            "membership_table": Membership._meta.db_table,
            "facilityuser_table": FacilityUser._meta.db_table,
        }
    def _add_extras(self, where, tables=None, left_join_tables=None):
        """Accumulate WHERE conditions and (left-)joined tables, filling
        the real table names into the {placeholder} templates."""
        self.where += where
        if tables:
            self.tables += [table.format(**self._table_names) for table in tables]
        if left_join_tables:
            self.left_join_tables += [table.format(**self._table_names) for table in left_join_tables]
    def _resolve_f_expression(self, f_expr):
        """Resolve an F expression into a 'table.column' SQL reference on
        the base queryset's table."""
        # try resolving the F expression; if it doesn't refer to a valid field or related field it will throw a FieldError
        expression = f_expr.resolve_expression(self.queryset.query)
        # extract the components of the F expression and do a sanity check
        lookups, parts, _ = self.queryset.query.solve_lookup_type(f_expr.name)
        assert len(lookups) == 1 and lookups[0] == "exact"  # F expression should not have qualifiers like __gt, __contains, etc
        # replace the last part of the reference with the target field name (e.g. this will replace `my_fkname` with `my_fkname_id`)
        parts[-1] = expression.target.get_attname()
        # join together the table name and field names to get a SQL-style reference to the target field
        return ".".join([self.queryset.model._meta.db_table] + parts)
    def _as_sql_reference(self, ref):
        """Turn 'ref' (a model instance, an id, or an F expression) into a
        value usable on the right-hand side of a SQL comparison."""
        if hasattr(ref, "id"):  # ref is a model instance; return its ID
            return ref.id
        elif isinstance(ref, string_types) or isinstance(ref, int):  # ref is a string or integer; assume it's an ID
            return ref
        elif isinstance(ref, F):  # ref is an F expression; resolve it to a SQL reference
            return self._resolve_f_expression(ref)
        else:
            raise InvalidHierarchyRelationsArgument("Not a valid reference: %r" % ref)
    def _join_with_logical_operator(self, lst, operator):
        """Join SQL condition strings with the given AND/OR operator,
        parenthesizing each condition and the whole expression."""
        op = ") {operator} (".format(operator=operator)
        return "(({items}))".format(items=op.join(lst))
    def _is_non_facility_user(self, user):
        """True if 'user' is a Kolibri user but not a FacilityUser."""
        from .models import KolibriAbstractBaseUser, FacilityUser
        return isinstance(user, KolibriAbstractBaseUser) and not isinstance(user, FacilityUser)
    def filter_by_hierarchy(self,
                            source_user=None,
                            role_kind=None,
                            ancestor_collection=None,
                            descendant_collection=None,
                            target_user=None):
        """
        Filters a queryset through a multi-table join through the Collection hierarchy and Roles/Collections.
        To anchor the hierarchy model relations back into the main queryset itself, use F expressions. For example, if
        you are filtering on a FacilityUser queryset, and want to return all users that have an admin role for
        collection `mycoll`, you would use something like:
        `FacilityUser.objects.filter_by_hierarchy(source_user=F("id"), role_kind=ADMIN, descendant_collection=mycoll)`
        (Here, `source_user=F("id")` means that the id of the source user is the same as the id of the model being filtered,
        i.e. we're "filtering over source users" in the hierarchy structure.)
        :param source_user: a specific value, or F expression, to constrain the source FacilityUser in the hierarchy structure
        :param role_kind: a specific value, or F expression, to constrain the Role kind in the hierarchy structure
        :param ancestor_collection: a specific value, or F expression, to constrain the ancestor Collection in the hierarchy structure
        :param descendant_collection: a specific value, or F expression, to constrain the descendant Collection in the hierarchy structure
        :param target_user: a specific value, or F expression, to constrain the target FacilityUser in the hierarchy structure
        :return: a filtered queryset with all the hierarchy structure conditions applied, as well as conditions based on provided arguments
        :rtype: QuerySet
        """
        # if either the source or target user is not a facility user, return an empty queryset
        if self._is_non_facility_user(source_user) or self._is_non_facility_user(target_user):
            return self.queryset.none()
        ################################################################################################################
        # 1. Determine which components of the hierarchy tree are relevant to the current query, and add in the
        #    corresponding tables and base conditions to establish the relationships between them.
        ################################################################################################################
        # 1(a). If needed, add in the SQL to establish the relationships between the target user (member) and the collections.
        if target_user:  # there are two ways for the target user to be a member of the ancestor collection:
            # the first way is via the collection hierarchy; having a Membership for the descendant collection
            membership_via_hierarchy_where = self._join_with_logical_operator([
                "membership.user_id = target_user.id",
                "membership.collection_id = descendant_collection.id",
            ], "AND")
            # the second, if the ancestor collection is the facility, is by virtue of being associated with that facility
            member_via_facility_where = self._join_with_logical_operator([
                "ancestor_collection.kind = '{facility_kind}'".format(facility_kind=collection_kinds.FACILITY),
                "ancestor_collection.dataset_id = target_user.dataset_id",
            ], "AND")
            where_clause = self._join_with_logical_operator([member_via_facility_where, membership_via_hierarchy_where], "OR")
            self._add_extras(tables=self._facilityuser_table, left_join_tables=self._membership_table, where=[where_clause])
        # 1(b). Add the tables and conditions relating the ancestor and descendant collections to one another:
        self._add_extras(**self._collection_extra)
        # 1(c). If needed, add the tables for source FacilityUser and Role, and conditions linking them together:
        if source_user or role_kind:
            self._add_extras(**self._role_extra)
        ################################################################################################################
        # 2. Add in the additional conditions that apply constraints on the tables in the hierarchy, fixing their
        #    fields to particular values or tying them into a field on the base table that is being queried.
        ################################################################################################################
        if source_user:
            where_clause = ['source_user.id = {id}'.format(id=self._as_sql_reference(source_user))]
            self._add_extras(where=where_clause)
        # if role_kind is a single string, put it into a list
        if isinstance(role_kind, string_types):
            role_kind = [role_kind]
        if role_kind:
            # convert the list of kinds into a list of strings for use in SQL
            # (kinds come from the constants module, not user input)
            kinds_string = "('{kind_list}')".format(kind_list="','".join(role_kind))
            where_clause = ['role.kind IN {kinds}'.format(kinds=kinds_string)]
            self._add_extras(where=where_clause)
        if ancestor_collection:
            where_clause = ['ancestor_collection.id = {id}'.format(id=self._as_sql_reference(ancestor_collection))]
            self._add_extras(where=where_clause)
        if descendant_collection:
            where_clause = ['descendant_collection.id = {id}'.format(id=self._as_sql_reference(descendant_collection))]
            self._add_extras(where=where_clause)
        if target_user:
            where_clause = ['target_user.id = {id}'.format(id=self._as_sql_reference(target_user))]
            self._add_extras(where=where_clause)
        # build the left join clause if we have any left join tables; the "ON 1=1" is needed to avoid syntax errors on Postgres
        left_join_sql = "LEFT JOIN {tables} ON 1=1".format(tables=", ".join(self.left_join_tables)) if self.left_join_tables else ""
        joined_condition = "EXISTS (SELECT * FROM {tables} {left_join_tables} WHERE {where})".format(
            tables=", ".join(self.tables),
            left_join_tables=left_join_sql,
            where=self._join_with_logical_operator(self.where, "AND"))
        return self.queryset.extra(where=[joined_condition])
| mit |
kgblll/libresoft-gymkhana | apps/explohyperfiction/models.py | 1 | 2903 | from django.db import models
from datetime import datetime
from django.contrib.gis.db import models
from social.core.models import *
from social.core.models import Social_node
# Create your models here.
class Group(models.Model):
    """A named, possibly private group of players, run by managers."""
    # NOTE: null=True has no effect on ManyToManyField in Django; "no
    # related rows" already represents the empty case.
    manager=models.ManyToManyField("Player",null=True)
    name=models.TextField(null=False)
    private=models.BooleanField(null=False)
    description=models.TextField(null=False)
class Player(models.Model):
    """Role flags and group memberships for a Person in the game.

    The is_* flags record which roles were granted; the active_* flags
    record which of them are currently in effect.
    """
    person = models.ForeignKey(Person,null=False)
    is_superuser = models.BooleanField(null=False)
    is_manager = models.BooleanField(null=False)
    is_player = models.BooleanField(null=False)
    active_superuser = models.BooleanField(null=False)
    active_manager = models.BooleanField(null=False)
    active_player = models.BooleanField(null=False)
    groups=models.ManyToManyField(Group,null=True)
class Petition(models.Model):
    """A player's request to be granted superuser or manager status."""
    player=models.ForeignKey(Player, null=False)
    for_super=models.BooleanField(null=False)
    for_manager=models.BooleanField(null=False)
    date=models.DateTimeField(null=False)
class PetitionGroup(models.Model):
    """A player's request to join a group."""
    player=models.ForeignKey(Player,null=False)
    group=models.ForeignKey(Group, null=False)
    date=models.DateTimeField(null=False)
class SystemMessage(models.Model):
    """A broadcast message addressed to zero or more players."""
    to=models.ManyToManyField(Player,null=True)
    message=models.TextField(null=False)
    date=models.DateTimeField(null=False)
class Event(models.Model):
    """A playable event owned by a manager, optionally tied to groups."""
    group=models.ManyToManyField(Group, null=True)
    name=models.TextField(null=False)
    description=models.TextField(null=False)
    active=models.BooleanField(null=False)
    manager=models.ForeignKey(Player,null=False)
    qr=models.BooleanField(null=False)
    date=models.DateTimeField(null=False)
    # [sic] 'attemps' -- renaming would require a schema migration.
    attemps=models.IntegerField(null=False)
class Question(models.Model):
    """A question belonging to an event, ordered by level."""
    event=models.ForeignKey(Event, null=False)
    text=models.TextField(null=False)
    level=models.IntegerField(null=False)
    qr=models.BooleanField(null=False)
class Answer(models.Model):
    """A possible answer to a question."""
    question=models.ForeignKey(Question, null=False)
    text=models.TextField(null=False)
    # presumably identifies the next question to show -- TODO confirm
    next=models.IntegerField(null=True)
    message=models.TextField(null=False)
class Challenge(models.Model):
    """A player's attempt at an event, tracking its start/finish state."""
    event=models.ForeignKey(Event,null=False)
    user=models.ForeignKey(Player,null=False)
    # Pass the callable, not its result: datetime.now() would be evaluated
    # once at import time, freezing the default timestamp for the lifetime
    # of the process.
    date=models.DateTimeField(default=datetime.now,null=False)
    date_finish=models.DateTimeField(null=True)
    finish=models.BooleanField(null=False)
    phone=models.BooleanField(null=False)
    cancel=models.BooleanField(null=False)
class Responses(models.Model):
    """A single answer given during a challenge, with optional location."""
    challenge=models.ForeignKey(Challenge,null=False)
    question=models.ForeignKey(Question,null=False)
    # Callable default so each row gets a save-time timestamp rather than
    # the module-import time (datetime.now() would be evaluated only once).
    date=models.DateTimeField(default=datetime.now)
    answer=models.ForeignKey(Answer,null=False)
    latitude=models.FloatField(null=True)
    longitude=models.FloatField(null=True)
| gpl-2.0 |
amenonsen/ansible | lib/ansible/modules/network/f5/bigip_apm_policy_import.py | 23 | 11958 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_apm_policy_import
short_description: Manage BIG-IP APM policy or APM access profile imports
description:
- Manage BIG-IP APM policy or APM access profile imports.
version_added: 2.8
options:
name:
description:
- The name of the APM policy or APM access profile to create or override.
type: str
required: True
type:
description:
- Specifies the type of item to export from device.
type: str
choices:
- profile_access
- access_policy
default: profile_access
source:
description:
- Full path to a file to be imported into the BIG-IP APM.
type: path
force:
description:
- When set to C(yes) any existing policy with the same name will be overwritten by the new import.
- If policy does not exist this setting is ignored.
default: no
type: bool
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
notes:
- Due to ID685681 it is not possible to execute ng_* tools via REST api on v12.x and 13.x, once this is fixed
this restriction will be removed.
- Requires BIG-IP >= 14.0.0
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
# Ansible EXAMPLES doc string. Fix: the third task referenced the wrong
# module name ("bigip_asm_policy"); all examples must use this module.
EXAMPLES = r'''
- name: Import APM profile
  bigip_apm_policy_import:
    name: new_apm_profile
    source: /root/apm_profile.tar.gz
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost
- name: Import APM policy
  bigip_apm_policy_import:
    name: new_apm_policy
    source: /root/apm_policy.tar.gz
    type: access_policy
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost
- name: Override existing APM policy
  bigip_apm_policy_import:
    name: new_apm_policy
    source: /root/apm_policy.tar.gz
    force: yes
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost
'''
RETURN = r'''
source:
description: Local path to APM policy file.
returned: changed
type: str
sample: /root/some_policy.tar.gz
name:
description: Name of the APM policy or APM access profile to be created/overwritten.
returned: changed
type: str
sample: APM_policy_global
type:
description: Set to specify type of item to export.
returned: changed
type: str
sample: access_policy
force:
description: Set when overwriting an existing policy or profile.
returned: changed
type: bool
sample: yes
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import upload_file
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import upload_file
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
]
returnables = [
'name',
'source',
'type',
]
updatables = [
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
    def to_return(self):
        """Collect the ``returnables`` attributes into a filtered dict.

        Best-effort: any exception while gathering aborts collection and
        whatever was accumulated up to that point is returned.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Deliberately swallowed: reporting changed values must never
            # fail the module run itself.
            pass
        return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
if not module_provisioned(self.client, 'apm'):
raise F5ModuleError(
"APM must be provisioned to use this module."
)
if self.version_less_than_14():
raise F5ModuleError('Due to bug ID685681 it is not possible to use this module on TMOS version below 14.x')
result = dict()
changed = self.policy_import()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def version_less_than_14(self):
version = tmos_version(self.client)
if LooseVersion(version) < LooseVersion('14.0.0'):
return True
return False
    def policy_import(self):
        """Import the policy/profile onto the device.

        Returns True when an import happened (or would happen, in check
        mode) and False when the item exists and ``force`` is not set.
        """
        self._set_changed_options()
        if self.module.check_mode:
            # Check mode reports "would change" without touching the device.
            return True
        if self.exists():
            # An existing policy is only overwritten when force=yes.
            if self.want.force is False:
                return False
        self.import_file_to_device()
        self.remove_temp_file_from_device()
        return True
    def exists(self):
        """Return True when the named policy/profile is already on the device."""
        # access_policy and profile_access live under different REST
        # collections, so pick the URL by the requested type.
        if self.want.type == 'access_policy':
            uri = "https://{0}:{1}/mgmt/tm/apm/policy/access-policy/{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.name)
            )
        else:
            uri = "https://{0}:{1}/mgmt/tm/apm/profile/access/{2}".format(
                self.client.provider['server'],
                self.client.provider['server_port'],
                transform_name(self.want.partition, self.want.name)
            )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as not found.
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
    def import_file_to_device(self):
        """Upload the source file, then run ng_import on it via the bash endpoint.

        Raises F5ModuleError on any command output or API error; returns
        True on success.
        """
        name = os.path.split(self.want.source)[1]
        self.upload_file_to_device(self.want.source, name)
        cmd = 'ng_import -s /var/config/rest/downloads/{0} {1} -p {2}'.format(name, self.want.name, self.want.partition)
        uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs='-c "{0}"'.format(cmd)
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
            # ng_import is silent on success, so any commandResult output
            # is treated as a failure message.
            if 'commandResult' in response:
                raise F5ModuleError(response['commandResult'])
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return True
def remove_temp_file_from_device(self):
name = os.path.split(self.want.source)[1]
tpath_name = '/var/config/rest/downloads/{0}'.format(name)
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=tpath_name
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
),
source=dict(type='path'),
force=dict(
type='bool',
default='no'
),
type=dict(
default='profile_access',
choices=['profile_access', 'access_policy']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
shines77/Google-ProtoBuf | python/google/protobuf/internal/wire_format_test.py | 571 | 10848 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.internal.wire_format."""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import message
from google.protobuf.internal import wire_format
class WireFormatTest(unittest.TestCase):
def testPackTag(self):
field_number = 0xabc
tag_type = 2
self.assertEqual((field_number << 3) | tag_type,
wire_format.PackTag(field_number, tag_type))
PackTag = wire_format.PackTag
# Number too high.
self.assertRaises(message.EncodeError, PackTag, field_number, 6)
# Number too low.
self.assertRaises(message.EncodeError, PackTag, field_number, -1)
def testUnpackTag(self):
# Test field numbers that will require various varint sizes.
for expected_field_number in (1, 15, 16, 2047, 2048):
for expected_wire_type in range(6): # Highest-numbered wiretype is 5.
field_number, wire_type = wire_format.UnpackTag(
wire_format.PackTag(expected_field_number, expected_wire_type))
self.assertEqual(expected_field_number, field_number)
self.assertEqual(expected_wire_type, wire_type)
self.assertRaises(TypeError, wire_format.UnpackTag, None)
self.assertRaises(TypeError, wire_format.UnpackTag, 'abc')
self.assertRaises(TypeError, wire_format.UnpackTag, 0.0)
self.assertRaises(TypeError, wire_format.UnpackTag, object())
def testZigZagEncode(self):
Z = wire_format.ZigZagEncode
self.assertEqual(0, Z(0))
self.assertEqual(1, Z(-1))
self.assertEqual(2, Z(1))
self.assertEqual(3, Z(-2))
self.assertEqual(4, Z(2))
self.assertEqual(0xfffffffe, Z(0x7fffffff))
self.assertEqual(0xffffffff, Z(-0x80000000))
self.assertEqual(0xfffffffffffffffe, Z(0x7fffffffffffffff))
self.assertEqual(0xffffffffffffffff, Z(-0x8000000000000000))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
def testZigZagDecode(self):
Z = wire_format.ZigZagDecode
self.assertEqual(0, Z(0))
self.assertEqual(-1, Z(1))
self.assertEqual(1, Z(2))
self.assertEqual(-2, Z(3))
self.assertEqual(2, Z(4))
self.assertEqual(0x7fffffff, Z(0xfffffffe))
self.assertEqual(-0x80000000, Z(0xffffffff))
self.assertEqual(0x7fffffffffffffff, Z(0xfffffffffffffffe))
self.assertEqual(-0x8000000000000000, Z(0xffffffffffffffff))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
def NumericByteSizeTestHelper(self, byte_size_fn, value, expected_value_size):
# Use field numbers that cause various byte sizes for the tag information.
for field_number, tag_bytes in ((15, 1), (16, 2), (2047, 2), (2048, 3)):
expected_size = expected_value_size + tag_bytes
actual_size = byte_size_fn(field_number, value)
self.assertEqual(expected_size, actual_size,
'byte_size_fn: %s, field_number: %d, value: %r\n'
'Expected: %d, Actual: %d'% (
byte_size_fn, field_number, value, expected_size, actual_size))
def testByteSizeFunctions(self):
# Test all numeric *ByteSize() functions.
NUMERIC_ARGS = [
# Int32ByteSize().
[wire_format.Int32ByteSize, 0, 1],
[wire_format.Int32ByteSize, 127, 1],
[wire_format.Int32ByteSize, 128, 2],
[wire_format.Int32ByteSize, -1, 10],
# Int64ByteSize().
[wire_format.Int64ByteSize, 0, 1],
[wire_format.Int64ByteSize, 127, 1],
[wire_format.Int64ByteSize, 128, 2],
[wire_format.Int64ByteSize, -1, 10],
# UInt32ByteSize().
[wire_format.UInt32ByteSize, 0, 1],
[wire_format.UInt32ByteSize, 127, 1],
[wire_format.UInt32ByteSize, 128, 2],
[wire_format.UInt32ByteSize, wire_format.UINT32_MAX, 5],
# UInt64ByteSize().
[wire_format.UInt64ByteSize, 0, 1],
[wire_format.UInt64ByteSize, 127, 1],
[wire_format.UInt64ByteSize, 128, 2],
[wire_format.UInt64ByteSize, wire_format.UINT64_MAX, 10],
# SInt32ByteSize().
[wire_format.SInt32ByteSize, 0, 1],
[wire_format.SInt32ByteSize, -1, 1],
[wire_format.SInt32ByteSize, 1, 1],
[wire_format.SInt32ByteSize, -63, 1],
[wire_format.SInt32ByteSize, 63, 1],
[wire_format.SInt32ByteSize, -64, 1],
[wire_format.SInt32ByteSize, 64, 2],
# SInt64ByteSize().
[wire_format.SInt64ByteSize, 0, 1],
[wire_format.SInt64ByteSize, -1, 1],
[wire_format.SInt64ByteSize, 1, 1],
[wire_format.SInt64ByteSize, -63, 1],
[wire_format.SInt64ByteSize, 63, 1],
[wire_format.SInt64ByteSize, -64, 1],
[wire_format.SInt64ByteSize, 64, 2],
# Fixed32ByteSize().
[wire_format.Fixed32ByteSize, 0, 4],
[wire_format.Fixed32ByteSize, wire_format.UINT32_MAX, 4],
# Fixed64ByteSize().
[wire_format.Fixed64ByteSize, 0, 8],
[wire_format.Fixed64ByteSize, wire_format.UINT64_MAX, 8],
# SFixed32ByteSize().
[wire_format.SFixed32ByteSize, 0, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MIN, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MAX, 4],
# SFixed64ByteSize().
[wire_format.SFixed64ByteSize, 0, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MIN, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MAX, 8],
# FloatByteSize().
[wire_format.FloatByteSize, 0.0, 4],
[wire_format.FloatByteSize, 1000000000.0, 4],
[wire_format.FloatByteSize, -1000000000.0, 4],
# DoubleByteSize().
[wire_format.DoubleByteSize, 0.0, 8],
[wire_format.DoubleByteSize, 1000000000.0, 8],
[wire_format.DoubleByteSize, -1000000000.0, 8],
# BoolByteSize().
[wire_format.BoolByteSize, False, 1],
[wire_format.BoolByteSize, True, 1],
# EnumByteSize().
[wire_format.EnumByteSize, 0, 1],
[wire_format.EnumByteSize, 127, 1],
[wire_format.EnumByteSize, 128, 2],
[wire_format.EnumByteSize, wire_format.UINT32_MAX, 5],
]
for args in NUMERIC_ARGS:
self.NumericByteSizeTestHelper(*args)
# Test strings and bytes.
for byte_size_fn in (wire_format.StringByteSize, wire_format.BytesByteSize):
# 1 byte for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(5, byte_size_fn(10, 'abc'))
# 2 bytes for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(6, byte_size_fn(16, 'abc'))
# 2 bytes for tag, 2 bytes for length, 128 bytes for contents.
self.assertEqual(132, byte_size_fn(16, 'a' * 128))
# Test UTF-8 string byte size calculation.
# 1 byte for tag, 1 byte for length, 8 bytes for content.
self.assertEqual(10, wire_format.StringByteSize(
5, unicode('\xd0\xa2\xd0\xb5\xd1\x81\xd1\x82', 'utf-8')))
class MockMessage(object):
def __init__(self, byte_size):
self.byte_size = byte_size
def ByteSize(self):
return self.byte_size
message_byte_size = 10
mock_message = MockMessage(byte_size=message_byte_size)
# Test groups.
# (2 * 1) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(2 + message_byte_size,
wire_format.GroupByteSize(1, mock_message))
# (2 * 2) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(4 + message_byte_size,
wire_format.GroupByteSize(16, mock_message))
# Test messages.
# 1 byte for tag, plus 1 byte for length, plus contents.
self.assertEqual(2 + mock_message.byte_size,
wire_format.MessageByteSize(1, mock_message))
# 2 bytes for tag, plus 1 byte for length, plus contents.
self.assertEqual(3 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# 2 bytes for tag, plus 2 bytes for length, plus contents.
mock_message.byte_size = 128
self.assertEqual(4 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# Test message set item byte size.
# 4 bytes for tags, plus 1 byte for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 10
self.assertEqual(mock_message.byte_size + 6,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 128
self.assertEqual(mock_message.byte_size + 7,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 2 byte for type_id,
# plus contents.
self.assertEqual(mock_message.byte_size + 8,
wire_format.MessageSetItemByteSize(128, mock_message))
# Too-long varint.
self.assertRaises(message.EncodeError,
wire_format.UInt64ByteSize, 1, 1 << 128)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Radium-Devices/Radium_jflte | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and print them as a
# single "index=value ..." line (index in hex) for the adsl_config sysfs
# attribute.
i = 0
while True:
	buf = sys.stdin.read(4)
	if len(buf) == 0:
		# Clean EOF on a 4-byte boundary: done.
		break
	elif len(buf) != 4:
		# Truncated trailing value: terminate the output line and bail.
		sys.stdout.write("\n")
		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
		sys.exit(1)
	if i > 0:
		sys.stdout.write(" ")
	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
	i += 1
sys.stdout.write("\n")
| gpl-2.0 |
gauravbose/digital-menu | digimenu2/tests/admin_utils/models.py | 107 | 1712 | from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Site(models.Model):
domain = models.CharField(max_length=100)
def __str__(self):
return self.domain
class Article(models.Model):
    """
    A simple Article model for testing
    """
    site = models.ForeignKey(Site, related_name="admin_articles")
    title = models.CharField(max_length=100)
    title2 = models.CharField(max_length=100, verbose_name="another name")
    created = models.DateTimeField()
    def test_from_model(self):
        # Dummy model method; exercised by admin display-callable tests.
        return "nothing"
    def test_from_model_with_override(self):
        # Same as above, but with an explicit admin column header set below.
        return "nothing"
    test_from_model_with_override.short_description = "not What you Expect"
@python_2_unicode_compatible
class Count(models.Model):
num = models.PositiveSmallIntegerField()
parent = models.ForeignKey('self', null=True)
def __str__(self):
return six.text_type(self.num)
class Event(models.Model):
date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
event = models.OneToOneField(Event, verbose_name='awesome event')
class Guest(models.Model):
event = models.OneToOneField(Event)
name = models.CharField(max_length=255)
class Meta:
verbose_name = "awesome guest"
class EventGuide(models.Model):
event = models.ForeignKey(Event, on_delete=models.DO_NOTHING)
class Vehicle(models.Model):
pass
class VehicleMixin(Vehicle):
vehicle = models.OneToOneField(Vehicle, parent_link=True, related_name='vehicle_%(app_label)s_%(class)s')
class Meta:
abstract = True
class Car(VehicleMixin):
pass
| bsd-3-clause |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/names/common.py | 55 | 7726 | # -*- test-case-name: twisted.names.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Base functionality useful to various parts of Twisted Names.
"""
from __future__ import division, absolute_import
import socket
from zope.interface import implementer
from twisted.names import dns
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.internet import defer, error, interfaces
from twisted.python import failure
# Helpers for indexing the three-tuples that get thrown around by this code a
# lot.
_ANS, _AUTH, _ADD = range(3)
EMPTY_RESULT = (), (), ()
@implementer(interfaces.IResolver)
class ResolverBase:
"""
L{ResolverBase} is a base class for implementations of
L{interfaces.IResolver} which deals with a lot
of the boilerplate of implementing all of the lookup methods.
@cvar _errormap: A C{dict} mapping DNS protocol failure response codes
to exception classes which will be used to represent those failures.
"""
_errormap = {
dns.EFORMAT: DNSFormatError,
dns.ESERVER: DNSServerError,
dns.ENAME: DNSNameError,
dns.ENOTIMP: DNSNotImplementedError,
dns.EREFUSED: DNSQueryRefusedError}
typeToMethod = None
def __init__(self):
self.typeToMethod = {}
for (k, v) in typeToMethod.items():
self.typeToMethod[k] = getattr(self, v)
def exceptionForCode(self, responseCode):
"""
Convert a response code (one of the possible values of
L{dns.Message.rCode} to an exception instance representing it.
@since: 10.0
"""
return self._errormap.get(responseCode, DNSUnknownError)
    def query(self, query, timeout=None):
        """Dispatch *query* to the lookup method registered for its record type.

        Returns a Deferred; unsupported query types produce a Deferred that
        fails with NotImplementedError.
        """
        try:
            method = self.typeToMethod[query.type]
        except KeyError:
            return defer.fail(failure.Failure(NotImplementedError(
                str(self.__class__) + " " + str(query.type))))
        else:
            return defer.maybeDeferred(method, query.name.name, timeout)
def _lookup(self, name, cls, type, timeout):
return defer.fail(NotImplementedError("ResolverBase._lookup"))
def lookupAddress(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.A, timeout)
def lookupIPV6Address(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AAAA, timeout)
def lookupAddress6(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.A6, timeout)
def lookupMailExchange(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MX, timeout)
def lookupNameservers(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NS, timeout)
def lookupCanonicalName(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.CNAME, timeout)
def lookupMailBox(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MB, timeout)
def lookupMailGroup(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MG, timeout)
def lookupMailRename(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MR, timeout)
def lookupPointer(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.PTR, timeout)
def lookupAuthority(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SOA, timeout)
def lookupNull(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NULL, timeout)
def lookupWellKnownServices(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.WKS, timeout)
def lookupService(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SRV, timeout)
def lookupHostInfo(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.HINFO, timeout)
def lookupMailboxInfo(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MINFO, timeout)
def lookupText(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.TXT, timeout)
def lookupSenderPolicy(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SPF, timeout)
def lookupResponsibility(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.RP, timeout)
def lookupAFSDatabase(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AFSDB, timeout)
def lookupZone(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AXFR, timeout)
def lookupNamingAuthorityPointer(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NAPTR, timeout)
def lookupAllRecords(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.ALL_RECORDS, timeout)
# IResolverSimple
def getHostByName(self, name, timeout=None, effort=10):
# XXX - respect timeout
return self.lookupAllRecords(name, timeout
).addCallback(self._cbRecords, name, effort
)
def _cbRecords(self, records, name, effort):
(ans, auth, add) = records
result = extractRecord(self, dns.Name(name), ans + auth + add, effort)
if not result:
raise error.DNSLookupError(name)
return result
def extractRecord(resolver, name, answers, level=10):
    """Pick an address for *name* from *answers*, chasing CNAME/NS referrals.

    Prefers IPv6 (A6, then AAAA) when the platform has inet_ntop, then
    IPv4 (A). *level* bounds the recursion depth; returns None when it is
    exhausted.
    """
    if not level:
        return None
    if hasattr(socket, 'inet_ntop'):
        for r in answers:
            if r.name == name and r.type == dns.A6:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
        for r in answers:
            if r.name == name and r.type == dns.AAAA:
                return socket.inet_ntop(socket.AF_INET6, r.payload.address)
    for r in answers:
        if r.name == name and r.type == dns.A:
            return socket.inet_ntop(socket.AF_INET, r.payload.address)
    for r in answers:
        if r.name == name and r.type == dns.CNAME:
            # Try to resolve the alias target from this same answer set
            # first; fall back to a fresh lookup if it is not present.
            result = extractRecord(
                resolver, r.payload.name, answers, level - 1)
            if not result:
                return resolver.getHostByName(
                    str(r.payload.name), effort=level - 1)
            return result
    # No answers, but maybe there's a hint at who we should be asking about
    # this
    for r in answers:
        if r.type == dns.NS:
            from twisted.names import client
            r = client.Resolver(servers=[(str(r.payload.name), dns.PORT)])
            return r.lookupAddress(str(name)
                ).addCallback(
                    lambda records: extractRecord(
                        r, name,
                        records[_ANS] + records[_AUTH] + records[_ADD],
                        level - 1))
typeToMethod = {
dns.A: 'lookupAddress',
dns.AAAA: 'lookupIPV6Address',
dns.A6: 'lookupAddress6',
dns.NS: 'lookupNameservers',
dns.CNAME: 'lookupCanonicalName',
dns.SOA: 'lookupAuthority',
dns.MB: 'lookupMailBox',
dns.MG: 'lookupMailGroup',
dns.MR: 'lookupMailRename',
dns.NULL: 'lookupNull',
dns.WKS: 'lookupWellKnownServices',
dns.PTR: 'lookupPointer',
dns.HINFO: 'lookupHostInfo',
dns.MINFO: 'lookupMailboxInfo',
dns.MX: 'lookupMailExchange',
dns.TXT: 'lookupText',
dns.SPF: 'lookupSenderPolicy',
dns.RP: 'lookupResponsibility',
dns.AFSDB: 'lookupAFSDatabase',
dns.SRV: 'lookupService',
dns.NAPTR: 'lookupNamingAuthorityPointer',
dns.AXFR: 'lookupZone',
dns.ALL_RECORDS: 'lookupAllRecords',
}
| mit |
marmarek/livecd-tools | imgcreate/errors.py | 7 | 2188 | #
# errors.py : exception definitions
#
# Copyright 2007, Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class CreatorError(Exception):
    """An exception base class for all imgcreate errors."""
    def __init__(self, msg):
        Exception.__init__(self, msg)
    # Some error messages may contain unicode strings (especially if your system
    # locale is different from 'C', e.g. 'de_DE'). Python's exception class does
    # not handle this appropriately (at least until 2.5) because str(Exception)
    # returns just self.message without ensuring that all characters can be
    # represented using ASCII. So we try to return a str and fall back to repr
    # if this does not work.
    #
    # Please use unicode for your error logging strings so that we can really
    # print nice error messages, e.g.:
    #   log.error(u"Internal error: " % e)
    # instead of
    #   log.error("Internal error: " % e)
    # With our custom __str__ and __unicode__ methods both will work but the
    # first log call print a more readable error message.
    def __str__(self):
        # Python 2: str() of a unicode message can raise; fall back to repr.
        try:
            return str(self.message)
        except UnicodeEncodeError:
            return repr(self.message)
    def __unicode__(self):
        if not self.message:
            return unicode("")
        # assumes self.message is a utf-8 encoded byte string -- TODO confirm
        return unicode(self.message.decode("utf8"))
class KickstartError(CreatorError):
    """Error while parsing or applying a kickstart file."""
    pass
class MountError(CreatorError):
    """Error while mounting or unmounting an image filesystem."""
    pass
class SnapshotError(CreatorError):
    """Error while creating or removing a device snapshot."""
    pass
class SquashfsError(CreatorError):
    """Error while creating a squashfs image."""
    pass
class ResizeError(CreatorError):
    """Error while resizing an image filesystem."""
    pass
| gpl-2.0 |
itakouna/lymph | lymph/core/monitoring/pusher.py | 4 | 1319 | import logging
import time
import gevent
import msgpack
import zmq.green as zmq
from lymph.core.components import Component
logger = logging.getLogger(__name__)
DEFAULT_MONITOR_ENDPOINT = 'tcp://127.0.0.1:44044'
class MonitorPusher(Component):
    """Component that periodically publishes the aggregator's metrics to a
    monitoring endpoint over a ZeroMQ PUB socket."""
    def __init__(self, container, aggregator, endpoint=None, interval=2):
        super(MonitorPusher, self).__init__()
        self.container = container
        self.interval = interval  # seconds between pushes
        self.endpoint = endpoint or DEFAULT_MONITOR_ENDPOINT
        logger.info('connecting to monitor endpoint %s', self.endpoint)
        ctx = zmq.Context.instance()
        self.socket = ctx.socket(zmq.PUB)
        self.socket.connect(self.endpoint)
        self.aggregator = aggregator
    def on_start(self):
        # Run the push loop as a greenlet owned by the container.
        self.loop_greenlet = self.container.spawn(self.loop)
    def on_stop(self, **kwargs):
        self.loop_greenlet.kill()
    def loop(self):
        """Forever: sleep ``interval`` seconds, then publish a 'stats' message."""
        last_stats = time.monotonic()
        while True:
            gevent.sleep(self.interval)
            dt = time.monotonic() - last_stats
            series = list(self.aggregator.get_metrics())
            stats = {
                'time': time.time(),
                'series': series,
            }
            # Equivalent to last_stats = time.monotonic(); dt is not
            # otherwise used in the published payload.
            last_stats += dt
            self.socket.send_multipart([b'stats', msgpack.dumps(stats)])
| apache-2.0 |
klemensavli/django-localflavor-si | django_localflavor_si/forms.py | 101 | 4678 | """
Slovenian specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
import datetime
import re
from django.contrib.localflavor.si.si_postalcodes import SI_POSTALCODES_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, Select, ChoiceField
from django.utils.translation import ugettext_lazy as _
class SIEMSOField(CharField):
    """A form for validating Slovenian personal identification number.
    Additionally stores gender, nationality and birthday to self.info dictionary.
    """
    default_error_messages = {
        'invalid': _('This field should contain exactly 13 digits.'),
        'date': _('The first 7 digits of the EMSO must represent a valid past date.'),
        'checksum': _('The EMSO is not valid.'),
    }
    # EMSO layout: DD MM YYY RR SSS K (day, month, 3-digit year, region,
    # serial, check digit).
    emso_regex = re.compile('^(\d{2})(\d{2})(\d{3})(\d{2})(\d{3})(\d)$')

    def clean(self, value):
        """Validate the EMSO; returns '' for empty input, the value otherwise.

        Side effect: sets ``self.info`` with gender/birthdate/nationality.
        """
        super(SIEMSOField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        value = value.strip()
        m = self.emso_regex.match(value)
        if m is None:
            raise ValidationError(self.default_error_messages['invalid'])
        # Validate EMSO: weighted checksum over the first 12 digits with
        # weights 7,6,5,4,3,2 repeated twice (zip truncates the 13th digit).
        s = 0
        int_values = [int(i) for i in value]
        for a, b in zip(int_values, list(range(7, 1, -1)) * 2):
            s += a * b
        chk = s % 11
        if chk == 0:
            K = 0
        else:
            K = 11 - chk
        if K == 10 or int_values[-1] != K:
            raise ValidationError(self.default_error_messages['checksum'])
        # Extract extra info in the identification number
        day, month, year, nationality, gender, chksum = [int(i) for i in m.groups()]
        # Expand the 3-digit year: values below 890 are treated as 2000s,
        # the rest as 1000s (e.g. 990 -> 1990, 005 -> 2005).
        if year < 890:
            year += 2000
        else:
            year += 1000
        # validate birthday
        try:
            birthday = datetime.date(year, month, day)
        except ValueError:
            raise ValidationError(self.error_messages['date'])
        if datetime.date.today() < birthday:
            raise ValidationError(self.error_messages['date'])
        self.info = {
            # serial numbers below 500 encode male, 500+ female
            'gender': gender < 500 and 'male' or 'female',
            'birthdate': birthday,
            'nationality': nationality,
        }
        return value
class SITaxNumberField(CharField):
    """Slovenian tax number field.
    Valid input is SIXXXXXXXX or XXXXXXXX where X is a number.
    """
    default_error_messages = {
        'invalid': _('Enter a valid tax number in form SIXXXXXXXX'),
    }
    # Optional SI prefix; first digit must be non-zero.
    sitax_regex = re.compile('^(?:SI)?([1-9]\d{7})$')

    def clean(self, value):
        """Validate the tax number; returns the 8 digits without the SI prefix."""
        super(SITaxNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        value = value.strip()
        m = self.sitax_regex.match(value)
        if m is None:
            raise ValidationError(self.default_error_messages['invalid'])
        value = m.groups()[0]
        # Validate Tax number: weighted checksum over the first 7 digits
        # (weights 8..2); a remainder of 0 yields chk == 11, which can never
        # match a single digit, so such numbers are rejected.
        s = 0
        int_values = [int(i) for i in value]
        for a, b in zip(int_values, range(8, 1, -1)):
            s += a * b
        chk = 11 - (s % 11)
        if chk == 10:
            chk = 0
        if int_values[-1] != chk:
            raise ValidationError(self.default_error_messages['invalid'])
        return value
class SIPostalCodeField(ChoiceField):
    """Slovenian post codes field.

    A ChoiceField pre-populated with SI_POSTALCODES_CHOICES unless the
    caller supplies its own ``choices``.
    """
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('choices', SI_POSTALCODES_CHOICES)
        super(SIPostalCodeField, self).__init__(*args, **kwargs)
class SIPostalCodeSelect(Select):
    """A Select widget that uses Slovenian postal codes as its choices.
    """
    def __init__(self, attrs=None):
        super(SIPostalCodeSelect, self).__init__(attrs,
            choices=SI_POSTALCODES_CHOICES)
class SIPhoneNumberField(CharField):
    """Slovenian phone number field.
    Phone number must contain at least local area code.
    Country code can be present.
    Examples:
    * +38640XXXXXX
    * 0038640XXXXXX
    * 040XXXXXX
    * 01XXXXXX
    * 0590XXXXX
    """
    default_error_messages = {
        'invalid': _('Enter phone number in form +386XXXXXXXX or 0XXXXXXXX.'),
    }
    # Accepts '+386', '00386' or a leading '0', followed by 7-8 digits;
    # only the national part (group 1) is kept.
    phone_regex = re.compile('^(?:(?:00|\+)386|0)(\d{7,8})$')

    def clean(self, value):
        """Validate and normalize: returns the national number without prefix."""
        super(SIPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Strip common separators before matching.
        value = value.replace(' ', '').replace('-', '').replace('/', '')
        m = self.phone_regex.match(value)
        if m is None:
            raise ValidationError(self.default_error_messages['invalid'])
        return m.groups()[0]
| bsd-3-clause |
sand8080/pnp_test | sketch/config.py | 1 | 1453 | # Server Specific Configurations
# HTTP listener settings (pecan expects strings here).
server = {
    'port': '8080',
    'host': '0.0.0.0'
}
# Pecan Application Configurations
app = {
    'root': 'sketch.controllers.root.RootController',
    'modules': ['sketch'],
    'static_root': '%(confdir)s/public',
    'template_path': '%(confdir)s/sketch/templates',
    'debug': True,
    'errors': {
        404: '/error/404',
        '__force_dict__': True
    }
}
# Logging configuration in logging.config.dictConfig style;
# '__force_dict__' keeps the mapping a plain dict for pecan.
logging = {
    'loggers': {
        'root': {'level': 'INFO', 'handlers': ['console']},
        'sketch': {'level': 'DEBUG', 'handlers': ['console']},
        'pecan.commands.serve': {'level': 'DEBUG', 'handlers': ['console']},
        'py.warnings': {'handlers': ['console']},
        '__force_dict__': True
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'color'
        }
    },
    'formatters': {
        'simple': {
            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
                       '[%(threadName)s] %(message)s')
        },
        'color': {
            '()': 'pecan.log.ColorFormatter',
            'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
                       '[%(threadName)s] %(message)s'),
            '__force_dict__': True
        }
    }
}
# Custom Configurations must be in Python dictionary format::
#
# foo = {'bar':'baz'}
#
# All configurations are accessible at::
# pecan.conf
| mit |
DNFcode/edx-platform | common/djangoapps/util/testing.py | 25 | 1894 | import sys
from django.conf import settings
from django.core.urlresolvers import clear_url_caches, resolve
class UrlResetMixin(object):
    """Mixin to reset urls.py before and after a test
    Django memoizes the function that reads the urls module (whatever module
    urlconf names). The module itself is also stored by python in sys.modules.
    To fully reload it, we need to reload the python module, and also clear django's
    cache of the parsed urls.
    However, the order in which we do this doesn't matter, because neither one will
    get reloaded until the next request
    Doing this is expensive, so it should only be added to tests that modify settings
    that affect the contents of urls.py
    """

    def _reset_urls(self, urlconf_modules):
        """Reset `urls.py` for a set of Django apps."""
        for urlconf in urlconf_modules:
            if urlconf in sys.modules:
                # Python 2 builtin reload(); re-executes the urlconf module.
                reload(sys.modules[urlconf])
        clear_url_caches()
        # Resolve a URL so that the new urlconf gets loaded
        resolve('/')

    def setUp(self, *args, **kwargs):
        """Reset Django urls before tests and after tests
        If you need to reset `urls.py` from a particular Django app (or apps),
        specify these modules in *args.
        Examples:
        # Reload only the root urls.py
        super(MyTestCase, self).setUp()
        # Reload urls from my_app
        super(MyTestCase, self).setUp("my_app.urls")
        # Reload urls from my_app and another_app
        super(MyTestCase, self).setUp("my_app.urls", "another_app.urls")
        """
        # NOTE: *args are consumed here as urlconf names; only **kwargs are
        # forwarded to the next setUp in the MRO.
        super(UrlResetMixin, self).setUp(**kwargs)
        urlconf_modules = [settings.ROOT_URLCONF]
        if args:
            urlconf_modules.extend(args)
        self._reset_urls(urlconf_modules)
        # Restore the default urlconfs when the test finishes.
        self.addCleanup(lambda: self._reset_urls(urlconf_modules))
| agpl-3.0 |
def parse_future(tree, feature_flags):
    """Scan the top of a parsed module for ``from __future__ import`` flags.

    Walks the leading statements of *tree* (an ``ast.Module`` or
    ``ast.Interactive``), skipping at most one docstring, and ORs together
    the flag values from *feature_flags* for every imported feature name.

    Returns a ``(flags, future_lineno, future_column)`` tuple; all zeros
    when *tree* has no usable body or no __future__ imports are found.
    """
    # Project-local AST module (not the stdlib one).
    from ayrton.parser.astcompiler import ast
    future_lineno = 0
    future_column = 0
    flags = 0
    have_docstring = False
    body = None
    # Both node kinds expose their statements as .body.
    if isinstance(tree, (ast.Module, ast.Interactive)):
        body = tree.body
    if body is None:
        return 0, 0, 0
    for stmt in body:
        if isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Str):
            # A second string expression ends the __future__ prologue.
            if have_docstring:
                break
            else:
                have_docstring = True
        elif isinstance(stmt, ast.ImportFrom):
            if stmt.module == "__future__":
                future_lineno = stmt.lineno
                future_column = stmt.col_offset
                for alias in stmt.names:
                    assert isinstance(alias, ast.alias)
                    # If this is an invalid flag, it will be caught later in
                    # codegen.py.
                    flags |= feature_flags.get(alias.name, 0)
            else:
                break
        else:
            break
    return flags, future_lineno, future_column
class ForbiddenNameAssignment(Exception):
    """Raised when code tries to assign to a reserved name."""

    def __init__(self, name, node):
        self.name, self.node = name, node


def check_forbidden_name(name, node=None):
    """Raise an error if the name cannot be assigned to."""
    reserved = ("None", "__debug__")
    if name in reserved:
        raise ForbiddenNameAssignment(name, node)
    # XXX Warn about using True and False
# shamelessly lifted off rpython
class unrolling_iterable:
    """Snapshot of an iterable that also exposes a lazily-built linked
    list of its elements (see _unroller)."""

    def __init__(self, iterable):
        items = list(iterable)
        self._items = items
        self._head = _unroller(items)

    def __iter__(self):
        return iter(self._items)

    def get_unroller(self):
        """Return the head node of the element chain."""
        return self._head


# ditto
class _unroller:
    """One position in the chain; step() yields (value, next_node)."""

    def __init__(self, items, i=0):
        self._items = items
        self._i = i
        self._next = None

    def step(self):
        value = self._items[self._i]
        # Memoize the successor so repeated step() calls share one node.
        if self._next is None:
            self._next = _unroller(self._items, self._i + 1)
        return value, self._next
def dict_to_switch(d):
    """Convert a dictionary with integer keys to a switch statement.

    Returns a lookup(query) closure that indexes into *d* (raising KeyError
    for unknown keys).  The function is tagged with ``_always_inline_`` as a
    hint for the (rpython-derived) translator.
    """
    def lookup(query):
        return d[query]
    lookup._always_inline_ = True
    # Removed: a dead local built via unrolling_iterable(d.items()) -- it was
    # never used (vestigial rpython code) and had no side effects.
    return lookup
def mangle(name, klass):
    """Apply class-private name mangling: '__x' in class 'Foo' -> '_Foo__x'.

    Names that don't start with '__', dunder names, and dotted names are
    returned unchanged, as is any name when *klass* is empty or consists
    only of underscores.
    """
    if not name.startswith('__'):
        return name
    # Don't mangle __id__ or names with dots. The only time a name with a dot
    # can occur is when we are compiling an import statement that has a package
    # name.
    if name.endswith('__') or '.' in name:
        return name
    stripped = klass.lstrip('_')
    if not stripped:
        # klass is empty or all underscores: nothing to mangle against.
        return name
    return "_" + stripped + name
def intern_if_common_string(space, w_const):
    """Return an interned copy of *w_const* when it is an identifier-like
    unicode constant; otherwise return it unchanged.

    Uses the PyPy object-space API (space.is_w / space.new_interned_w_str).
    """
    # only intern identifier-like strings
    from pypy.objspace.std.unicodeobject import _isidentifier
    if (space.is_w(space.type(w_const), space.w_unicode) and
            _isidentifier(space.unicode_w(w_const))):
        return space.new_interned_w_str(w_const)
    return w_const
def new_identifier(space, name):
    """Return *name* normalized for use as an identifier.

    Pure-ASCII names are returned as-is; anything containing a non-ASCII
    character is NFKC-normalized via PyPy's unicodedata module.
    """
    # Check whether there are non-ASCII characters in the identifier; if
    # so, normalize to NFKC
    for c in name:
        if ord(c) > 0x80:
            break
    else:
        return name
    from pypy.module.unicodedata.interp_ucd import ucd
    w_name = space.newtext(name)
    w_id = space.call_method(ucd, 'normalize', space.newtext('NFKC'), w_name)
    return space.text_w(w_id)
| gpl-3.0 |
rocky4570/moto | moto/logs/models.py | 2 | 11939 | from moto.core import BaseBackend
import boto.logs
from moto.core.utils import unix_time_millis
from .exceptions import (
ResourceNotFoundException,
ResourceAlreadyExistsException
)
class LogEvent:
    """One CloudWatch Logs event: a timestamped message plus bookkeeping."""

    # Class-wide counter handing out unique, monotonically increasing ids.
    _event_id = 0

    def __init__(self, ingestion_time, log_event):
        self.ingestionTime = ingestion_time
        self.timestamp = log_event["timestamp"]
        self.message = log_event["message"]
        self.eventId = type(self)._event_id
        type(self)._event_id += 1

    def to_filter_dict(self):
        """Shape used by FilterLogEvents responses (includes the event id)."""
        payload = self.to_response_dict()
        # "logStreamName" is filled in by the owning stream, not here.
        payload["eventId"] = str(self.eventId)
        return payload

    def to_response_dict(self):
        """Shape used by GetLogEvents responses."""
        return {
            "ingestionTime": self.ingestionTime,
            "message": self.message,
            "timestamp": self.timestamp,
        }
class LogStream:
    """In-memory model of a CloudWatch Logs log stream."""

    # Class-wide counter; the current value is baked into each new stream's ARN.
    _log_ids = 0

    def __init__(self, region, log_group, name):
        self.region = region
        self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
            region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name)
        self.creationTime = unix_time_millis()
        # First/last event timestamps are recomputed lazily by _update().
        self.firstEventTimestamp = None
        self.lastEventTimestamp = None
        self.lastIngestionTime = None
        self.logStreamName = name
        self.storedBytes = 0
        self.uploadSequenceToken = 0  # I'm guessing this is token needed for sequenceToken by put_events
        self.events = []
        self.__class__._log_ids += 1

    def _update(self):
        """Refresh first/last event timestamps from the current events."""
        # events can be empty when stream is described soon after creation
        self.firstEventTimestamp = min([x.timestamp for x in self.events]) if self.events else None
        self.lastEventTimestamp = max([x.timestamp for x in self.events]) if self.events else None

    def to_describe_dict(self):
        """Shape used by DescribeLogStreams; event fields only when non-empty."""
        # Compute start and end times
        self._update()
        res = {
            "arn": self.arn,
            "creationTime": self.creationTime,
            "logStreamName": self.logStreamName,
            "storedBytes": self.storedBytes,
        }
        if self.events:
            rest = {
                "firstEventTimestamp": self.firstEventTimestamp,
                "lastEventTimestamp": self.lastEventTimestamp,
                "lastIngestionTime": self.lastIngestionTime,
                "uploadSequenceToken": str(self.uploadSequenceToken),
            }
            res.update(rest)
        return res

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        """Append events and return the next upload sequence token (zero-padded)."""
        # TODO: ensure sequence_token
        # TODO: to be thread safe this would need a lock
        self.lastIngestionTime = unix_time_millis()
        # TODO: make this match AWS if possible
        self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
        self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events]
        self.uploadSequenceToken += 1
        return '{:056d}'.format(self.uploadSequenceToken)

    def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
        """Return (page_of_events, backward_token, forward_token).

        next_token is treated as an integer offset into the filtered,
        sorted event list; NOTE(review): the returned back_token is just the
        incoming token, which differs from real AWS token semantics.
        """
        def filter_func(event):
            if start_time and event.timestamp < start_time:
                return False
            if end_time and event.timestamp > end_time:
                return False
            return True
        events = sorted(filter(filter_func, self.events), key=lambda event: event.timestamp, reverse=start_from_head)
        back_token = next_token
        if next_token is None:
            next_token = 0
        events_page = [event.to_response_dict() for event in events[next_token: next_token + limit]]
        next_token += limit
        if next_token >= len(self.events):
            next_token = None
        return events_page, back_token, next_token

    def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
        """Return all events in the time window as filter-shaped dicts.

        NOTE(review): filter_pattern/limit/next_token are ignored here;
        pagination happens in LogGroup.filter_log_events.
        """
        def filter_func(event):
            if start_time and event.timestamp < start_time:
                return False
            if end_time and event.timestamp > end_time:
                return False
            return True
        events = []
        for event in sorted(filter(filter_func, self.events), key=lambda x: x.timestamp):
            event_obj = event.to_filter_dict()
            event_obj['logStreamName'] = self.logStreamName
            events.append(event_obj)
        return events
class LogGroup:
    """In-memory model of a CloudWatch Logs log group holding LogStreams."""

    def __init__(self, region, name, tags):
        self.name = name
        self.region = region
        # NOTE(review): account id is hard-coded to 1 in the ARN.
        self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
            region=region, log_group=name)
        self.creationTime = unix_time_millis()
        self.tags = tags
        self.streams = dict()  # {name: LogStream}

    def create_log_stream(self, log_stream_name):
        """Create a stream; raises if the name already exists."""
        if log_stream_name in self.streams:
            raise ResourceAlreadyExistsException()
        self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name)

    def delete_log_stream(self, log_stream_name):
        if log_stream_name not in self.streams:
            raise ResourceNotFoundException()
        del self.streams[log_stream_name]

    def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
        """Return (page_of_stream_dicts, next_token) sorted by name or activity."""
        # responses only logStreamName, creationTime, arn, storedBytes when no events are stored.
        log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)]

        def sorter(item):
            # Sort key: stream name, or last event time for LastEventTime order.
            return item[0] if order_by == 'logStreamName' else item[1].get('lastEventTimestamp', 0)
        if next_token is None:
            next_token = 0
        log_streams = sorted(log_streams, key=sorter, reverse=descending)
        new_token = next_token + limit
        log_streams_page = [x[1] for x in log_streams[next_token: new_token]]
        if new_token >= len(log_streams):
            new_token = None
        return log_streams_page, new_token

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        """Delegate to the named stream; raises if it does not exist."""
        if log_stream_name not in self.streams:
            raise ResourceNotFoundException()
        stream = self.streams[log_stream_name]
        return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)

    def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
        """Delegate to the named stream; raises if it does not exist."""
        if log_stream_name not in self.streams:
            raise ResourceNotFoundException()
        stream = self.streams[log_stream_name]
        return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)

    def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
        """Collect events across streams (all streams when names is falsy)
        and paginate by integer offset."""
        streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names]
        events = []
        for stream in streams:
            events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)
        if interleaved:
            events = sorted(events, key=lambda event: event['timestamp'])
        if next_token is None:
            next_token = 0
        events_page = events[next_token: next_token + limit]
        next_token += limit
        if next_token >= len(events):
            next_token = None
        searched_streams = [{"logStreamName": stream.logStreamName, "searchedCompletely": True} for stream in streams]
        return events_page, next_token, searched_streams

    def to_describe_dict(self):
        """Shape used by DescribeLogGroups responses."""
        return {
            "arn": self.arn,
            "creationTime": self.creationTime,
            "logGroupName": self.name,
            "metricFilterCount": 0,
            "retentionInDays": 30,
            "storedBytes": sum(s.storedBytes for s in self.streams.values()),
        }
class LogsBackend(BaseBackend):
    """Per-region moto backend mapping log group names to LogGroup objects.

    Most methods are thin delegators that first validate the group exists.
    """

    def __init__(self, region_name):
        self.region_name = region_name
        self.groups = dict()  # { logGroupName: LogGroup}

    def reset(self):
        """Wipe all state but keep the region name."""
        region_name = self.region_name
        self.__dict__ = {}
        self.__init__(region_name)

    def create_log_group(self, log_group_name, tags):
        if log_group_name in self.groups:
            raise ResourceAlreadyExistsException()
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def ensure_log_group(self, log_group_name, tags):
        """Like create_log_group but a no-op when the group already exists."""
        if log_group_name in self.groups:
            return
        self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags)

    def delete_log_group(self, log_group_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        del self.groups[log_group_name]

    def describe_log_groups(self, limit, log_group_name_prefix, next_token):
        """Return (page_of_group_dicts, next_token); token is an int offset.

        NOTE(review): sorted() is applied to a sequence of dicts, which
        relies on Python 2 comparison semantics and would raise on Python 3.
        """
        if log_group_name_prefix is None:
            log_group_name_prefix = ''
        if next_token is None:
            next_token = 0
        groups = sorted(group.to_describe_dict() for name, group in self.groups.items() if name.startswith(log_group_name_prefix))
        groups_page = groups[next_token:next_token + limit]
        next_token += limit
        if next_token >= len(groups):
            next_token = None
        return groups_page, next_token

    def create_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.create_log_stream(log_stream_name)

    def delete_log_stream(self, log_group_name, log_stream_name):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.delete_log_stream(log_stream_name)

    def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by)

    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        # TODO: add support for sequence_tokens
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token)

    def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head)

    def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved):
        if log_group_name not in self.groups:
            raise ResourceNotFoundException()
        log_group = self.groups[log_group_name]
        return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved)


# One backend instance per boto-known region.
logs_backends = {region.name: LogsBackend(region.name) for region in boto.logs.regions()}
| apache-2.0 |
athena-voice/athena-voice-client | athena/modules/active/voice_browse.py | 1 | 2096 | """
Wraps a web browser so it can be driven by spoken commands
Usage Examples:
- "Open facebook.com"
- "Search Neil Degrasse Tyson"
- "Maximize the browser"
"""
from athena.classes.module import Module
from athena.classes.task import ActiveTask
from athena.apis import api_lib
# Spoken-command patterns, in priority order.  The index of the pattern that
# matches selects the handler in VoiceBrowseTask.action; group 1 (where
# present) captures the search text, URL, or text to type.
VB_PATTERNS = [r'.*\b(?:search(?: for)?|look up|tell me about)\b(.*)',
               r'.*\b(?:go to|open)(.*\.(com|org|net|edu|gov|io|html))\b',
               r'.*\b(?:type)\b(.*)',
               r'.*\b(?:close|shut)(?: the| this)? (tab|page)\b.*',
               r'.*\b(?:close|shut)(?: the| this)? (browser)\b.*',
               r'.*\b(delete|clear the)\b.*',
               r'.*\b(maximize)\b.*',
               r'.*\b(click)\b.*',
               r'.*\b(?:next|switch the) (tab|page)\b.*']
class VoiceBrowseTask(ActiveTask):
    """Active task that maps recognized speech to browser actions."""

    def __init__(self):
        super().__init__(patterns=VB_PATTERNS)
        # Save regex group 1 of the matched pattern as self.group1.
        self.groups = {1: 'group1'}

    def match(self, text):
        # Presumably also records which pattern matched as self.case
        # (used by action below) -- verify in ActiveTask.
        return self.match_and_save_groups(text, self.groups)

    def action(self, text):
        # Probe the driver; if the browser window was closed manually the
        # attribute access raises and the stale handle is dropped.
        try:
            api_lib['voice_browse_api'].driver.current_url
        except:
            api_lib['voice_browse_api'].driver = None
            print('\n~ Browser closed.')
        # Dispatch table indexed by the matched pattern (see VB_PATTERNS order).
        funcs = {
            0: api_lib['voice_browse_api'].search,
            1: api_lib['voice_browse_api'].open,
            2: api_lib['voice_browse_api'].type,
            3: api_lib['voice_browse_api'].close_tab,
            4: api_lib['voice_browse_api'].close,
            5: api_lib['voice_browse_api'].clear,
            6: api_lib['voice_browse_api'].maximize,
            7: api_lib['voice_browse_api'].click,
            8: api_lib['voice_browse_api'].switch_tab,
        }
        # Only the first three commands take an argument (the captured group).
        if self.case < 3:
            funcs[self.case](self.group1)
        else:
            funcs[self.case]()
class VoiceBrowse(Module):
    """Module wrapper registering the voice-browse task with Athena."""

    def __init__(self):
        tasks = [VoiceBrowseTask()]
        super().__init__('voice_browse', tasks, priority=2)
| gpl-3.0 |
alheinecke/tensorflow-xsmm | tensorflow/contrib/learn/python/learn/preprocessing/__init__.py | 138 | 1071 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocessing tools useful for building models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.preprocessing.categorical import *
from tensorflow.contrib.learn.python.learn.preprocessing.text import *
# pylint: enable=wildcard-import
| apache-2.0 |
kingvuplus/boom2 | lib/python/Components/ScrollLabel.py | 2 | 7349 | # Embedded file name: /usr/lib/enigma2/python/Components/ScrollLabel.py
import skin
from HTMLComponent import HTMLComponent
from GUIComponent import GUIComponent
from enigma import eLabel, eWidget, eSlider, fontRenderClass, ePoint, eSize
class ScrollLabel(HTMLComponent, GUIComponent):
    """Multi-line, page-scrollable text label for Enigma2 skins.

    Optionally splits each line at ``splitchar`` into a left and a right
    column (two eLabel widgets) when the skin sets the ``split`` attribute.
    """

    def __init__(self, text = ''):
        GUIComponent.__init__(self)
        self.message = text
        self.instance = None
        self.long_text = None      # main (left) eLabel
        self.right_text = None     # right column eLabel, only when split
        self.scrollbar = None
        self.pages = None          # page count, None when text fits
        self.total = None          # total pixel height of all pages
        self.split = False
        self.splitchar = '|'
        self.column = 0            # x position of the right column
        return

    def applySkin(self, desktop, parent):
        """Apply skin attributes, size the child widgets and page geometry."""
        ret = False
        if self.skinAttributes is not None:
            widget_attribs = []
            scrollbar_attribs = []
            for attrib, value in self.skinAttributes:
                # Border attributes go to the scrollbar, background/transparency
                # to the container widget; the rest stays on the labels.
                if 'borderColor' in attrib or 'borderWidth' in attrib:
                    scrollbar_attribs.append((attrib, value))
                if 'transparent' in attrib or 'backgroundColor' in attrib:
                    widget_attribs.append((attrib, value))
                if 'split' in attrib:
                    self.split = int(value)
                    if self.split:
                        self.right_text = eLabel(self.instance)
                if 'colposition' in attrib:
                    self.column = int(value)
                if 'dividechar' in attrib:
                    self.splitchar = value
            if self.split:
                skin.applyAllAttributes(self.long_text, desktop, self.skinAttributes + [('halign', 'left')], parent.scale)
                # NOTE(review): `'left' and self.column or 'right'` always
                # evaluates to `self.column or 'right'` because 'left' is
                # truthy -- looks like a bug; confirm intended halign.
                skin.applyAllAttributes(self.right_text, desktop, self.skinAttributes + [('transparent', '1'), ('halign', 'left' and self.column or 'right')], parent.scale)
            else:
                skin.applyAllAttributes(self.long_text, desktop, self.skinAttributes, parent.scale)
            skin.applyAllAttributes(self.instance, desktop, widget_attribs, parent.scale)
            skin.applyAllAttributes(self.scrollbar, desktop, scrollbar_attribs + widget_attribs, parent.scale)
            ret = True
        s = self.long_text.size()
        self.instance.move(self.long_text.position())
        lineheight = fontRenderClass.getInstance().getLineHeight(self.long_text.getFont())
        if not lineheight:
            lineheight = 30  # assume a safe default
        # Page height = whole number of text lines that fit the widget.
        lines = int(s.height() / lineheight)
        self.pageHeight = int(lines * lineheight)
        self.instance.resize(eSize(s.width(), self.pageHeight + int(lineheight / 6)))
        self.scrollbar.move(ePoint(s.width() - 10, 0))
        self.scrollbar.resize(eSize(10, self.pageHeight + int(lineheight / 6)))
        self.scrollbar.setOrientation(eSlider.orVertical)
        self.scrollbar.setRange(0, 100)
        self.scrollbar.setBorderWidth(1)
        self.long_text.move(ePoint(0, 0))
        # The label is made 40 pages tall; scrolling moves it vertically.
        self.long_text.resize(eSize(s.width() - 30, self.pageHeight * 40))
        if self.split:
            self.right_text.move(ePoint(self.column, 0))
            self.right_text.resize(eSize(s.width() - self.column - 30, self.pageHeight * 40))
        self.setText(self.message)
        return ret

    def setText(self, text):
        """Replace the text, recompute the page count and scrollbar state."""
        self.message = text
        if self.long_text is not None and self.pageHeight:
            self.long_text.move(ePoint(0, 0))
            if self.split:
                # Split each line at the first splitchar into left/right columns.
                left = []
                right = []
                for line in self.message.split('\n'):
                    line = line.split(self.splitchar, 1)
                    if len(line) == 1:
                        line.append('')
                    left.append(line[0])
                    right.append(line[1].lstrip(' '))
                self.long_text.setText('\n'.join(left))
                self.right_text.setText('\n'.join(right))
            else:
                self.long_text.setText(self.message)
            text_height = self.long_text.calculateSize().height()
            # Count how many pageHeight-sized pages cover the rendered text.
            total = self.pageHeight
            pages = 1
            while total < text_height:
                total += self.pageHeight
                pages += 1
            if pages > 1:
                self.scrollbar.show()
                self.total = total
                self.pages = pages
                self.updateScrollbar()
            else:
                self.scrollbar.hide()
                self.total = None
                self.pages = None
        return

    def appendText(self, text):
        """Append to the current text and refresh paging (no split handling)."""
        old_text = self.getText()
        if len(str(old_text)) > 0:
            self.message += text
        else:
            self.message = text
        if self.long_text is not None:
            self.long_text.setText(self.message)
            text_height = self.long_text.calculateSize().height()
            total = self.pageHeight
            pages = 1
            while total < text_height:
                total += self.pageHeight
                pages += 1
            if pages > 1:
                self.scrollbar.show()
                self.total = total
                self.pages = pages
                self.updateScrollbar()
            else:
                self.scrollbar.hide()
                self.total = None
                self.pages = None
        return

    def updateScrollbar(self):
        """Map the label's vertical offset onto the 0-100 scrollbar range."""
        start = -self.long_text.position().y() * 100 / self.total
        vis = self.pageHeight * 100 / self.total
        self.scrollbar.setStartEnd(start, start + vis)

    def getText(self):
        return self.message

    def GUIcreate(self, parent):
        """Instantiate the native widgets (called by the GUI framework)."""
        self.instance = eWidget(parent)
        self.scrollbar = eSlider(self.instance)
        self.long_text = eLabel(self.instance)

    def GUIdelete(self):
        """Drop all native widget references."""
        self.long_text = None
        self.scrollbar = None
        self.instance = None
        self.right_text = None
        return

    def pageUp(self):
        """Scroll one page towards the top (no-op when text fits)."""
        if self.total is not None:
            curPos = self.long_text.position()
            if curPos.y() < 0:
                self.long_text.move(ePoint(curPos.x(), curPos.y() + self.pageHeight))
                # `and` used as a conditional statement: only move the right
                # column when split mode is on.
                self.split and self.right_text.move(ePoint(curPos.x(), curPos.y() + self.pageHeight))
                self.updateScrollbar()
        return

    def pageDown(self):
        """Scroll one page towards the bottom (no-op when at the last page)."""
        if self.total is not None:
            curPos = self.long_text.position()
            if self.total - self.pageHeight >= abs(curPos.y() - self.pageHeight):
                self.long_text.move(ePoint(curPos.x(), curPos.y() - self.pageHeight))
                self.split and self.right_text.move(ePoint(curPos.x(), curPos.y() - self.pageHeight))
                self.updateScrollbar()
        return

    def lastPage(self):
        """Jump to the final page by stepping down pages-1 times."""
        if self.pages is not None:
            i = 1
            while i < self.pages:
                self.pageDown()
                i += 1
        return

    def isAtLastPage(self):
        """Return True when no further pageDown is possible."""
        if self.total is not None:
            curPos = self.long_text.position()
            return self.total - self.pageHeight < abs(curPos.y() - self.pageHeight)
        else:
            return True
        return  # unreachable; kept from original

    def produceHTML(self):
        """HTMLComponent hook: render the raw text."""
        return self.getText()
lexus42/w17 | static/Brython3.1.1-20150328-091302/Lib/_abcoll.py | 688 | 5155 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
"""
### collection related types which are not exposed through builtin ###
## iterators ##
#fixme brython
#bytes_iterator = type(iter(b''))
bytes_iterator = type(iter(''))
#fixme brython
#bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
"""
def abstractmethod(self):
    # Deliberately shadows abc.abstractmethod imported above with a no-op
    # decorator -- presumably a Brython bootstrap workaround (abstractness
    # is not enforced here); verify before "fixing".
    return self
### ONE-TRICK PONIES ###
# class Iterable(metaclass=ABCMeta):  (metaclass disabled in this bootstrap copy)
class Iterable:
    """ABC-style marker for classes that provide ``__iter__``."""

    @abstractmethod
    def __iter__(self):
        # Abstract placeholder: an empty generator.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Iterable:
            return NotImplemented
        for base in C.__mro__:
            if "__iter__" in base.__dict__:
                return True
        return NotImplemented
# class Sized(metaclass=ABCMeta):  (metaclass disabled in this bootstrap copy)
class Sized:
    """ABC-style marker for classes that provide ``__len__``."""

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Sized:
            return NotImplemented
        for base in C.__mro__:
            if "__len__" in base.__dict__:
                return True
        return NotImplemented
# class Container(metaclass=ABCMeta):  (metaclass disabled in this bootstrap copy)
class Container:
    """ABC-style marker for classes that provide ``__contains__``."""

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        if cls is not Container:
            return NotImplemented
        for base in C.__mro__:
            if "__contains__" in base.__dict__:
                return True
        return NotImplemented
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping ABC: subclasses supply __getitem__, __iter__,
    __len__; everything else is derived from those."""

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        """Return self[key], or *default* when the key is missing."""
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Defined in terms of __getitem__, so any lookup error means absent.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    def keys(self):
        return KeysView(self)

    def items(self):
        return ItemsView(self)

    def values(self):
        return ValuesView(self)

    def __eq__(self, other):
        # Mappings compare equal when their (key, value) pairs match.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)
class MutableMapping(Mapping):
    """Mapping ABC with mutation: subclasses add __setitem__/__delitem__;
    pop/popitem/clear/update/setdefault are derived."""

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Sentinel distinguishing "no default supplied" from default=None.
    __marker = object()

    def pop(self, key, default=__marker):
        """Remove *key* and return its value; return *default* (or raise
        KeyError when none was given) if missing."""
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        """Remove and return an arbitrary (key, value) pair; KeyError if empty."""
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        # Repeatedly popitem() until the mapping raises KeyError (empty).
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        """update([other, ]**kwds) -- merge keys from a mapping, an object
        with .keys(), or an iterable of pairs, then from **kwds.

        NOTE: self is taken positionally so a key named 'self' can be
        passed in **kwds.
        """
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        """Return self[key]; insert and return *default* when missing."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default
#MutableMapping.register(dict)
| gpl-3.0 |
bheesham/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/stream.py | 673 | 2748 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file exports public symbols.
"""
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import UnsupportedFrameException
from mod_pywebsocket._stream_hixie75 import StreamHixie75
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
# These methods are intended to be used by WebSocket client developers to have
# their implementations receive broken data in tests.
from mod_pywebsocket._stream_hybi import create_close_frame
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import create_ping_frame
from mod_pywebsocket._stream_hybi import create_pong_frame
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_text_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
# vi:sts=4 sw=4 et
| mpl-2.0 |
resmo/ansible | lib/ansible/modules/network/aci/aci_aaa_user_certificate.py | 27 | 7876 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_aaa_user_certificate
short_description: Manage AAA user certificates (aaa:UserCert)
description:
- Manage AAA user certificates on Cisco ACI fabrics.
version_added: '2.5'
options:
aaa_user:
description:
- The name of the user to add a certificate to.
type: str
required: yes
aaa_user_type:
description:
- Whether this is a normal user or an appuser.
type: str
choices: [ appuser, user ]
default: user
certificate:
description:
- The PEM format public key extracted from the X.509 certificate.
type: str
aliases: [ cert_data, certificate_data ]
certificate_name:
description:
- The name of the user certificate entry in ACI.
type: str
aliases: [ cert_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(aaa_user) must exist before using this module in your playbook.
The M(aci_aaa_user) module can be used for this.
seealso:
- module: aci_aaa_user
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(aaa:UserCert).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Add a certificate to user
aci_aaa_user_certificate:
host: apic
username: admin
password: SomeSecretPassword
aaa_user: admin
certificate_name: admin
certificate_data: '{{ lookup("file", "pki/admin.crt") }}'
state: present
delegate_to: localhost
- name: Remove a certificate of a user
aci_aaa_user_certificate:
host: apic
username: admin
password: SomeSecretPassword
aaa_user: admin
certificate_name: admin
state: absent
delegate_to: localhost
- name: Query a certificate of a user
aci_aaa_user_certificate:
host: apic
username: admin
password: SomeSecretPassword
aaa_user: admin
certificate_name: admin
state: query
delegate_to: localhost
register: query_result
- name: Query all certificates of a user
aci_aaa_user_certificate:
host: apic
username: admin
password: SomeSecretPassword
aaa_user: admin
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# Maps the module's aaa_user_type option to the APIC object class and the
# relative-name (RN) prefix used when building the request URL for the
# parent user object.
ACI_MAPPING = dict(
    appuser=dict(
        aci_class='aaaAppUser',
        aci_mo='userext/appuser-',
    ),
    user=dict(
        aci_class='aaaUser',
        aci_mo='userext/user-',
    ),
)
def main():
    """Module entry point.

    Parses the module arguments, builds the APIC URL for the user (or
    appuser) and its child aaaUserCert object, fetches the existing
    configuration, then applies the requested state (present/absent)
    or simply returns the query result.
    """
    argument_spec = aci_argument_spec()
    argument_spec.update(
        aaa_user=dict(type='str', required=True),
        aaa_user_type=dict(type='str', default='user', choices=['appuser', 'user']),
        certificate=dict(type='str', aliases=['cert_data', 'certificate_data']),
        certificate_name=dict(type='str', aliases=['cert_name']),  # Not required for querying all objects
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # State-specific required parameters; 'query' requires none of them.
        required_if=[
            ['state', 'absent', ['aaa_user', 'certificate_name']],
            ['state', 'present', ['aaa_user', 'certificate', 'certificate_name']],
        ],
    )

    aaa_user = module.params['aaa_user']
    aaa_user_type = module.params['aaa_user_type']
    certificate = module.params['certificate']
    certificate_name = module.params['certificate_name']
    state = module.params['state']

    aci = ACIModule(module)
    # Root MO is the (app)user; the certificate is addressed as its child.
    aci.construct_url(
        root_class=dict(
            aci_class=ACI_MAPPING[aaa_user_type]['aci_class'],
            aci_rn=ACI_MAPPING[aaa_user_type]['aci_mo'] + aaa_user,
            module_object=aaa_user,
            target_filter={'name': aaa_user},
        ),
        subclass_1=dict(
            aci_class='aaaUserCert',
            aci_rn='usercert-{0}'.format(certificate_name),
            module_object=certificate_name,
            target_filter={'name': certificate_name},
        ),
    )

    aci.get_existing()

    if state == 'present':
        # Only the diff between the proposed and existing config is pushed.
        aci.payload(
            aci_class='aaaUserCert',
            class_config=dict(
                data=certificate,
                name=certificate_name,
            ),
        )
        aci.get_diff(aci_class='aaaUserCert')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
ChrisCuts/fnode | src/FunctionSelectionNode.py | 1 | 6818 | # -*- coding: utf-8 -*-
'''
Created on 17.09.2015
@author: derChris
'''
from pydispatch import dispatcher
import PlugIns.NodeTemplates
import PlugIns.MATLAB.MATLAB
import Pipe
class FunctionSelectionNode(PlugIns.NodeTemplates.NodeTemplate):
    """Compound node that wraps an ordered chain of MATLAB function nodes.

    The node exposes the union of its children's input/output connectors
    and, when a sequence is built, wires consecutive children together
    with temporary (widget-less) pipes.
    """

    _UPDATE_FUNCTION_SELECTION_WIDGET = 'UPDATE_FUNCTION_SELECTION_WIDGET'
    _DON_T_CREATE_WIDGET = 'DON_T_CREATE_WIDGET'

    def __init__(self, system, data= None):
        super().__init__(system, data)

        self._function_nodes = []
        self._name = 'Function Selection'

        # Restore child function nodes from serialized data, if present.
        if data and 'nodes' in data:
            for function_data in data['nodes']:
                self._function_nodes.append(
                    PlugIns.MATLAB.MATLAB.mFunctionNode(
                        function_data['name'], system, function_data))

        self._adopt_connectors()

    def _adopt_connectors(self):
        """Expose a copy of every child connector not already present on
        this compound node (shared by __init__ and add_function)."""
        for function_node in self._function_nodes:
            for tag, connector in function_node.inputs().items():
                if tag not in self._inputs:
                    self._inputs.update({tag: connector.copy(self)})
            for tag, connector in function_node.outputs().items():
                if tag not in self._outputs:
                    self._outputs.update({tag: connector.copy(self)})

    def get_function_nodes(self):
        """Return the (mutable) ordered list of contained function nodes."""
        return self._function_nodes

    def remove_function_node(self, node):
        """Remove *node* and drop connectors no remaining child provides."""
        self._function_nodes.remove(node)

        # Union of connectors still provided by the remaining children.
        inputs = {}
        outputs = {}
        for function_node in self._function_nodes:
            inputs.update(function_node.inputs())
            outputs.update(function_node.outputs())

        # BUGFIX: iterate over snapshots -- the original code popped from
        # self._inputs/self._outputs while iterating the dicts directly,
        # which raises "RuntimeError: dictionary changed size during
        # iteration" as soon as a connector actually has to be removed.
        for input_connection in list(self._inputs):
            if input_connection not in inputs:
                self._inputs[input_connection].clear()
                self._inputs.pop(input_connection)
        for output_connection in list(self._outputs):
            if output_connection not in outputs:
                self._outputs[output_connection].clear()
                self._outputs.pop(output_connection)

        dispatcher.send(self._UPDATE_FUNCTION_SELECTION_WIDGET, sender= self)

    def add_function(self, data):
        """Append a new mFunctionNode built from *data* and expose any of
        its connectors this node does not have yet."""
        name = data['name']
        self._function_nodes.append(
            PlugIns.MATLAB.MATLAB.mFunctionNode(name, self._system, data))

        self._adopt_connectors()

        dispatcher.send(self._UPDATE_FUNCTION_SELECTION_WIDGET, sender= self)

    def create_sequence_item(self, nodes, pipemap):
        """Flatten the child chain into a subsequence.

        External input pipes are rerouted to the first child exposing the
        tag, external output pipes to the last one; consecutive children
        are linked with temporary pipes that are deleted afterwards.
        Returns ``(subsequence, pipemap)``.
        """
        ### connect input pipes to first node with same input
        for tag, connector in self._inputs.items():
            if connector.pipe() and id(connector.pipe()) in pipemap:
                for function_node in self._function_nodes:
                    if tag in function_node.inputs():
                        virtual_connector = PlugIns.NodeTemplates.Connector(tag,
                                                                            style= 'output')
                        pipe = Pipe.Pipe(function_node.inputs()[tag], virtual_connector,
                                         flags= self._DON_T_CREATE_WIDGET)
                        # Re-key the pipemap entry from the external pipe to
                        # the internal one, preserving the variable id.
                        variable_id = pipemap[id(connector.pipe())][0]
                        pipemap.update({id(pipe): (variable_id, function_node.inputs()[tag])})
                        del(pipemap[id(connector.pipe())])
                        break

        virtual_connectors = []
        ### connect output pipes to last node with same output
        for tag, connector in self._outputs.items():
            if connector.pipe():
                for function_node in reversed(self._function_nodes):
                    if tag in function_node.outputs():
                        virtual_connector = PlugIns.NodeTemplates.Connector(tag,
                                                                            style= 'input')
                        pipe = Pipe.Pipe(function_node.outputs()[tag], virtual_connector,
                                         flags= self._DON_T_CREATE_WIDGET)
                        virtual_connectors.append(virtual_connector)
                        break

        ### create pipes
        subsequence = []
        pipes = set()
        for num, function_node in enumerate(self._function_nodes):
            # connect node with next except it's the last one
            if num < len(self._function_nodes)-1:
                for tag, connector in function_node.outputs().items():
                    if tag in self._function_nodes[num+1].inputs():
                        pipes.add(Pipe.Pipe(connector, self._function_nodes[num+1].inputs()[tag],
                                            flags= self._DON_T_CREATE_WIDGET))
            sequenceitem, pipemap = function_node.sequence_data(nodes, pipemap)
            subsequence.append(sequenceitem)

        # The chaining pipes were only needed while building the sequence.
        for pipe in pipes:
            pipe.delete()

        ### connect virtual output connectors with output connectors
        for virtual_connector in virtual_connectors:
            pipe = self._outputs[virtual_connector.name()].pipe()
            variable_id = pipemap[id(virtual_connector.pipe())][0]
            pipemap.update({id(pipe): (variable_id, pipe.connector_at_output())})
            # delete pipe
            del(pipemap[id(virtual_connector.pipe())])
            virtual_connector.clear()

        return subsequence, pipemap

    def get_data(self):
        """Serialize: base node data plus every child's serialized data."""
        data = super().get_data()
        data.update({'nodes': [function_node.get_data() for function_node in self._function_nodes]})
        return data
| gpl-2.0 |
mongodb/mongo-python-driver | pymongo/collection.py | 2 | 132067 | # Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection level utilities for Mongo."""
import datetime
import warnings
from collections import abc
from bson.code import Code
from bson.objectid import ObjectId
from bson.raw_bson import RawBSONDocument
from bson.codec_options import CodecOptions
from bson.son import SON
from pymongo import (common,
helpers,
message)
from pymongo.aggregation import (_CollectionAggregationCommand,
_CollectionRawAggregationCommand)
from pymongo.bulk import BulkOperationBuilder, _Bulk
from pymongo.command_cursor import CommandCursor, RawBatchCommandCursor
from pymongo.collation import validate_collation_or_none
from pymongo.change_stream import CollectionChangeStream
from pymongo.cursor import Cursor, RawBatchCursor
from pymongo.errors import (ConfigurationError,
InvalidName,
InvalidOperation,
OperationFailure)
from pymongo.helpers import _check_write_command_response
from pymongo.message import _UNICODE_REPLACE_CODEC_OPTIONS
from pymongo.operations import IndexModel
from pymongo.read_preferences import ReadPreference
from pymongo.results import (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
from pymongo.write_concern import WriteConcern
# Projection used with findAndModify-style commands -- presumably to pull
# only the 'value' field out of the server reply (used later in the file;
# not visible in this chunk).
_FIND_AND_MODIFY_DOC_FIELDS = {'value': 1}

# Deprecation message emitted for geoHaystack index helpers.
_HAYSTACK_MSG = (
    "geoHaystack indexes are deprecated as of MongoDB 4.4."
    " Instead, create a 2d index and use $geoNear or $geoWithin."
    " See https://dochub.mongodb.org/core/4.4-deprecate-geoHaystack")
class ReturnDocument(object):
    """Enum-like options for the ``return_document`` argument of
    :meth:`~pymongo.collection.Collection.find_one_and_replace` and
    :meth:`~pymongo.collection.Collection.find_one_and_update`.
    """

    # Hand back the document as it was before the update/replace, or
    # ``None`` when no document matched the query.
    BEFORE = False

    # Hand back the document after the update/replace (or the inserted one).
    AFTER = True
class Collection(common.BaseObject):
"""A Mongo collection.
"""
    def __init__(self, database, name, create=False, codec_options=None,
                 read_preference=None, write_concern=None, read_concern=None,
                 session=None, **kwargs):
        """Get / create a Mongo collection.

        Raises :class:`TypeError` if `name` is not an instance of
        :class:`basestring` (:class:`str` in python 3). Raises
        :class:`~pymongo.errors.InvalidName` if `name` is not a valid
        collection name. Any additional keyword arguments will be used
        as options passed to the create command. See
        :meth:`~pymongo.database.Database.create_collection` for valid
        options.

        If `create` is ``True``, `collation` is specified, or any additional
        keyword arguments are present, a ``create`` command will be
        sent, using ``session`` if specified. Otherwise, a ``create`` command
        will not be sent and the collection will be created implicitly on first
        use. The optional ``session`` argument is *only* used for the ``create``
        command, it is not associated with the collection afterward.

        :Parameters:
          - `database`: the database to get a collection from
          - `name`: the name of the collection to get
          - `create` (optional): if ``True``, force collection
            creation even without options being set
          - `codec_options` (optional): An instance of
            :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
            default) database.codec_options is used.
          - `read_preference` (optional): The read preference to use. If
            ``None`` (the default) database.read_preference is used.
          - `write_concern` (optional): An instance of
            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
            default) database.write_concern is used.
          - `read_concern` (optional): An instance of
            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
            default) database.read_concern is used.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. If a collation is provided,
            it will be passed to the create collection command. This option is
            only supported on MongoDB 3.4 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession` that is used with
            the create collection command
          - `**kwargs` (optional): additional keyword arguments will
            be passed as options for the create collection command

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.4
           Support the `collation` option.

        .. versionchanged:: 3.2
           Added the read_concern option.

        .. versionchanged:: 3.0
           Added the codec_options, read_preference, and write_concern options.
           Removed the uuid_subtype attribute.
           :class:`~pymongo.collection.Collection` no longer returns an
           instance of :class:`~pymongo.collection.Collection` for attribute
           names with leading underscores. You must use dict-style lookups
           instead::

               collection['__my_collection__']

           Not:

               collection.__my_collection__

        .. versionchanged:: 2.2
           Removed deprecated argument: options

        .. versionadded:: 2.1
           uuid_subtype attribute

        .. mongodoc:: collections
        """
        # Unset options fall back to the parent database's settings.
        super(Collection, self).__init__(
            codec_options or database.codec_options,
            read_preference or database.read_preference,
            write_concern or database.write_concern,
            read_concern or database.read_concern)
        # Validate the collection name against server-side restrictions
        # before any network traffic happens.
        if not isinstance(name, str):
            raise TypeError("name must be an instance of str")

        if not name or ".." in name:
            raise InvalidName("collection names cannot be empty")
        if "$" in name and not (name.startswith("oplog.$main") or
                                name.startswith("$cmd")):
            raise InvalidName("collection names must not "
                              "contain '$': %r" % name)
        if name[0] == "." or name[-1] == ".":
            raise InvalidName("collection names must not start "
                              "or end with '.': %r" % name)
        if "\x00" in name:
            raise InvalidName("collection names must not contain the "
                              "null character")
        collation = validate_collation_or_none(kwargs.pop('collation', None))

        self.__database = database
        self.__name = name
        self.__full_name = "%s.%s" % (self.__database.name, self.__name)
        # Only send an explicit "create" command when the caller asked for
        # it or supplied create-time options; otherwise creation is lazy.
        if create or kwargs or collation:
            self.__create(kwargs, collation, session)

        # Options used when decoding write-command replies: never raise on
        # bad UTF-8 in server messages, and always decode to plain dicts.
        self.__write_response_codec_options = self.codec_options._replace(
            unicode_decode_error_handler='replace',
            document_class=dict)
    def _socket_for_reads(self, session):
        # Delegate socket selection to the client using this collection's
        # effective read preference (which the session may override).
        return self.__database.client._socket_for_reads(
            self._read_preference_for(session), session)
    def _socket_for_writes(self, session):
        # Acquire a socket suitable for write operations from the client.
        return self.__database.client._socket_for_writes(session)
    def _command(self, sock_info, command, slave_ok=False,
                 read_preference=None,
                 codec_options=None, check=True, allowable_errors=None,
                 read_concern=None,
                 write_concern=None,
                 collation=None,
                 session=None,
                 retryable_write=False,
                 user_fields=None):
        """Internal command helper.

        :Parameters:
          - `sock_info` - A SocketInfo instance.
          - `command` - The command itself, as a SON instance.
          - `slave_ok`: whether to set the SlaveOkay wire protocol bit.
          - `read_preference` (optional) - The read preference to use; when
            ``None`` this collection's preference for `session` is used.
          - `codec_options` (optional) - An instance of
            :class:`~bson.codec_options.CodecOptions`.
          - `check`: raise OperationFailure if there are errors
          - `allowable_errors`: errors to ignore if `check` is True
          - `read_concern` (optional) - An instance of
            :class:`~pymongo.read_concern.ReadConcern`.
          - `write_concern`: An instance of
            :class:`~pymongo.write_concern.WriteConcern`. This option is only
            valid for MongoDB 3.4 and above.
          - `collation` (optional) - An instance of
            :class:`~pymongo.collation.Collation`.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.
          - `retryable_write` (optional): True if this command is a retryable
            write.
          - `user_fields` (optional): Response fields that should be decoded
            using the TypeDecoders from codec_options, passed to
            bson._decode_all_selective.

        :Returns:
          The result document.
        """
        # Borrow the caller's session, or implicitly start a temporary one
        # for the duration of this single command.
        with self.__database.client._tmp_session(session) as s:
            return sock_info.command(
                self.__database.name,
                command,
                slave_ok,
                read_preference or self._read_preference_for(session),
                codec_options or self.codec_options,
                check,
                allowable_errors,
                read_concern=read_concern,
                write_concern=write_concern,
                parse_write_concern_error=True,
                collation=collation,
                session=s,
                client=self.__database.client,
                retryable_write=retryable_write,
                user_fields=user_fields)
    def __create(self, options, collation, session):
        """Sends a create command with the given options.
        """
        cmd = SON([("create", self.__name)])
        if options:
            if "size" in options:
                # Capped-collection size must be numeric on the wire.
                options["size"] = float(options["size"])
            cmd.update(options)
        # "create" is a write: always run against the primary with this
        # collection's effective write concern.
        with self._socket_for_writes(session) as sock_info:
            self._command(
                sock_info, cmd, read_preference=ReadPreference.PRIMARY,
                write_concern=self._write_concern_for(session),
                collation=collation, session=session)
def __getattr__(self, name):
"""Get a sub-collection of this collection by name.
Raises InvalidName if an invalid collection name is used.
:Parameters:
- `name`: the name of the collection to get
"""
if name.startswith('_'):
full_name = "%s.%s" % (self.__name, name)
raise AttributeError(
"Collection has no attribute %r. To access the %s"
" collection, use database['%s']." % (
name, full_name, full_name))
return self.__getitem__(name)
def __getitem__(self, name):
return Collection(self.__database,
"%s.%s" % (self.__name, name),
False,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
def __repr__(self):
return "Collection(%r, %r)" % (self.__database, self.__name)
def __eq__(self, other):
if isinstance(other, Collection):
return (self.__database == other.database and
self.__name == other.name)
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__database, self.__name))
@property
def full_name(self):
"""The full name of this :class:`Collection`.
The full name is of the form `database_name.collection_name`.
"""
return self.__full_name
@property
def name(self):
"""The name of this :class:`Collection`."""
return self.__name
@property
def database(self):
"""The :class:`~pymongo.database.Database` that this
:class:`Collection` is a part of.
"""
return self.__database
    def with_options(self, codec_options=None, read_preference=None,
                     write_concern=None, read_concern=None):
        """Get a clone of this collection changing the specified settings.

          >>> coll1.read_preference
          Primary()
          >>> from pymongo import ReadPreference
          >>> coll2 = coll1.with_options(read_preference=ReadPreference.SECONDARY)
          >>> coll1.read_preference
          Primary()
          >>> coll2.read_preference
          Secondary(tag_sets=None)

        :Parameters:
          - `codec_options` (optional): An instance of
            :class:`~bson.codec_options.CodecOptions`. If ``None`` (the
            default) the :attr:`codec_options` of this :class:`Collection`
            is used.
          - `read_preference` (optional): The read preference to use. If
            ``None`` (the default) the :attr:`read_preference` of this
            :class:`Collection` is used. See :mod:`~pymongo.read_preferences`
            for options.
          - `write_concern` (optional): An instance of
            :class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
            default) the :attr:`write_concern` of this :class:`Collection`
            is used.
          - `read_concern` (optional): An instance of
            :class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
            default) the :attr:`read_concern` of this :class:`Collection`
            is used.
        """
        # A new Collection object over the same namespace; unspecified
        # options are inherited from this instance (create=False, so no
        # command is sent).
        return Collection(self.__database,
                          self.__name,
                          False,
                          codec_options or self.codec_options,
                          read_preference or self.read_preference,
                          write_concern or self.write_concern,
                          read_concern or self.read_concern)
    def initialize_unordered_bulk_op(self, bypass_document_validation=False):
        """**DEPRECATED** - Initialize an unordered batch of write operations.

        Operations will be performed on the server in arbitrary order,
        possibly in parallel. All operations will be attempted.

        :Parameters:
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``.

        Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.

        See :ref:`unordered_bulk` for examples.

        .. note:: `bypass_document_validation` requires server version
          **>= 3.2**

        .. versionchanged:: 3.5
           Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
           instead.

        .. versionchanged:: 3.2
          Added bypass_document_validation support

        .. versionadded:: 2.7
        """
        # Deprecated shim kept for backward compatibility; stacklevel=2
        # points the warning at the caller's line.
        warnings.warn("initialize_unordered_bulk_op is deprecated",
                      DeprecationWarning, stacklevel=2)
        return BulkOperationBuilder(self, False, bypass_document_validation)
    def initialize_ordered_bulk_op(self, bypass_document_validation=False):
        """**DEPRECATED** - Initialize an ordered batch of write operations.

        Operations will be performed on the server serially, in the
        order provided. If an error occurs all remaining operations
        are aborted.

        :Parameters:
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``.

        Returns a :class:`~pymongo.bulk.BulkOperationBuilder` instance.

        See :ref:`ordered_bulk` for examples.

        .. note:: `bypass_document_validation` requires server version
          **>= 3.2**

        .. versionchanged:: 3.5
           Deprecated. Use :meth:`~pymongo.collection.Collection.bulk_write`
           instead.

        .. versionchanged:: 3.2
          Added bypass_document_validation support

        .. versionadded:: 2.7
        """
        # Deprecated shim kept for backward compatibility; stacklevel=2
        # points the warning at the caller's line.
        warnings.warn("initialize_ordered_bulk_op is deprecated",
                      DeprecationWarning, stacklevel=2)
        return BulkOperationBuilder(self, True, bypass_document_validation)
    def bulk_write(self, requests, ordered=True,
                   bypass_document_validation=False, session=None):
        """Send a batch of write operations to the server.

        Requests are passed as a list of write operation instances (
        :class:`~pymongo.operations.InsertOne`,
        :class:`~pymongo.operations.UpdateOne`,
        :class:`~pymongo.operations.UpdateMany`,
        :class:`~pymongo.operations.ReplaceOne`,
        :class:`~pymongo.operations.DeleteOne`, or
        :class:`~pymongo.operations.DeleteMany`).

          >>> for doc in db.test.find({}):
          ...     print(doc)
          ...
          {'x': 1, '_id': ObjectId('54f62e60fba5226811f634ef')}
          {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
          >>> # DeleteMany, UpdateOne, and UpdateMany are also available.
          ...
          >>> from pymongo import InsertOne, DeleteOne, ReplaceOne
          >>> requests = [InsertOne({'y': 1}), DeleteOne({'x': 1}),
          ...             ReplaceOne({'w': 1}, {'z': 1}, upsert=True)]
          >>> result = db.test.bulk_write(requests)
          >>> result.inserted_count
          1
          >>> result.deleted_count
          1
          >>> result.modified_count
          0
          >>> result.upserted_ids
          {2: ObjectId('54f62ee28891e756a6e1abd5')}
          >>> for doc in db.test.find({}):
          ...     print(doc)
          ...
          {'x': 1, '_id': ObjectId('54f62e60fba5226811f634f0')}
          {'y': 1, '_id': ObjectId('54f62ee2fba5226811f634f1')}
          {'z': 1, '_id': ObjectId('54f62ee28891e756a6e1abd5')}

        :Parameters:
          - `requests`: A list of write operations (see examples above).
          - `ordered` (optional): If ``True`` (the default) requests will be
            performed on the server serially, in the order provided. If an error
            occurs all remaining operations are aborted. If ``False`` requests
            will be performed on the server in arbitrary order, possibly in
            parallel, and all operations will be attempted.
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          An instance of :class:`~pymongo.results.BulkWriteResult`.

        .. seealso:: :ref:`writes-and-ids`

        .. note:: `bypass_document_validation` requires server version
          **>= 3.2**

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.2
          Added bypass_document_validation support

        .. versionadded:: 3.0
        """
        common.validate_list("requests", requests)

        blk = _Bulk(self, ordered, bypass_document_validation)
        for request in requests:
            try:
                request._add_to_bulk(blk)
            except AttributeError:
                # Anything lacking _add_to_bulk is not a write operation.
                raise TypeError("%r is not a valid request" % (request,))

        write_concern = self._write_concern_for(session)
        bulk_api_result = blk.execute(write_concern, session)
        if bulk_api_result is not None:
            return BulkWriteResult(bulk_api_result, True)
        # Unacknowledged write concern: no server result is available.
        return BulkWriteResult({}, False)
    def _legacy_write(self, sock_info, name, cmd, op_id,
                      bypass_doc_val, func, *args):
        """Internal legacy unacknowledged write helper.

        Encodes the message with *func(args)*, sends it without waiting
        for acknowledgement, and publishes command monitoring (APM)
        events when listeners are enabled.
        """
        # Cannot have both unacknowledged write and bypass document validation.
        if bypass_doc_val and sock_info.max_wire_version >= 4:
            raise OperationFailure("Cannot set bypass_document_validation with"
                                   " unacknowledged write concern")
        listeners = self.database.client._event_listeners
        publish = listeners.enabled_for_commands

        if publish:
            start = datetime.datetime.now()
        # Message-building time is charged to the command duration too.
        args = args + (sock_info.compression_context,)
        rqst_id, msg, max_size = func(*args)
        if publish:
            duration = datetime.datetime.now() - start
            listeners.publish_command_start(
                cmd, self.__database.name, rqst_id, sock_info.address, op_id,
                sock_info.service_id)
            start = datetime.datetime.now()
        try:
            result = sock_info.legacy_write(rqst_id, msg, max_size, False)
        except Exception as exc:
            if publish:
                dur = (datetime.datetime.now() - start) + duration
                if isinstance(exc, OperationFailure):
                    details = exc.details
                    # Succeed if GLE was successful and this is a write error.
                    if details.get("ok") and "n" in details:
                        reply = message._convert_write_result(
                            name, cmd, details)
                        listeners.publish_command_success(
                            dur, reply, name, rqst_id, sock_info.address,
                            op_id, sock_info.service_id)
                        raise
                else:
                    details = message._convert_exception(exc)
                listeners.publish_command_failure(
                    dur, details, name, rqst_id, sock_info.address, op_id,
                    sock_info.service_id)
            raise
        if publish:
            if result is not None:
                reply = message._convert_write_result(name, cmd, result)
            else:
                # Comply with APM spec.
                reply = {'ok': 1}
            duration = (datetime.datetime.now() - start) + duration
            listeners.publish_command_success(
                duration, reply, name, rqst_id, sock_info.address, op_id,
                sock_info.service_id)
        return result
    def _insert_one(
            self, doc, ordered,
            check_keys, write_concern, op_id, bypass_doc_val,
            session):
        """Internal helper for inserting a single document.

        Builds an ``insert`` command for *doc* and executes it through the
        client's retryable-write machinery, falling back to legacy OP_INSERT
        when the server lacks OP_MSG support and the write is unacknowledged.
        Returns the document's ``_id`` for non-raw documents (``None``
        otherwise) so the caller can report the inserted id.
        """
        write_concern = write_concern or self.write_concern
        acknowledged = write_concern.acknowledged
        command = SON([('insert', self.name),
                       ('ordered', ordered),
                       ('documents', [doc])])
        if not write_concern.is_server_default:
            command['writeConcern'] = write_concern.document
        def _insert_command(session, sock_info, retryable_write):
            # Callback run (possibly more than once) by _retryable_write on
            # the connection it selects.
            if not sock_info.op_msg_enabled and not acknowledged:
                # Legacy OP_INSERT.
                return self._legacy_write(
                    sock_info, 'insert', command, op_id,
                    bypass_doc_val, message.insert, self.__full_name,
                    [doc], check_keys, False, write_concern.document, False,
                    self.__write_response_codec_options)
            if bypass_doc_val and sock_info.max_wire_version >= 4:
                command['bypassDocumentValidation'] = True
            result = sock_info.command(
                self.__database.name,
                command,
                write_concern=write_concern,
                codec_options=self.__write_response_codec_options,
                check_keys=check_keys,
                session=session,
                client=self.__database.client,
                retryable_write=retryable_write)
            _check_write_command_response(result)
        self.__database.client._retryable_write(
            acknowledged, _insert_command, session)
        if not isinstance(doc, RawBSONDocument):
            # RawBSONDocument is immutable, so no client-generated _id was
            # added to it; only report _id for regular mappings.
            return doc.get('_id')
    def insert_one(self, document, bypass_document_validation=False,
                   session=None):
        """Insert a single document.

          >>> db.test.count_documents({'x': 1})
          0
          >>> result = db.test.insert_one({'x': 1})
          >>> result.inserted_id
          ObjectId('54f112defba522406c9cc208')
          >>> db.test.find_one({'x': 1})
          {'x': 1, '_id': ObjectId('54f112defba522406c9cc208')}

        :Parameters:
          - `document`: The document to insert. Must be a mutable mapping
            type. If the document does not have an _id field one will be
            added automatically.
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.InsertOneResult`.

        .. seealso:: :ref:`writes-and-ids`

        .. note:: `bypass_document_validation` requires server version
          **>= 3.2**

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.2
          Added bypass_document_validation support

        .. versionadded:: 3.0
        """
        common.validate_is_document_type("document", document)
        # Generate the _id client side so InsertOneResult can report it
        # without a server round trip.  RawBSONDocument is immutable and is
        # left untouched.
        if not (isinstance(document, RawBSONDocument) or "_id" in document):
            document["_id"] = ObjectId()
        write_concern = self._write_concern_for(session)
        return InsertOneResult(
            self._insert_one(
                document, ordered=True, check_keys=False,
                write_concern=write_concern, op_id=None,
                bypass_doc_val=bypass_document_validation, session=session),
            write_concern.acknowledged)
def insert_many(self, documents, ordered=True,
bypass_document_validation=False, session=None):
"""Insert an iterable of documents.
>>> db.test.count_documents({})
0
>>> result = db.test.insert_many([{'x': i} for i in range(2)])
>>> result.inserted_ids
[ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
>>> db.test.count_documents({})
2
:Parameters:
- `documents`: A iterable of documents to insert.
- `ordered` (optional): If ``True`` (the default) documents will be
inserted on the server serially, in the order provided. If an error
occurs all remaining inserts are aborted. If ``False``, documents
will be inserted on the server in arbitrary order, possibly in
parallel, and all document inserts will be attempted.
- `bypass_document_validation`: (optional) If ``True``, allows the
write to opt-out of document level validation. Default is
``False``.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
:Returns:
An instance of :class:`~pymongo.results.InsertManyResult`.
.. seealso:: :ref:`writes-and-ids`
.. note:: `bypass_document_validation` requires server version
**>= 3.2**
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.2
Added bypass_document_validation support
.. versionadded:: 3.0
"""
if (not isinstance(documents, abc.Iterable)
or isinstance(documents, abc.Mapping)
or not documents):
raise TypeError("documents must be a non-empty list")
inserted_ids = []
def gen():
"""A generator that validates documents and handles _ids."""
for document in documents:
common.validate_is_document_type("document", document)
if not isinstance(document, RawBSONDocument):
if "_id" not in document:
document["_id"] = ObjectId()
inserted_ids.append(document["_id"])
yield (message._INSERT, document)
write_concern = self._write_concern_for(session)
blk = _Bulk(self, ordered, bypass_document_validation)
blk.ops = [doc for doc in gen()]
blk.execute(write_concern, session=session)
return InsertManyResult(inserted_ids, write_concern.acknowledged)
    def _update(self, sock_info, criteria, document, upsert=False,
                check_keys=False, multi=False,
                write_concern=None, op_id=None, ordered=True,
                bypass_doc_val=False, collation=None, array_filters=None,
                hint=None, session=None, retryable_write=False):
        """Internal update / replace helper.

        Builds and runs a single ``update`` command, gating collation,
        arrayFilters and hint on the connected server's wire version.
        Returns a shallow copy of the command response augmented with
        ``updatedExisting`` and a flattened ``upserted`` _id for backward
        compatibility, or ``None`` for unacknowledged writes.
        """
        common.validate_boolean("upsert", upsert)
        collation = validate_collation_or_none(collation)
        write_concern = write_concern or self.write_concern
        acknowledged = write_concern.acknowledged
        update_doc = SON([('q', criteria),
                          ('u', document),
                          ('multi', multi),
                          ('upsert', upsert)])
        if collation is not None:
            if sock_info.max_wire_version < 5:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.4+ to use collations.')
            elif not acknowledged:
                raise ConfigurationError(
                    'Collation is unsupported for unacknowledged writes.')
            else:
                update_doc['collation'] = collation
        if array_filters is not None:
            if sock_info.max_wire_version < 6:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.6+ to use array_filters.')
            elif not acknowledged:
                raise ConfigurationError(
                    'arrayFilters is unsupported for unacknowledged writes.')
            else:
                update_doc['arrayFilters'] = array_filters
        if hint is not None:
            if sock_info.max_wire_version < 5:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.4+ to use hint.')
            elif not acknowledged:
                raise ConfigurationError(
                    'hint is unsupported for unacknowledged writes.')
            # Index specs given as key/direction pairs are converted to the
            # server's index-document form; string hints pass through as-is.
            if not isinstance(hint, str):
                hint = helpers._index_document(hint)
            update_doc['hint'] = hint
        command = SON([('update', self.name),
                       ('ordered', ordered),
                       ('updates', [update_doc])])
        if not write_concern.is_server_default:
            command['writeConcern'] = write_concern.document
        if not sock_info.op_msg_enabled and not acknowledged:
            # Legacy OP_UPDATE.
            return self._legacy_write(
                sock_info, 'update', command, op_id,
                bypass_doc_val, message.update, self.__full_name, upsert,
                multi, criteria, document, False, write_concern.document,
                check_keys, self.__write_response_codec_options)
        # Update command.
        if bypass_doc_val and sock_info.max_wire_version >= 4:
            command['bypassDocumentValidation'] = True
        # The command result has to be published for APM unmodified
        # so we make a shallow copy here before adding updatedExisting.
        result = sock_info.command(
            self.__database.name,
            command,
            write_concern=write_concern,
            codec_options=self.__write_response_codec_options,
            session=session,
            client=self.__database.client,
            retryable_write=retryable_write).copy()
        _check_write_command_response(result)
        # Add the updatedExisting field for compatibility.
        if result.get('n') and 'upserted' not in result:
            result['updatedExisting'] = True
        else:
            result['updatedExisting'] = False
            # MongoDB >= 2.6.0 returns the upsert _id in an array
            # element. Break it out for backward compatibility.
            if 'upserted' in result:
                result['upserted'] = result['upserted'][0]['_id']
        if not acknowledged:
            return None
        return result
def _update_retryable(
self, criteria, document, upsert=False,
check_keys=False, multi=False,
write_concern=None, op_id=None, ordered=True,
bypass_doc_val=False, collation=None, array_filters=None,
hint=None, session=None):
"""Internal update / replace helper."""
def _update(session, sock_info, retryable_write):
return self._update(
sock_info, criteria, document, upsert=upsert,
check_keys=check_keys, multi=multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
bypass_doc_val=bypass_doc_val, collation=collation,
array_filters=array_filters, hint=hint, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_update, session)
    def replace_one(self, filter, replacement, upsert=False,
                    bypass_document_validation=False, collation=None,
                    hint=None, session=None):
        """Replace a single document matching the filter.

          >>> for doc in db.test.find({}):
          ...     print(doc)
          ...
          {'x': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')}
          >>> result = db.test.replace_one({'x': 1}, {'y': 1})
          >>> result.matched_count
          1
          >>> result.modified_count
          1
          >>> for doc in db.test.find({}):
          ...     print(doc)
          ...
          {'y': 1, '_id': ObjectId('54f4c5befba5220aa4d6dee7')}

        The *upsert* option can be used to insert a new document if a matching
        document does not exist.

          >>> result = db.test.replace_one({'x': 1}, {'x': 1}, True)
          >>> result.matched_count
          0
          >>> result.modified_count
          0
          >>> result.upserted_id
          ObjectId('54f11e5c8891e756a6e1abd4')
          >>> db.test.find_one({'x': 1})
          {'x': 1, '_id': ObjectId('54f11e5c8891e756a6e1abd4')}

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``. This option is only supported on MongoDB 3.2 and above.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.2 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.UpdateResult`.

        .. versionchanged:: 3.11
           Added ``hint`` parameter.
        .. versionchanged:: 3.6
           Added ``session`` parameter.
        .. versionchanged:: 3.4
          Added the `collation` option.
        .. versionchanged:: 3.2
          Added bypass_document_validation support.

        .. versionadded:: 3.0
        """
        common.validate_is_mapping("filter", filter)
        # Rejects replacement documents containing $-operators; use
        # update_one/update_many for operator-based modifications.
        common.validate_ok_for_replace(replacement)
        write_concern = self._write_concern_for(session)
        return UpdateResult(
            self._update_retryable(
                filter, replacement, upsert,
                write_concern=write_concern,
                bypass_doc_val=bypass_document_validation,
                collation=collation, hint=hint, session=session),
            write_concern.acknowledged)
    def update_one(self, filter, update, upsert=False,
                   bypass_document_validation=False,
                   collation=None, array_filters=None, hint=None,
                   session=None):
        """Update a single document matching the filter.

          >>> for doc in db.test.find():
          ...     print(doc)
          ...
          {'x': 1, '_id': 0}
          {'x': 1, '_id': 1}
          {'x': 1, '_id': 2}
          >>> result = db.test.update_one({'x': 1}, {'$inc': {'x': 3}})
          >>> result.matched_count
          1
          >>> result.modified_count
          1
          >>> for doc in db.test.find():
          ...     print(doc)
          ...
          {'x': 4, '_id': 0}
          {'x': 1, '_id': 1}
          {'x': 1, '_id': 2}

        :Parameters:
          - `filter`: A query that matches the document to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `bypass_document_validation`: (optional) If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``. This option is only supported on MongoDB 3.2 and above.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply. This option is only
            supported on MongoDB 3.6 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.2 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.UpdateResult`.

        .. versionchanged:: 3.11
           Added ``hint`` parameter.
        .. versionchanged:: 3.9
           Added the ability to accept a pipeline as the ``update``.
        .. versionchanged:: 3.6
           Added the ``array_filters`` and ``session`` parameters.
        .. versionchanged:: 3.4
          Added the ``collation`` option.
        .. versionchanged:: 3.2
          Added ``bypass_document_validation`` support.

        .. versionadded:: 3.0
        """
        common.validate_is_mapping("filter", filter)
        # Requires $-operators (or a pipeline); rejects plain replacement
        # documents — those belong in replace_one.
        common.validate_ok_for_update(update)
        common.validate_list_or_none('array_filters', array_filters)
        write_concern = self._write_concern_for(session)
        # check_keys=False because update documents legitimately contain
        # $-prefixed keys.
        return UpdateResult(
            self._update_retryable(
                filter, update, upsert, check_keys=False,
                write_concern=write_concern,
                bypass_doc_val=bypass_document_validation,
                collation=collation, array_filters=array_filters,
                hint=hint, session=session),
            write_concern.acknowledged)
    def update_many(self, filter, update, upsert=False, array_filters=None,
                    bypass_document_validation=False, collation=None,
                    hint=None, session=None):
        """Update one or more documents that match the filter.

          >>> for doc in db.test.find():
          ...     print(doc)
          ...
          {'x': 1, '_id': 0}
          {'x': 1, '_id': 1}
          {'x': 1, '_id': 2}
          >>> result = db.test.update_many({'x': 1}, {'$inc': {'x': 3}})
          >>> result.matched_count
          3
          >>> result.modified_count
          3
          >>> for doc in db.test.find():
          ...     print(doc)
          ...
          {'x': 4, '_id': 0}
          {'x': 4, '_id': 1}
          {'x': 4, '_id': 2}

        :Parameters:
          - `filter`: A query that matches the documents to update.
          - `update`: The modifications to apply.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `bypass_document_validation` (optional): If ``True``, allows the
            write to opt-out of document level validation. Default is
            ``False``. This option is only supported on MongoDB 3.2 and above.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `array_filters` (optional): A list of filters specifying which
            array elements an update should apply. This option is only
            supported on MongoDB 3.6 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.2 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.UpdateResult`.

        .. versionchanged:: 3.11
           Added ``hint`` parameter.
        .. versionchanged:: 3.9
           Added the ability to accept a pipeline as the `update`.
        .. versionchanged:: 3.6
           Added ``array_filters`` and ``session`` parameters.
        .. versionchanged:: 3.4
          Added the `collation` option.
        .. versionchanged:: 3.2
          Added bypass_document_validation support.

        .. versionadded:: 3.0
        """
        common.validate_is_mapping("filter", filter)
        common.validate_ok_for_update(update)
        common.validate_list_or_none('array_filters', array_filters)
        write_concern = self._write_concern_for(session)
        # multi=True targets every matching document; note multi updates are
        # never retryable.
        return UpdateResult(
            self._update_retryable(
                filter, update, upsert, check_keys=False, multi=True,
                write_concern=write_concern,
                bypass_doc_val=bypass_document_validation,
                collation=collation, array_filters=array_filters,
                hint=hint, session=session),
            write_concern.acknowledged)
def drop(self, session=None):
"""Alias for :meth:`~pymongo.database.Database.drop_collection`.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
The following two calls are equivalent:
>>> db.foo.drop()
>>> db.drop_collection("foo")
.. versionchanged:: 3.7
:meth:`drop` now respects this :class:`Collection`'s :attr:`write_concern`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
dbo = self.__database.client.get_database(
self.__database.name,
self.codec_options,
self.read_preference,
self.write_concern,
self.read_concern)
dbo.drop_collection(self.__name, session=session)
    def _delete(
            self, sock_info, criteria, multi,
            write_concern=None, op_id=None, ordered=True,
            collation=None, hint=None, session=None, retryable_write=False):
        """Internal delete helper.

        Builds and runs a single ``delete`` command (limit 1 unless *multi*),
        gating collation and hint on the server's wire version.  Falls back
        to legacy OP_DELETE for unacknowledged writes on pre-OP_MSG servers.
        Returns the raw command response.
        """
        common.validate_is_mapping("filter", criteria)
        write_concern = write_concern or self.write_concern
        acknowledged = write_concern.acknowledged
        # limit 0 means "delete all matches"; limit 1 deletes a single doc.
        delete_doc = SON([('q', criteria),
                          ('limit', int(not multi))])
        collation = validate_collation_or_none(collation)
        if collation is not None:
            if sock_info.max_wire_version < 5:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.4+ to use collations.')
            elif not acknowledged:
                raise ConfigurationError(
                    'Collation is unsupported for unacknowledged writes.')
            else:
                delete_doc['collation'] = collation
        if hint is not None:
            if sock_info.max_wire_version < 5:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.4+ to use hint.')
            elif not acknowledged:
                raise ConfigurationError(
                    'hint is unsupported for unacknowledged writes.')
            # Key/direction index specs become index documents; string
            # hints (index names) pass through unchanged.
            if not isinstance(hint, str):
                hint = helpers._index_document(hint)
            delete_doc['hint'] = hint
        command = SON([('delete', self.name),
                       ('ordered', ordered),
                       ('deletes', [delete_doc])])
        if not write_concern.is_server_default:
            command['writeConcern'] = write_concern.document
        if not sock_info.op_msg_enabled and not acknowledged:
            # Legacy OP_DELETE.
            return self._legacy_write(
                sock_info, 'delete', command, op_id,
                False, message.delete, self.__full_name, criteria,
                False, write_concern.document,
                self.__write_response_codec_options,
                int(not multi))
        # Delete command.
        result = sock_info.command(
            self.__database.name,
            command,
            write_concern=write_concern,
            codec_options=self.__write_response_codec_options,
            session=session,
            client=self.__database.client,
            retryable_write=retryable_write)
        _check_write_command_response(result)
        return result
def _delete_retryable(
self, criteria, multi,
write_concern=None, op_id=None, ordered=True,
collation=None, hint=None, session=None):
"""Internal delete helper."""
def _delete(session, sock_info, retryable_write):
return self._delete(
sock_info, criteria, multi,
write_concern=write_concern, op_id=op_id, ordered=ordered,
collation=collation, hint=hint, session=session,
retryable_write=retryable_write)
return self.__database.client._retryable_write(
(write_concern or self.write_concern).acknowledged and not multi,
_delete, session)
    def delete_one(self, filter, collation=None, hint=None, session=None):
        """Delete a single document matching the filter.

          >>> db.test.count_documents({'x': 1})
          3
          >>> result = db.test.delete_one({'x': 1})
          >>> result.deleted_count
          1
          >>> db.test.count_documents({'x': 1})
          2

        :Parameters:
          - `filter`: A query that matches the document to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.4 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.DeleteResult`.

        .. versionchanged:: 3.11
           Added ``hint`` parameter.
        .. versionchanged:: 3.6
           Added ``session`` parameter.
        .. versionchanged:: 3.4
          Added the `collation` option.

        .. versionadded:: 3.0
        """
        write_concern = self._write_concern_for(session)
        # multi=False: the server removes at most one matching document.
        return DeleteResult(
            self._delete_retryable(
                filter, False,
                write_concern=write_concern,
                collation=collation, hint=hint, session=session),
            write_concern.acknowledged)
    def delete_many(self, filter, collation=None, hint=None, session=None):
        """Delete one or more documents matching the filter.

          >>> db.test.count_documents({'x': 1})
          3
          >>> result = db.test.delete_many({'x': 1})
          >>> result.deleted_count
          3
          >>> db.test.count_documents({'x': 1})
          0

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.4 and above.
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.

        :Returns:
          - An instance of :class:`~pymongo.results.DeleteResult`.

        .. versionchanged:: 3.11
           Added ``hint`` parameter.
        .. versionchanged:: 3.6
           Added ``session`` parameter.
        .. versionchanged:: 3.4
          Added the `collation` option.

        .. versionadded:: 3.0
        """
        write_concern = self._write_concern_for(session)
        # multi=True: the server removes every matching document.
        return DeleteResult(
            self._delete_retryable(
                filter, True,
                write_concern=write_concern,
                collation=collation, hint=hint, session=session),
            write_concern.acknowledged)
def find_one(self, filter=None, *args, **kwargs):
"""Get a single document from the database.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single document, or ``None`` if no matching
document is found.
The :meth:`find_one` method obeys the :attr:`read_preference` of
this :class:`Collection`.
:Parameters:
- `filter` (optional): a dictionary specifying
the query to be performed OR any other type to be used as
the value for a query for ``"_id"``.
- `*args` (optional): any additional positional arguments
are the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
>>> collection.find_one(max_time_ms=100)
"""
if (filter is not None and not
isinstance(filter, abc.Mapping)):
filter = {"_id": filter}
cursor = self.find(filter, *args, **kwargs)
for result in cursor.limit(-1):
return result
return None
    def find(self, *args, **kwargs):
        """Query the database.

        The `filter` argument is a prototype document that all results
        must match. For example:

        >>> db.test.find({"hello": "world"})

        only matches documents that have a key "hello" with value
        "world".  Matches can have other keys *in addition* to
        "hello". The `projection` argument is used to specify a subset
        of fields that should be included in the result documents. By
        limiting results to a certain subset of fields you can cut
        down on network traffic and decoding time.

        Raises :class:`TypeError` if any of the arguments are of
        improper type. Returns an instance of
        :class:`~pymongo.cursor.Cursor` corresponding to this query.

        The :meth:`find` method obeys the :attr:`read_preference` of
        this :class:`Collection`.

        :Parameters:
          - `filter` (optional): a SON object specifying elements which
            must be present for a document to be included in the
            result set
          - `projection` (optional): a list of field names that should be
            returned in the result set or a dict specifying the fields
            to include or exclude. If `projection` is a list "_id" will
            always be returned. Use a dict to exclude fields from
            the result (e.g. projection={'_id': False}).
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.
          - `skip` (optional): the number of documents to omit (from
            the start of the result set) when returning the results
          - `limit` (optional): the maximum number of results to
            return. A limit of 0 (the default) is equivalent to setting no
            limit.
          - `no_cursor_timeout` (optional): if False (the default), any
            returned cursor is closed by the server after 10 minutes of
            inactivity. If set to True, the returned cursor will never
            time out on the server. Care should be taken to ensure that
            cursors with no_cursor_timeout turned on are properly closed.
          - `cursor_type` (optional): the type of cursor to return. The valid
            options are defined by :class:`~pymongo.cursor.CursorType`:

            - :attr:`~pymongo.cursor.CursorType.NON_TAILABLE` - the result of
              this find call will return a standard cursor over the result set.
            - :attr:`~pymongo.cursor.CursorType.TAILABLE` - the result of this
              find call will be a tailable cursor - tailable cursors are only
              for use with capped collections. They are not closed when the
              last data is retrieved but are kept open and the cursor location
              marks the final document position. If more data is received
              iteration of the cursor will continue from the last document
              received. For details, see the `tailable cursor documentation
              <http://www.mongodb.org/display/DOCS/Tailable+Cursors>`_.
            - :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` - the result
              of this find call will be a tailable cursor with the await flag
              set. The server will wait for a few seconds after returning the
              full result set so that it can capture and return additional data
              added during the query.
            - :attr:`~pymongo.cursor.CursorType.EXHAUST` - the result of this
              find call will be an exhaust cursor. MongoDB will stream batched
              results to the client without waiting for the client to request
              each batch, reducing latency. See notes on compatibility below.

          - `sort` (optional): a list of (key, direction) pairs
            specifying the sort order for this query. See
            :meth:`~pymongo.cursor.Cursor.sort` for details.
          - `allow_partial_results` (optional): if True, mongos will return
            partial results if some shards are down instead of returning an
            error.
          - `oplog_replay` (optional): **DEPRECATED** - if True, set the
            oplogReplay query flag. Default: False.
          - `batch_size` (optional): Limits the number of documents returned in
            a single batch.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.
          - `return_key` (optional): If True, return only the index keys in
            each document.
          - `show_record_id` (optional): If True, adds a field ``$recordId`` in
            each document with the storage engine's internal record identifier.
          - `snapshot` (optional): **DEPRECATED** - If True, prevents the
            cursor from returning a document more than once because of an
            intervening write operation.
          - `hint` (optional): An index, in the same format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). Pass this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.hint` on the cursor to tell Mongo the
            proper index to use for the query.
          - `max_time_ms` (optional): Specifies a time limit for a query
            operation. If the specified time is exceeded, the operation will be
            aborted and :exc:`~pymongo.errors.ExecutionTimeout` is raised. Pass
            this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.max_time_ms` on the cursor.
          - `max_scan` (optional): **DEPRECATED** - The maximum number of
            documents to scan. Pass this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.max_scan` on the cursor.
          - `min` (optional): A list of field, limit pairs specifying the
            inclusive lower bound for all keys of a specific index in order.
            Pass this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.min` on the cursor. ``hint`` must
            also be passed to ensure the query utilizes the correct index.
          - `max` (optional): A list of field, limit pairs specifying the
            exclusive upper bound for all keys of a specific index in order.
            Pass this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.max` on the cursor. ``hint`` must
            also be passed to ensure the query utilizes the correct index.
          - `comment` (optional): A string to attach to the query to help
            interpret and trace the operation in the server logs and in profile
            data. Pass this as an alternative to calling
            :meth:`~pymongo.cursor.Cursor.comment` on the cursor.
          - `modifiers` (optional): **DEPRECATED** - A dict specifying
            additional MongoDB query modifiers. Use the keyword arguments listed
            above instead.
          - `allow_disk_use` (optional): if True, MongoDB may use temporary
            disk files to store data exceeding the system memory limit while
            processing a blocking sort operation. The option has no effect if
            MongoDB can satisfy the specified sort using an index, or if the
            blocking sort requires less memory than the 100 MiB limit. This
            option is only supported on MongoDB 4.4 and above.

        .. note:: There are a number of caveats to using
          :attr:`~pymongo.cursor.CursorType.EXHAUST` as cursor_type:

          - The `limit` option can not be used with an exhaust cursor.

          - Exhaust cursors are not supported by mongos and can not be
            used with a sharded cluster.

          - A :class:`~pymongo.cursor.Cursor` instance created with the
            :attr:`~pymongo.cursor.CursorType.EXHAUST` cursor_type requires an
            exclusive :class:`~socket.socket` connection to MongoDB. If the
            :class:`~pymongo.cursor.Cursor` is discarded without being
            completely iterated the underlying :class:`~socket.socket`
            connection will be closed and discarded without being returned to
            the connection pool.

        .. versionchanged:: 3.11
           Added the ``allow_disk_use`` option.
           Deprecated the ``oplog_replay`` option. Support for this option is
           deprecated in MongoDB 4.4. The query engine now automatically
           optimizes queries against the oplog without requiring this
           option to be set.

        .. versionchanged:: 3.7
           Deprecated the ``snapshot`` option, which is deprecated in MongoDB
           3.6 and removed in MongoDB 4.0.
           Deprecated the ``max_scan`` option. Support for this option is
           deprecated in MongoDB 4.0. Use ``max_time_ms`` instead to limit
           server-side execution time.

        .. versionchanged:: 3.6
           Added ``session`` parameter.

        .. versionchanged:: 3.5
           Added the options ``return_key``, ``show_record_id``, ``snapshot``,
           ``hint``, ``max_time_ms``, ``max_scan``, ``min``, ``max``, and
           ``comment``.
           Deprecated the ``modifiers`` option.

        .. versionchanged:: 3.4
           Added support for the ``collation`` option.

        .. versionchanged:: 3.0
           Changed the parameter names ``spec``, ``fields``, ``timeout``, and
           ``partial`` to ``filter``, ``projection``, ``no_cursor_timeout``,
           and ``allow_partial_results`` respectively.
           Added the ``cursor_type``, ``oplog_replay``, and ``modifiers``
           options.
           Removed the ``network_timeout``, ``read_preference``, ``tag_sets``,
           ``secondary_acceptable_latency_ms``, ``max_scan``, ``snapshot``,
           ``tailable``, ``await_data``, ``exhaust``, ``as_class``, and
           slave_okay parameters.
           Removed ``compile_re`` option: PyMongo now always
           represents BSON regular expressions as :class:`~bson.regex.Regex`
           objects. Use :meth:`~bson.regex.Regex.try_compile` to attempt to
           convert from a BSON regular expression to a Python regular
           expression object.
           Soft deprecated the ``manipulate`` option.

        .. versionchanged:: 2.7
           Added ``compile_re`` option. If set to False, PyMongo represented
           BSON regular expressions as :class:`~bson.regex.Regex` objects
           instead of attempting to compile BSON regular expressions as Python
           native regular expressions, thus preventing errors for some
           incompatible patterns, see `PYTHON-500`_.

        .. versionchanged:: 2.3
           Added the ``tag_sets`` and ``secondary_acceptable_latency_ms``
           parameters.

        .. _PYTHON-500: https://jira.mongodb.org/browse/PYTHON-500

        .. mongodoc:: find
        """
        # All argument validation and query execution is deferred to Cursor;
        # no server round trip happens until the cursor is iterated.
        return Cursor(self, *args, **kwargs)
def find_raw_batches(self, *args, **kwargs):
"""Query the database and retrieve batches of raw BSON.
Similar to the :meth:`find` method but returns a
:class:`~pymongo.cursor.RawBatchCursor`.
This example demonstrates how to work with raw batches, but in practice
raw batches should be passed to an external library that can decode
BSON into another data type, rather than used with PyMongo's
:mod:`bson` module.
>>> import bson
>>> cursor = db.test.find_raw_batches()
>>> for batch in cursor:
... print(bson.decode_all(batch))
.. note:: find_raw_batches does not support auto encryption.
.. versionchanged:: 3.12
Instead of ignoring the user-specified read concern, this method
now sends it to the server when connected to MongoDB 3.6+.
Added session support.
.. versionadded:: 3.6
"""
# OP_MSG is required to support encryption.
if self.__database.client._encrypter:
raise InvalidOperation(
"find_raw_batches does not support auto encryption")
return RawBatchCursor(self, *args, **kwargs)
def _count_cmd(self, session, sock_info, slave_ok, cmd, collation):
    """Internal count command helper.

    Runs ``cmd`` (a count or count-like command) and returns the
    integer count, treating a missing namespace as an empty collection.
    """
    # XXX: "ns missing" checks can be removed when we drop support for
    # MongoDB 3.0, see SERVER-17051.
    reply = self._command(
        sock_info,
        cmd,
        slave_ok,
        allowable_errors=["ns missing"],
        codec_options=self.__write_response_codec_options,
        read_concern=self.read_concern,
        collation=collation,
        session=session)
    # "ns missing" means the collection does not exist, i.e. zero docs.
    if reply.get("errmsg", "") == "ns missing":
        return 0
    return int(reply["n"])
def _count(self, cmd, collation=None, session=None):
    """Internal count helper.

    Wraps :meth:`_count_cmd` in the client's retryable-read machinery.
    """
    # XXX: "ns missing" checks can be removed when we drop support for
    # MongoDB 3.0, see SERVER-17051.
    def run_count(session, server, sock_info, slave_ok):
        # Adapter matching the _retryable_read callback signature.
        return self._count_cmd(session, sock_info, slave_ok, cmd, collation)

    return self.__database.client._retryable_read(
        run_count, self._read_preference_for(session), session)
def _aggregate_one_result(
        self, sock_info, slave_ok, cmd, collation, session):
    """Internal helper to run an aggregate that returns a single result.

    Returns the first document of the first batch, or ``None`` when the
    namespace does not exist or the pipeline produced no documents.
    """
    reply = self._command(
        sock_info,
        cmd,
        slave_ok,
        allowable_errors=[26],  # Ignore NamespaceNotFound.
        codec_options=self.__write_response_codec_options,
        read_concern=self.read_concern,
        collation=collation,
        session=session)
    # NamespaceNotFound replies carry no cursor document at all.
    if 'cursor' not in reply:
        return None
    first_batch = reply['cursor']['firstBatch']
    return first_batch[0] if first_batch else None
def estimated_document_count(self, **kwargs):
    """Get an estimate of the number of documents in this collection using
    collection metadata.

    The :meth:`estimated_document_count` method is **not** supported in a
    transaction.

    All optional parameters should be passed as keyword arguments
    to this method. Valid options include:

      - `maxTimeMS` (int): The maximum amount of time to allow this
        operation to run, in milliseconds.

    :Parameters:
      - `**kwargs` (optional): See list of options above.

    .. versionadded:: 3.7
    """
    if 'session' in kwargs:
        raise ConfigurationError(
            'estimated_document_count does not support sessions')

    def run_estimate(session, server, sock_info, slave_ok):
        if sock_info.max_wire_version >= 12:
            # MongoDB 4.9+: use a $collStats aggregation for the fast
            # metadata-based count.
            pipeline = [
                {'$collStats': {'count': {}}},
                {'$group': {'_id': 1, 'n': {'$sum': '$count'}}},
            ]
            cmd = SON([('aggregate', self.__name),
                       ('pipeline', pipeline),
                       ('cursor', {})])
            cmd.update(kwargs)
            reply = self._aggregate_one_result(
                sock_info, slave_ok, cmd, collation=None, session=session)
            # No result means the collection does not exist.
            return int(reply['n']) if reply else 0
        # MongoDB < 4.9: the plain count command is still available.
        cmd = SON([('count', self.__name)])
        cmd.update(kwargs)
        return self._count_cmd(None, sock_info, slave_ok, cmd, None)

    return self.__database.client._retryable_read(
        run_estimate, self.read_preference, None)
def count_documents(self, filter, session=None, **kwargs):
    """Count the number of documents in this collection.

    .. note:: For a fast count of the total documents in a collection see
       :meth:`estimated_document_count`.

    The :meth:`count_documents` method is supported in a transaction.

    All optional parameters should be passed as keyword arguments
    to this method. Valid options include:

      - `skip` (int): The number of matching documents to skip before
        returning results.
      - `limit` (int): The maximum number of documents to count. Must be
        a positive integer. If not provided, no limit is imposed.
      - `maxTimeMS` (int): The maximum amount of time to allow this
        operation to run, in milliseconds.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.
      - `hint` (string or list of tuples): The index to use. Specify either
        the index name as a string or the index specification as a list of
        tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).
        This option is only supported on MongoDB 3.6 and above.

    The :meth:`count_documents` method obeys the :attr:`read_preference` of
    this :class:`Collection`.

    .. note:: When migrating from :meth:`count` to :meth:`count_documents`
       the following query operators must be replaced:

       +-------------+-------------------------------------+
       | Operator    | Replacement                         |
       +=============+=====================================+
       | $where      | `$expr`_                            |
       +-------------+-------------------------------------+
       | $near       | `$geoWithin`_ with `$center`_       |
       +-------------+-------------------------------------+
       | $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
       +-------------+-------------------------------------+

       $expr requires MongoDB 3.6+

    :Parameters:
      - `filter` (required): A query document that selects which documents
        to count in the collection. Can be an empty document to count all
        documents.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): See list of options above.

    .. versionadded:: 3.7

    .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
    .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
    .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
    .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
    """
    # Counting is implemented server-side as an aggregation: match,
    # optional skip/limit, then a $group that sums matching documents.
    pipeline = [{'$match': filter}]
    for stage, option in (('$skip', 'skip'), ('$limit', 'limit')):
        if option in kwargs:
            pipeline.append({stage: kwargs.pop(option)})
    pipeline.append({'$group': {'_id': 1, 'n': {'$sum': 1}}})
    cmd = SON([('aggregate', self.__name),
               ('pipeline', pipeline),
               ('cursor', {})])
    # A list-of-tuples hint must be converted to an index document;
    # string hints (index names) pass through unchanged.
    if "hint" in kwargs and not isinstance(kwargs["hint"], str):
        kwargs["hint"] = helpers._index_document(kwargs["hint"])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)

    def run_count(session, server, sock_info, slave_ok):
        reply = self._aggregate_one_result(
            sock_info, slave_ok, cmd, collation, session)
        # No result document means nothing matched (or no namespace).
        return reply['n'] if reply else 0

    return self.__database.client._retryable_read(
        run_count, self._read_preference_for(session), session)
def count(self, filter=None, session=None, **kwargs):
    """**DEPRECATED** - Get the number of documents in this collection.

    The :meth:`count` method is deprecated and **not** supported in a
    transaction. Please use :meth:`count_documents` or
    :meth:`estimated_document_count` instead.

    All optional count parameters should be passed as keyword arguments
    to this method. Valid options include:

      - `skip` (int): The number of matching documents to skip before
        returning results.
      - `limit` (int): The maximum number of documents to count. A limit
        of 0 (the default) is equivalent to setting no limit.
      - `maxTimeMS` (int): The maximum amount of time to allow the count
        command to run, in milliseconds.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.
      - `hint` (string or list of tuples): The index to use. Specify either
        the index name as a string or the index specification as a list of
        tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]).

    The :meth:`count` method obeys the :attr:`read_preference` of
    this :class:`Collection`.

    .. note:: When migrating from :meth:`count` to :meth:`count_documents`
       the following query operators must be replaced:

       +-------------+-------------------------------------+
       | Operator    | Replacement                         |
       +=============+=====================================+
       | $where      | `$expr`_                            |
       +-------------+-------------------------------------+
       | $near       | `$geoWithin`_ with `$center`_       |
       +-------------+-------------------------------------+
       | $nearSphere | `$geoWithin`_ with `$centerSphere`_ |
       +-------------+-------------------------------------+

       $expr requires MongoDB 3.6+

    :Parameters:
      - `filter` (optional): A query document that selects which documents
        to count in the collection.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): See list of options above.

    .. versionchanged:: 3.7
       Deprecated.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionchanged:: 3.4
       Support the `collation` option.

    .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/
    .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/
    .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center
    .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
    """
    warnings.warn("count is deprecated. Use estimated_document_count or "
                  "count_documents instead. Please note that $where must "
                  "be replaced by $expr, $near must be replaced by "
                  "$geoWithin with $center, and $nearSphere must be "
                  "replaced by $geoWithin with $centerSphere",
                  DeprecationWarning, stacklevel=2)
    cmd = SON([("count", self.__name)])
    if filter is not None:
        # The legacy count command calls the filter "query"; refuse
        # conflicting spellings rather than silently picking one.
        if "query" in kwargs:
            raise ConfigurationError("can't pass both filter and query")
        kwargs["query"] = filter
    # Convert a list-of-tuples hint into an index document; string hints
    # (index names) are passed through unchanged.
    if "hint" in kwargs and not isinstance(kwargs["hint"], str):
        kwargs["hint"] = helpers._index_document(kwargs["hint"])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    return self._count(cmd, collation, session)
def create_indexes(self, indexes, session=None, **kwargs):
    """Create one or more indexes on this collection.

      >>> from pymongo import IndexModel, ASCENDING, DESCENDING
      >>> index1 = IndexModel([("hello", DESCENDING),
      ...                      ("world", ASCENDING)], name="hello_world")
      >>> index2 = IndexModel([("goodbye", DESCENDING)])
      >>> db.test.create_indexes([index1, index2])
      ["hello_world", "goodbye_-1"]

    :Parameters:
      - `indexes`: A list of :class:`~pymongo.operations.IndexModel`
        instances.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): optional arguments to the createIndexes
        command (like maxTimeMS) can be passed as keyword arguments.

    :Returns:
      A list of the names of the created indexes.

    .. note:: `create_indexes` uses the `createIndexes`_ command
       introduced in MongoDB **2.6** and cannot be used with earlier
       versions.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.6
       Added ``session`` parameter. Added support for arbitrary keyword
       arguments.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.

    .. versionadded:: 3.0

    .. _createIndexes: https://docs.mongodb.com/manual/reference/command/createIndexes/
    """
    # Validate the argument type up front; per-element validation happens
    # inside the internal helper.
    common.validate_list('indexes', indexes)
    return self.__create_indexes(indexes, session, **kwargs)
def __create_indexes(self, indexes, session, **kwargs):
    """Internal createIndexes helper.

    :Parameters:
      - `indexes`: A list of :class:`~pymongo.operations.IndexModel`
        instances.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): optional arguments to the createIndexes
        command (like maxTimeMS) can be passed as keyword arguments.

    :Returns:
      The list of index names sent to the server.
    """
    names = []
    with self._socket_for_writes(session) as sock_info:
        # Feature gates keyed on the connected server's wire version:
        # collation needs MongoDB 3.4+ (wire 5), commitQuorum 4.4+ (wire 9).
        supports_collations = sock_info.max_wire_version >= 5
        supports_quorum = sock_info.max_wire_version >= 9
        def gen_indexes():
            # Validates each model while building the command document and
            # collects the index names into `names` as a side effect; the
            # generator is fully consumed by list() below.
            for index in indexes:
                if not isinstance(index, IndexModel):
                    raise TypeError(
                        "%r is not an instance of "
                        "pymongo.operations.IndexModel" % (index,))
                document = index.document
                if "collation" in document and not supports_collations:
                    raise ConfigurationError(
                        "Must be connected to MongoDB "
                        "3.4+ to use collations.")
                if 'bucketSize' in document:
                    # The bucketSize option is required by geoHaystack.
                    warnings.warn(
                        _HAYSTACK_MSG, DeprecationWarning, stacklevel=4)
                names.append(document["name"])
                yield document
        cmd = SON([('createIndexes', self.name),
                   ('indexes', list(gen_indexes()))])
        cmd.update(kwargs)
        if 'commitQuorum' in kwargs and not supports_quorum:
            raise ConfigurationError(
                "Must be connected to MongoDB 4.4+ to use the "
                "commitQuorum option for createIndexes")
        # Index builds always run against the primary.
        self._command(
            sock_info, cmd, read_preference=ReadPreference.PRIMARY,
            codec_options=_UNICODE_REPLACE_CODEC_OPTIONS,
            write_concern=self._write_concern_for(session),
            session=session)
    return names
def create_index(self, keys, session=None, **kwargs):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`
    (:class:`str` in python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
    :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
    :data:`~pymongo.TEXT`).

    To create a single key ascending index on the key ``'mike'`` we just
    use a string argument::

      >>> my_collection.create_index("mike")

    For a compound index on ``'mike'`` descending and ``'eliot'``
    ascending we need to use a list of tuples::

      >>> my_collection.create_index([("mike", pymongo.DESCENDING),
      ...                             ("eliot", pymongo.ASCENDING)])

    All optional index creation parameters should be passed as
    keyword arguments to this method. For example::

      >>> my_collection.create_index([("mike", pymongo.DESCENDING)],
      ...                            background=True)

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True``, creates a uniqueness constraint on the
        index.
      - `background`: if ``True``, this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
        index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
        index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index. Requires MongoDB >=3.2.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. Requires MongoDB >= 3.4.
      - `wildcardProjection`: Allows users to include or exclude specific
        field paths from a `wildcard index`_ using the {"$**" : 1} key
        pattern. Requires MongoDB >= 4.2.
      - `hidden`: if ``True``, this index will be hidden from the query
        planner and will not be evaluated as part of query plan
        selection. Requires MongoDB >= 4.4.

    See the MongoDB documentation for a full list of supported options by
    server version.

    .. warning:: `dropDups` is not supported by MongoDB 3.0 or newer. The
      option is silently ignored by the server and unique index builds
      using the option will fail if a duplicate value is detected.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    :Parameters:
      - `keys`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    :Returns:
      The name of the created index.

    .. versionchanged:: 3.11
       Added the ``hidden`` option.

    .. versionchanged:: 3.6
       Added ``session`` parameter. Added support for passing maxTimeMS
       in kwargs.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4. Support the `collation` option.

    .. versionchanged:: 3.2
       Added partialFilterExpression to support partial indexes.

    .. versionchanged:: 3.0
       Renamed `key_or_list` to `keys`. Removed the `cache_for` option.
       :meth:`create_index` no longer caches index names. Removed support
       for the drop_dups and bucket_size aliases.

    .. mongodoc:: indexes

    .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/#wildcard-index-core
    """
    # maxTimeMS belongs to the createIndexes command itself, not to the
    # index definition, so split it out before building the IndexModel.
    cmd_options = {}
    if "maxTimeMS" in kwargs:
        cmd_options["maxTimeMS"] = kwargs.pop("maxTimeMS")
    index = IndexModel(keys, **kwargs)
    return self.__create_indexes([index], session, **cmd_options)[0]
def drop_indexes(self, session=None, **kwargs):
    """Drops all indexes on this collection.

    Can be used on non-existant collections or collections with no indexes.
    Raises OperationFailure on an error.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): optional arguments to the dropIndexes
        command (like maxTimeMS) can be passed as keyword arguments.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.6
       Added ``session`` parameter. Added support for arbitrary keyword
       arguments.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    # "*" is the server's wildcard index name meaning "all indexes".
    self.drop_index("*", session=session, **kwargs)
def drop_index(self, index_or_name, session=None, **kwargs):
    """Drops the specified index on this collection.

    Can be used on non-existant collections or collections with no
    indexes. Raises OperationFailure on an error (e.g. trying to
    drop an index that does not exist). `index_or_name`
    can be either an index name (as returned by `create_index`),
    or an index specifier (as passed to `create_index`). An index
    specifier should be a list of (key, direction) pairs. Raises
    TypeError if index is not an instance of (str, unicode, list).

    .. warning::

      if a custom name was used on index creation (by
      passing the `name` parameter to :meth:`create_index`) the index
      **must** be dropped by name.

    :Parameters:
      - `index_or_name`: index (or name of index) to drop
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): optional arguments to the dropIndexes
        command (like maxTimeMS) can be passed as keyword arguments.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.6
       Added ``session`` parameter. Added support for arbitrary keyword
       arguments.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    name = index_or_name
    # An index specifier (list of pairs) is converted to the name the
    # server generated for it at creation time.
    if isinstance(index_or_name, list):
        name = helpers._gen_index_name(index_or_name)
    if not isinstance(name, str):
        raise TypeError("index_or_name must be an instance of str or list")
    cmd = SON([("dropIndexes", self.__name), ("index", name)])
    cmd.update(kwargs)
    with self._socket_for_writes(session) as sock_info:
        # "ns not found" and error code 26 (NamespaceNotFound) are
        # tolerated so dropping from a nonexistent collection is a no-op.
        self._command(sock_info,
                      cmd,
                      read_preference=ReadPreference.PRIMARY,
                      allowable_errors=["ns not found", 26],
                      write_concern=self._write_concern_for(session),
                      session=session)
def list_indexes(self, session=None):
    """Get a cursor over the index documents for this collection.

      >>> for index in db.test.list_indexes():
      ...     print(index)
      ...
      SON([('v', 2), ('key', SON([('_id', 1)])), ('name', '_id_')])

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    :Returns:
      An instance of :class:`~pymongo.command_cursor.CommandCursor`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionadded:: 3.0
    """
    # Decode index documents as SON to preserve key order in "key".
    codec_options = CodecOptions(SON)
    coll = self.with_options(codec_options=codec_options,
                             read_preference=ReadPreference.PRIMARY)
    read_pref = ((session and session._txn_read_preference())
                 or ReadPreference.PRIMARY)
    def _cmd(session, server, sock_info, slave_ok):
        cmd = SON([("listIndexes", self.__name), ("cursor", {})])
        if sock_info.max_wire_version > 2:
            # Modern servers: use the listIndexes command.
            with self.__database.client._tmp_session(session, False) as s:
                try:
                    cursor = self._command(sock_info, cmd, slave_ok,
                                           read_pref,
                                           codec_options,
                                           session=s)["cursor"]
                except OperationFailure as exc:
                    # Ignore NamespaceNotFound errors to match the behavior
                    # of reading from *.system.indexes.
                    if exc.code != 26:
                        raise
                    # Synthesize an empty, already-exhausted cursor.
                    cursor = {'id': 0, 'firstBatch': []}
                cmd_cursor = CommandCursor(
                    coll, cursor, sock_info.address, session=s,
                    explicit_session=session is not None)
        else:
            # Legacy servers: query the *.system.indexes namespace
            # directly.
            res = message._first_batch(
                sock_info, self.__database.name, "system.indexes",
                {"ns": self.__full_name}, 0, slave_ok, codec_options,
                read_pref, cmd,
                self.database.client._event_listeners)
            cursor = res["cursor"]
            # Note that a collection can only have 64 indexes, so there
            # will never be a getMore call.
            cmd_cursor = CommandCursor(coll, cursor, sock_info.address)
        cmd_cursor._maybe_pin_connection(sock_info)
        return cmd_cursor
    return self.__database.client._retryable_read(
        _cmd, read_pref, session)
def index_information(self, session=None):
    """Get information on this collection's indexes.

    Returns a dictionary where the keys are index names (as
    returned by create_index()) and the values are dictionaries
    containing information about each index. The dictionary is
    guaranteed to contain at least a single key, ``"key"`` which
    is a list of (key, direction) pairs specifying the index (as
    passed to create_index()). It will also contain any other
    metadata about the indexes, except for the ``"ns"`` and
    ``"name"`` keys, which are cleaned. Example output might look
    like this:

      >>> db.test.create_index("x", unique=True)
      'x_1'
      >>> db.test.index_information()
      {'_id_': {'key': [('_id', 1)]},
       'x_1': {'unique': True, 'key': [('x', 1)]}}

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    cursor = self.list_indexes(session=session)
    info = {}
    for index in cursor:
        # Materialize the (key, direction) pairs as a list, as promised
        # by the docstring. Without list(), .items() yields a dict-items
        # view that does not compare equal to a list, cannot be indexed,
        # and is not serializable.
        index["key"] = list(index["key"].items())
        index = dict(index)
        info[index.pop("name")] = index
    return info
def options(self, session=None):
    """Get the options set on this collection.

    Returns a dictionary of options and their values - see
    :meth:`~pymongo.database.Database.create_collection` for more
    information on the possible options. Returns an empty
    dictionary if the collection has not been created yet.

    :Parameters:
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    """
    # Query listCollections through a database handle that carries this
    # collection's read/write configuration.
    dbo = self.__database.client.get_database(
        self.__database.name,
        self.codec_options,
        self.read_preference,
        self.write_concern,
        self.read_concern)
    cursor = dbo.list_collections(
        session=session, filter={"name": self.__name})
    # At most one collection can match an exact name filter.
    result = next(iter(cursor), None)
    if not result:
        return {}
    options = result.get("options", {})
    # "create" is an artifact of collection creation, not a real option.
    options.pop("create", None)
    return options
def _aggregate(self, aggregation_command, pipeline, cursor_class, session,
               explicit_session, **kwargs):
    """Internal aggregate helper.

    Builds an aggregation command object and executes it with
    retryable-read semantics, returning the resulting cursor.
    """
    command = aggregation_command(
        self, cursor_class, pipeline, kwargs, explicit_session,
        user_fields={'cursor': {'firstBatch': 1}})
    # Pipelines that write ($out/$merge) must not be retried.
    return self.__database.client._retryable_read(
        command.get_cursor, command.get_read_preference(session), session,
        retryable=not command._performs_write)
def aggregate(self, pipeline, session=None, **kwargs):
    """Perform an aggregation using the aggregation framework on this
    collection.

    All optional `aggregate command`_ parameters should be passed as
    keyword arguments to this method. Valid options include, but are not
    limited to:

      - `allowDiskUse` (bool): Enables writing to temporary files. When set
        to True, aggregation stages can write data to the _tmp subdirectory
        of the --dbpath directory. The default is False.
      - `maxTimeMS` (int): The maximum amount of time to allow the operation
        to run in milliseconds.
      - `batchSize` (int): The maximum number of documents to return per
        batch.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.

    The :meth:`aggregate` method obeys the :attr:`read_preference` of this
    :class:`Collection`, except when ``$out`` or ``$merge`` are used, in
    which case :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`
    is used.

    .. note:: This method does not support the 'explain' option. Please
       use :meth:`~pymongo.database.Database.command` instead. An
       example is included in the :ref:`aggregate-examples` documentation.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    :Parameters:
      - `pipeline`: a list of aggregation pipeline stages
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): See list of options above.

    :Returns:
      A :class:`~pymongo.command_cursor.CommandCursor` over the result
      set.

    .. versionchanged:: 4.0
       Removed the ``useCursor`` option.

    .. versionchanged:: 3.9
       Apply this collection's read concern to pipelines containing the
       `$out` stage when connected to MongoDB >= 4.2.
       Added support for the ``$merge`` pipeline stage.
       Aggregations that write always use read preference
       :attr:`~pymongo.read_preferences.ReadPreference.PRIMARY`.

    .. versionchanged:: 3.6
       Added the `session` parameter. Added the `maxAwaitTimeMS` option.
       Deprecated the `useCursor` option.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4. Support the `collation` option.

    .. versionchanged:: 3.0
       The :meth:`aggregate` method always returns a CommandCursor. The
       pipeline argument must be a list.

    .. versionchanged:: 2.7
       When the cursor option is used, return
       :class:`~pymongo.command_cursor.CommandCursor` instead of
       :class:`~pymongo.cursor.Cursor`.

    .. versionchanged:: 2.6
       Added cursor support.

    .. versionadded:: 2.3

    .. seealso:: :doc:`/examples/aggregation`

    .. _aggregate command:
        https://docs.mongodb.com/manual/reference/command/aggregate
    """
    # NOTE(review): close=False appears to let the returned cursor keep
    # using (and later clean up) an implicit session -- confirm against
    # _tmp_session's contract.
    with self.__database.client._tmp_session(session, close=False) as s:
        return self._aggregate(_CollectionAggregationCommand,
                               pipeline,
                               CommandCursor,
                               session=s,
                               explicit_session=session is not None,
                               **kwargs)
def aggregate_raw_batches(self, pipeline, session=None, **kwargs):
    """Perform an aggregation and retrieve batches of raw BSON.

    Similar to the :meth:`aggregate` method but returns a
    :class:`~pymongo.cursor.RawBatchCursor`.

    This example demonstrates how to work with raw batches, but in practice
    raw batches should be passed to an external library that can decode
    BSON into another data type, rather than used with PyMongo's
    :mod:`bson` module.

      >>> import bson
      >>> cursor = db.test.aggregate_raw_batches([
      ...     {'$project': {'x': {'$multiply': [2, '$x']}}}])
      >>> for batch in cursor:
      ...     print(bson.decode_all(batch))

    .. note:: aggregate_raw_batches does not support auto encryption.

    .. versionchanged:: 3.12
       Added session support.

    .. versionadded:: 3.6
    """
    # Auto encryption requires OP_MSG, which raw batches bypass, so the
    # two features are mutually exclusive.
    encrypter = self.__database.client._encrypter
    if encrypter:
        raise InvalidOperation(
            "aggregate_raw_batches does not support auto encryption")
    with self.__database.client._tmp_session(session, close=False) as s:
        return self._aggregate(_CollectionRawAggregationCommand,
                               pipeline,
                               RawBatchCommandCursor,
                               session=s,
                               explicit_session=session is not None,
                               **kwargs)
def watch(self, pipeline=None, full_document=None, resume_after=None,
          max_await_time_ms=None, batch_size=None, collation=None,
          start_at_operation_time=None, session=None, start_after=None):
    """Watch changes on this collection.

    Performs an aggregation with an implicit initial ``$changeStream``
    stage and returns a
    :class:`~pymongo.change_stream.CollectionChangeStream` cursor which
    iterates over changes on this collection.

    Introduced in MongoDB 3.6.

    .. code-block:: python

       with db.collection.watch() as stream:
           for change in stream:
               print(change)

    The :class:`~pymongo.change_stream.CollectionChangeStream` iterable
    blocks until the next change document is returned or an error is
    raised. If the
    :meth:`~pymongo.change_stream.CollectionChangeStream.next` method
    encounters a network error when retrieving a batch from the server,
    it will automatically attempt to recreate the cursor such that no
    change events are missed. Any error encountered during the resume
    attempt indicates there may be an outage and will be raised.

    .. code-block:: python

        try:
            with db.collection.watch(
                    [{'$match': {'operationType': 'insert'}}]) as stream:
                for insert_change in stream:
                    print(insert_change)
        except pymongo.errors.PyMongoError:
            # The ChangeStream encountered an unrecoverable error or the
            # resume attempt failed to recreate the cursor.
            logging.error('...')

    For a precise description of the resume process see the
    `change streams specification`_.

    .. note:: Using this helper method is preferred to directly calling
       :meth:`~pymongo.collection.Collection.aggregate` with a
       ``$changeStream`` stage, for the purpose of supporting
       resumability.

    .. warning:: This Collection's :attr:`read_concern` must be
       ``ReadConcern("majority")`` in order to use the ``$changeStream``
       stage.

    :Parameters:
      - `pipeline` (optional): A list of aggregation pipeline stages to
        append to an initial ``$changeStream`` stage. Not all
        pipeline stages are valid after a ``$changeStream`` stage, see the
        MongoDB documentation on change streams for the supported stages.
      - `full_document` (optional): The fullDocument to pass as an option
        to the ``$changeStream`` stage. Allowed values: 'updateLookup'.
        When set to 'updateLookup', the change notification for partial
        updates will include both a delta describing the changes to the
        document, as well as a copy of the entire document that was
        changed from some time after the change occurred.
      - `resume_after` (optional): A resume token. If provided, the
        change stream will start returning changes that occur directly
        after the operation specified in the resume token. A resume token
        is the _id value of a change document.
      - `max_await_time_ms` (optional): The maximum time in milliseconds
        for the server to wait for changes before responding to a getMore
        operation.
      - `batch_size` (optional): The maximum number of documents to return
        per batch.
      - `collation` (optional): The :class:`~pymongo.collation.Collation`
        to use for the aggregation.
      - `start_at_operation_time` (optional): If provided, the resulting
        change stream will only return changes that occurred at or after
        the specified :class:`~bson.timestamp.Timestamp`. Requires
        MongoDB >= 4.0.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `start_after` (optional): The same as `resume_after` except that
        `start_after` can resume notifications after an invalidate event.
        This option and `resume_after` are mutually exclusive.

    :Returns:
      A :class:`~pymongo.change_stream.CollectionChangeStream` cursor.

    .. versionchanged:: 3.9
       Added the ``start_after`` parameter.

    .. versionchanged:: 3.7
       Added the ``start_at_operation_time`` parameter.

    .. versionadded:: 3.6

    .. mongodoc:: changeStreams

    .. _change streams specification:
        https://github.com/mongodb/specifications/blob/master/source/change-streams/change-streams.rst
    """
    # All parameters are forwarded positionally; CollectionChangeStream
    # encapsulates the $changeStream aggregation and resume logic.
    return CollectionChangeStream(
        self, pipeline, full_document, resume_after, max_await_time_ms,
        batch_size, collation, start_at_operation_time, session,
        start_after)
def rename(self, new_name, session=None, **kwargs):
    """Rename this collection.

    If operating in auth mode, client must be authorized as an
    admin to perform this operation. Raises :class:`TypeError` if
    `new_name` is not an instance of :class:`basestring`
    (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
    if `new_name` is not a valid collection name.

    :Parameters:
      - `new_name`: new name for this collection
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional arguments to the rename command
        may be passed as keyword arguments to this helper method
        (i.e. ``dropTarget=True``)

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation when using
       MongoDB >= 3.4.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.
    """
    if not isinstance(new_name, str):
        raise TypeError("new_name must be an instance of str")
    # Client-side name validation, mirroring the rules applied at
    # collection creation time.
    if not new_name or ".." in new_name:
        raise InvalidName("collection names cannot be empty")
    if new_name[0] == "." or new_name[-1] == ".":
        # Fixed typo in the error message ("collecion" -> "collection").
        raise InvalidName("collection names must not start or end with '.'")
    if "$" in new_name and not new_name.startswith("oplog.$main"):
        raise InvalidName("collection names must not contain '$'")

    # renameCollection takes fully-qualified "<db>.<collection>" names
    # and must be run against the admin database.
    new_name = "%s.%s" % (self.__database.name, new_name)
    cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
    cmd.update(kwargs)
    write_concern = self._write_concern_for_cmd(cmd, session)
    with self._socket_for_writes(session) as sock_info:
        with self.__database.client._tmp_session(session) as s:
            return sock_info.command(
                'admin', cmd,
                write_concern=write_concern,
                parse_write_concern_error=True,
                session=s, client=self.__database.client)
def distinct(self, key, filter=None, session=None, **kwargs):
    """Return the distinct values for `key` across all documents
    in this collection.

    A :class:`TypeError` is raised when `key` is not an instance of
    :class:`str`.

    All optional distinct parameters should be passed as keyword arguments
    to this method. Valid options include:

      - `maxTimeMS` (int): The maximum amount of time to allow the count
        command to run, in milliseconds.
      - `collation` (optional): An instance of
        :class:`~pymongo.collation.Collation`. This option is only supported
        on MongoDB 3.4 and above.

    The :meth:`distinct` method obeys the :attr:`read_preference` of
    this :class:`Collection`.

    :Parameters:
      - `key`: name of the field for which we want to get the distinct
        values
      - `filter` (optional): A query document that specifies the documents
        from which to retrieve the distinct values.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): See list of options above.

    .. versionchanged:: 3.6
       Added ``session`` parameter.

    .. versionchanged:: 3.4
       Support the `collation` option.
    """
    if not isinstance(key, str):
        raise TypeError("key must be an instance of str")
    command = SON([("distinct", self.__name), ("key", key)])
    if filter is not None:
        if "query" in kwargs:
            raise ConfigurationError("can't pass both filter and query")
        kwargs["query"] = filter
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    command.update(kwargs)

    def _run_distinct(session, server, sock_info, slave_ok):
        # The server reply carries the distinct values under "values".
        return self._command(
            sock_info, command, slave_ok, read_concern=self.read_concern,
            collation=collation, session=session,
            user_fields={"values": 1})["values"]

    return self.__database.client._retryable_read(
        _run_distinct, self._read_preference_for(session), session)
def _map_reduce(self, map, reduce, out, session, read_pref, **kwargs):
    """Internal mapReduce helper.

    Builds and runs the ``mapReduce`` command with the given `map` and
    `reduce` JavaScript functions and `out` specification, returning the
    raw command response.
    """
    cmd = SON([("mapReduce", self.__name),
               ("map", map),
               ("reduce", reduce),
               ("out", out)])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    # Inline output returns the result set in the command reply itself
    # (under 'results') instead of writing it to a collection.
    inline = 'inline' in out
    if inline:
        user_fields = {'results': 1}
    else:
        user_fields = None
    # A session's transaction read preference, when set, overrides the
    # caller-supplied one.
    read_pref = ((session and session._txn_read_preference())
                 or read_pref)
    with self.__database.client._socket_for_reads(read_pref, session) as (
            sock_info, slave_ok):
        # readConcern is applied only for inline output on servers with
        # wire version >= 4, and only when the caller didn't supply one.
        if (sock_info.max_wire_version >= 4 and
                ('readConcern' not in cmd) and
                inline):
            read_concern = self.read_concern
        else:
            read_concern = None
        # writeConcern applies only when output goes to a collection.
        if 'writeConcern' not in cmd and not inline:
            write_concern = self._write_concern_for(session)
        else:
            write_concern = None
        return self._command(
            sock_info, cmd, slave_ok, read_pref,
            read_concern=read_concern,
            write_concern=write_concern,
            collation=collation, session=session,
            user_fields=user_fields)
def map_reduce(self, map, reduce, out, full_response=False, session=None,
               **kwargs):
    """Perform a map/reduce operation on this collection.

    If `full_response` is ``False`` (default) returns a
    :class:`~pymongo.collection.Collection` instance containing
    the results of the operation. Otherwise, returns the full
    response from the server to the `map reduce command`_.

    :Parameters:
      - `map`: map function (as a JavaScript string)
      - `reduce`: reduce function (as a JavaScript string)
      - `out`: output collection name or `out object` (dict). See
        the `map reduce command`_ documentation for available options.
        Note: `out` options are order sensitive. :class:`~bson.son.SON`
        can be used to specify multiple options.
        e.g. SON([('replace', <collection name>), ('db', <database name>)])
      - `full_response` (optional): if ``True``, return full response to
        this command - otherwise just return the result collection
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional arguments to the
        `map reduce command`_ may be passed as keyword arguments to this
        helper method, e.g.::

           >>> db.test.map_reduce(map, reduce, "myresults", limit=2)

    .. note:: This method does **not** obey the :attr:`read_preference` of
       this :class:`Collection`; use :meth:`inline_map_reduce` to run
       mapReduce on a secondary.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation (if the
       output is not inline) when using MongoDB >= 3.4.

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4. Added the `collation` option.
    .. versionchanged:: 2.2
       Removed deprecated arguments: merge_output and reduce_output

    .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/

    .. mongodoc:: mapreduce
    """
    if not isinstance(out, (str, abc.Mapping)):
        raise TypeError("'out' must be an instance of str or a mapping")

    raw = self._map_reduce(map, reduce, out, session,
                           ReadPreference.PRIMARY, **kwargs)
    result = raw.get('result')
    if full_response or not result:
        return raw
    if isinstance(result, dict):
        # Output was written to a collection, possibly in another database.
        return self.__database.client[result['db']][result['collection']]
    return self.__database[result]
def inline_map_reduce(self, map, reduce, full_response=False, session=None,
                      **kwargs):
    """Perform an inline map/reduce operation on this collection.

    The operation runs on the server in RAM; no result collection is
    created. With `full_response` ``False`` (the default) the result
    documents are returned as a list, otherwise the complete server
    response to the `map reduce command`_ is returned.

    This method obeys the :attr:`read_preference` of this
    :class:`Collection`.

    :Parameters:
      - `map`: map function (as a JavaScript string)
      - `reduce`: reduce function (as a JavaScript string)
      - `full_response` (optional): if ``True``, return full response to
        this command - otherwise just return the result collection
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional arguments to the
        `map reduce command`_ may be passed as keyword arguments to this
        helper method, e.g.::

           >>> db.test.inline_map_reduce(map, reduce, limit=2)

    .. versionchanged:: 3.6
       Added ``session`` parameter.
    .. versionchanged:: 3.4
       Added the `collation` option.
    """
    reply = self._map_reduce(map, reduce, {"inline": 1}, session,
                             self.read_preference, **kwargs)
    if full_response:
        return reply
    return reply.get("results")
def _write_concern_for_cmd(self, cmd, session):
    """Return the write concern for a command document.

    An explicit ``writeConcern`` embedded in `cmd` takes precedence;
    otherwise fall back to the concern implied by the session/collection.
    """
    explicit = cmd.get('writeConcern')
    if explicit is None:
        return self._write_concern_for(session)
    return WriteConcern(**explicit)
def __find_and_modify(self, filter, projection, sort, upsert=None,
                      return_document=ReturnDocument.BEFORE,
                      array_filters=None, hint=None, session=None,
                      **kwargs):
    """Internal findAndModify helper.

    Shared implementation behind :meth:`find_one_and_delete`,
    :meth:`find_one_and_replace` and :meth:`find_one_and_update`.
    Returns the document selected by the command (the "value" field of
    the server reply), or ``None`` when nothing matched.
    """
    common.validate_is_mapping("filter", filter)
    if not isinstance(return_document, bool):
        # ReturnDocument.BEFORE/AFTER are plain booleans.
        raise ValueError("return_document must be "
                         "ReturnDocument.BEFORE or ReturnDocument.AFTER")
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd = SON([("findAndModify", self.__name),
               ("query", filter),
               ("new", return_document)])
    cmd.update(kwargs)
    if projection is not None:
        cmd["fields"] = helpers._fields_list_to_dict(projection,
                                                     "projection")
    if sort is not None:
        cmd["sort"] = helpers._index_document(sort)
    if upsert is not None:
        common.validate_boolean("upsert", upsert)
        cmd["upsert"] = upsert
    if hint is not None:
        # A non-string hint is an index specifier; convert it to the
        # document form the server expects.
        if not isinstance(hint, str):
            hint = helpers._index_document(hint)
    write_concern = self._write_concern_for_cmd(cmd, session)

    def _find_and_modify(session, sock_info, retryable_write):
        # Server-version gates are checked here, once a connection is
        # available and its wire version is known.
        if array_filters is not None:
            if sock_info.max_wire_version < 6:
                raise ConfigurationError(
                    'Must be connected to MongoDB 3.6+ to use '
                    'arrayFilters.')
            if not write_concern.acknowledged:
                raise ConfigurationError(
                    'arrayFilters is unsupported for unacknowledged '
                    'writes.')
            cmd["arrayFilters"] = array_filters
        if hint is not None:
            if sock_info.max_wire_version < 8:
                raise ConfigurationError(
                    'Must be connected to MongoDB 4.2+ to use hint.')
            if not write_concern.acknowledged:
                raise ConfigurationError(
                    'hint is unsupported for unacknowledged writes.')
            cmd['hint'] = hint
        # Attach a non-default write concern on servers that accept it
        # (wire version >= 4).
        if (sock_info.max_wire_version >= 4 and
                not write_concern.is_server_default):
            cmd['writeConcern'] = write_concern.document
        out = self._command(sock_info, cmd,
                            read_preference=ReadPreference.PRIMARY,
                            write_concern=write_concern,
                            collation=collation, session=session,
                            retryable_write=retryable_write,
                            user_fields=_FIND_AND_MODIFY_DOC_FIELDS)
        _check_write_command_response(out)
        return out.get("value")

    return self.__database.client._retryable_write(
        write_concern.acknowledged, _find_and_modify, session)
def find_one_and_delete(self, filter,
                        projection=None, sort=None, hint=None,
                        session=None, **kwargs):
    """Find one document matching ``filter``, delete it, and return it.

      >>> db.test.count_documents({'x': 1})
      2
      >>> db.test.find_one_and_delete({'x': 1})
      {'x': 1, '_id': ObjectId('54f4e12bfba5220aa4d6dee8')}
      >>> db.test.count_documents({'x': 1})
      1

    When several documents match, `sort` determines which one is removed.
    The *projection* option can be used to limit the fields returned.

    :Parameters:
      - `filter`: A query that matches the document to delete.
      - `projection` (optional): a list of field names that should be
        returned in the result document, or a mapping specifying the
        fields to include or exclude. With a list "_id" is always
        returned; use a mapping (e.g. ``projection={'_id': False}``) to
        exclude fields.
      - `sort` (optional): a list of (key, direction) pairs specifying the
        sort order for the query; the first document in that order is
        deleted.
      - `hint` (optional): An index to use to support the query predicate,
        specified either by its string name or in the same format as
        passed to :meth:`~pymongo.collection.Collection.create_index`
        (e.g. ``[('field', ASCENDING)]``). Requires MongoDB 4.4+.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional command arguments (for example
        ``maxTimeMS``).

    .. warning:: This command uses the
       :class:`~pymongo.write_concern.WriteConcern` of this
       :class:`~pymongo.collection.Collection` when connected to
       MongoDB >= 3.2; an elevated write concern may be slower than the
       default.

    .. versionchanged:: 3.11
       Added ``hint`` parameter.
    .. versionchanged:: 3.6
       Added ``session`` parameter.
    .. versionchanged:: 3.4
       Added the `collation` option.
    .. versionchanged:: 3.2
       Respects write concern.
    .. versionadded:: 3.0
    """
    # findAndModify with remove=True deletes and returns the document.
    kwargs['remove'] = True
    return self.__find_and_modify(filter, projection, sort,
                                  hint=hint, session=session, **kwargs)
def find_one_and_replace(self, filter, replacement,
                         projection=None, sort=None, upsert=False,
                         return_document=ReturnDocument.BEFORE,
                         hint=None, session=None, **kwargs):
    """Find one document matching ``filter`` and replace it with
    ``replacement``, returning either the original or the new document.

    Unlike :meth:`find_one_and_update`, the matched document is replaced
    wholesale rather than modified in place.

      >>> db.test.find_one_and_replace({'x': 1}, {'y': 1})
      {'x': 1, '_id': 0}

    :Parameters:
      - `filter`: A query that matches the document to replace.
      - `replacement`: The replacement document.
      - `projection` (optional): A list of field names that should be
        returned in the result document, or a mapping specifying the
        fields to include or exclude. With a list "_id" is always
        returned; use a mapping (e.g. ``projection={'_id': False}``) to
        exclude fields.
      - `sort` (optional): a list of (key, direction) pairs specifying the
        sort order for the query; the first document in that order is
        replaced.
      - `upsert` (optional): When ``True``, inserts a new document if no
        document matches the query. Defaults to ``False``.
      - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default),
        returns the original document before it was replaced, or ``None``
        if no document matches. If :attr:`ReturnDocument.AFTER`, returns
        the replaced or inserted document.
      - `hint` (optional): An index to use to support the query predicate,
        specified either by its string name or in the same format as
        passed to :meth:`~pymongo.collection.Collection.create_index`
        (e.g. ``[('field', ASCENDING)]``). Requires MongoDB 4.4+.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional command arguments (for example
        ``maxTimeMS``).

    .. warning:: This command uses the
       :class:`~pymongo.write_concern.WriteConcern` of this
       :class:`~pymongo.collection.Collection` when connected to
       MongoDB >= 3.2; an elevated write concern may be slower than the
       default.

    .. versionchanged:: 3.11
       Added the ``hint`` option.
    .. versionchanged:: 3.6
       Added ``session`` parameter.
    .. versionchanged:: 3.4
       Added the ``collation`` option.
    .. versionchanged:: 3.2
       Respects write concern.
    .. versionadded:: 3.0
    """
    # Reject update-operator documents: replacement must be a full doc.
    common.validate_ok_for_replace(replacement)
    kwargs['update'] = replacement
    return self.__find_and_modify(filter, projection,
                                  sort, upsert, return_document,
                                  hint=hint, session=session, **kwargs)
def find_one_and_update(self, filter, update,
                        projection=None, sort=None, upsert=False,
                        return_document=ReturnDocument.BEFORE,
                        array_filters=None, hint=None, session=None,
                        **kwargs):
    """Find one document matching ``filter``, apply ``update`` to it, and
    return either the original or the updated document.

      >>> db.test.find_one_and_update(
      ...    {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}})
      {'_id': 665, 'done': False, 'count': 25}}

    Returns ``None`` if no document matches the filter. By default the
    document *before* the update is returned; pass
    ``return_document=ReturnDocument.AFTER`` for the updated (or, with
    *upsert*, inserted) version:

      >>> from pymongo import ReturnDocument
      >>> db.example.find_one_and_update(
      ...     {'_id': 'userid'},
      ...     {'$inc': {'seq': 1}},
      ...     return_document=ReturnDocument.AFTER)
      {'_id': 'userid', 'seq': 1}

    The *projection*, *sort* and *upsert* options behave as in
    :meth:`find_one_and_replace`.

    :Parameters:
      - `filter`: A query that matches the document to update.
      - `update`: The update operations to apply.
      - `projection` (optional): A list of field names that should be
        returned in the result document, or a mapping specifying the
        fields to include or exclude. With a list "_id" is always
        returned; use a dict (e.g. ``projection={'_id': False}``) to
        exclude fields.
      - `sort` (optional): a list of (key, direction) pairs specifying the
        sort order for the query; the first document in that order is
        updated.
      - `upsert` (optional): When ``True``, inserts a new document if no
        document matches the query. Defaults to ``False``.
      - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default),
        returns the original document before it was updated. If
        :attr:`ReturnDocument.AFTER`, returns the updated or inserted
        document.
      - `array_filters` (optional): A list of filters specifying which
        array elements an update should apply. Requires MongoDB 3.6+.
      - `hint` (optional): An index to use to support the query predicate,
        specified either by its string name or in the same format as
        passed to :meth:`~pymongo.collection.Collection.create_index`
        (e.g. ``[('field', ASCENDING)]``). Requires MongoDB 4.4+.
      - `session` (optional): a
        :class:`~pymongo.client_session.ClientSession`.
      - `**kwargs` (optional): additional command arguments (for example
        ``maxTimeMS``).

    .. warning:: This command uses the
       :class:`~pymongo.write_concern.WriteConcern` of this
       :class:`~pymongo.collection.Collection` when connected to
       MongoDB >= 3.2; an elevated write concern may be slower than the
       default.

    .. versionchanged:: 3.11
       Added the ``hint`` option.
    .. versionchanged:: 3.9
       Added the ability to accept a pipeline as the ``update``.
    .. versionchanged:: 3.6
       Added the ``array_filters`` and ``session`` options.
    .. versionchanged:: 3.4
       Added the ``collation`` option.
    .. versionchanged:: 3.2
       Respects write concern.
    .. versionadded:: 3.0
    """
    common.validate_ok_for_update(update)
    common.validate_list_or_none('array_filters', array_filters)
    kwargs['update'] = update
    return self.__find_and_modify(filter, projection,
                                  sort, upsert, return_document,
                                  array_filters, hint=hint,
                                  session=session, **kwargs)
def __iter__(self):
    # The iterator protocol is implemented only so that attempting to
    # iterate a Collection fails fast with the clear TypeError below
    # (people sometimes iterate a collection where they meant a cursor).
    return self

def __next__(self):
    raise TypeError("'Collection' object is not iterable")

# Python 2 spelling of the iterator protocol method.
next = __next__
def __call__(self, *args, **kwargs):
    """Raise a descriptive ``TypeError`` when a Collection is called.

    This is only here so that some API misusages are easier to debug,
    e.g. calling a method name that does not exist.
    """
    name = self.__name
    if "." not in name:
        # This Collection was produced by attribute access on a Database,
        # so the user most likely meant to call a Database method.
        raise TypeError("'Collection' object is not callable. If you "
                        "meant to call the '%s' method on a 'Database' "
                        "object it is failing because no such method "
                        "exists." % name)
    raise TypeError("'Collection' object is not callable. If you meant to "
                    "call the '%s' method on a 'Collection' object it is "
                    "failing because no such method exists." %
                    name.split(".")[-1])
| apache-2.0 |
ehenneken/loris | loris/parameters.py | 3 | 15597 | # parameters.py
# -*- coding: utf-8 -*-
'''
IIIF Image API parameters as objects.
The attributes of this class should make it possible to work with most imaging
libraries without any further need to process the IIIF syntax.
'''
import re
from decimal import Decimal
from math import floor
from logging import getLogger
from loris_exception import SyntaxException
from loris_exception import RequestException
logger = getLogger(__name__)
# Region/size mode identifiers shared by the parameter classes below.
FULL_MODE = 'full'
PCT_MODE = 'pct'
PIXEL_MODE = 'pixel'
# Decimal 1.0: the normalized full-extent width/height/offset bound.
DECIMAL_ONE = Decimal('1.0')
class RegionParameter(object):
    '''Internal representation of the region slice of an IIIF image URI.

    Slots:
        uri_value (str):
            The region slice of the URI.
        canonical_uri_value (str):
            The normalized (pixel-based, in-bounds) region slice of the URI.
        mode (str):
            One of 'full', 'pct', or 'pixel'
        img_info (ImageInfo)
        pixel_x (int)
        decimal_x (Decimal)
        pixel_y (int)
        decimal_y (Decimal)
        pixel_w (int)
        decimal_w (Decimal)
        pixel_h (int)
        decimal_h (Decimal)
    '''
    __slots__ = ('uri_value', 'canonical_uri_value', 'pixel_x', 'decimal_x',
                 'pixel_y', 'decimal_y', 'pixel_w', 'decimal_w', 'pixel_h',
                 'decimal_h', 'mode', 'img_info')

    def __str__(self):
        return self.uri_value

    def __init__(self, uri_value, img_info):
        '''Parse the uri_value into the object.

        Args:
            uri_value (str): The region slice of an IIIF image request URI.
            img_info (ImgInfo)
        Raises:
            SyntaxException
            RequestException
        '''
        self.uri_value = uri_value
        self.img_info = img_info
        # The original wrapped the next call (and the populate calls below)
        # in no-op `try/except: raise` blocks; those are removed since they
        # only re-raised the same exceptions.
        self.mode = RegionParameter.__mode_from_region_segment(
            self.uri_value, self.img_info)
        logger.debug('Region mode is "%s" (from "%s")' % (self.mode, uri_value))
        if self.mode == FULL_MODE:
            # The full image: decimals span 0..1, pixels span the source.
            self.canonical_uri_value = FULL_MODE
            self.pixel_x = 0
            self.decimal_x = 0
            self.pixel_y = 0
            self.decimal_y = 0
            self.pixel_w = img_info.width
            self.decimal_w = DECIMAL_ONE
            self.pixel_h = img_info.height
            self.decimal_h = DECIMAL_ONE
        else:
            if self.mode == PCT_MODE:
                self.__populate_slots_from_pct()
            else:  # self.mode == PIXEL_MODE
                self.__populate_slots_from_pixels()
            logger.debug('decimal_x: %s' % (str(self.decimal_x),))
            logger.debug('pixel_x: %d' % (self.pixel_x,))
            logger.debug('decimal_y: %s' % (str(self.decimal_y),))
            logger.debug('pixel_y: %d' % (self.pixel_y,))
            logger.debug('decimal_w: %s' % (str(self.decimal_w),))
            logger.debug('pixel_w: %d' % (self.pixel_w,))
            logger.debug('decimal_h: %s' % (str(self.decimal_h),))
            logger.debug('pixel_h: %d' % (self.pixel_h,))
            # Clamp requests that run off the right/bottom edge; these are
            # allowed and silently adjusted.
            # TODO: consider raising an exception that we can use to redirect
            if (self.decimal_x + self.decimal_w) > DECIMAL_ONE:
                self.decimal_w = DECIMAL_ONE - self.decimal_x
                self.pixel_w = img_info.width - self.pixel_x
                logger.info('decimal_w adjusted to: %s' % (str(self.decimal_w)),)
                logger.info('pixel_w adjusted to: %d' % (self.pixel_w,))
            if (self.decimal_y + self.decimal_h) > DECIMAL_ONE:
                self.decimal_h = DECIMAL_ONE - self.decimal_y
                self.pixel_h = img_info.height - self.pixel_y
                logger.info('decimal_h adjusted to: %s' % (str(self.decimal_h)),)
                # Log level raised from debug to info for consistency with
                # the width adjustment above.
                logger.info('pixel_h adjusted to: %s' % (str(self.pixel_h)),)
            # Reject regions that start outside the image entirely.
            if any(axis < 0 for axis in (self.pixel_x, self.pixel_y)):
                msg = 'x and y region parameters must be 0 or greater (%s)' % (self.uri_value,)
                raise RequestException(http_status=400, message=msg)
            if self.decimal_x >= DECIMAL_ONE:
                msg = 'Region x parameter is greater than the width of the image.\n'
                msg += 'Image width is %d' % (img_info.width,)
                raise RequestException(http_status=400, message=msg)
            if self.decimal_y >= DECIMAL_ONE:
                msg = 'Region y parameter is greater than the height of the image.\n'
                msg += 'Image height is %d' % (img_info.height,)
                raise RequestException(http_status=400, message=msg)
            # Canonical form is always the pixel-based syntax, computed after
            # all adjustments have been made.
            px = (self.pixel_x, self.pixel_y, self.pixel_w, self.pixel_h)
            self.canonical_uri_value = ','.join(map(str, px))
            logger.debug('canonical uri_value for region %s' % (self.canonical_uri_value,))

    def __populate_slots_from_pct(self):
        '''Populate the pixel and decimal slots from a 'pct:x,y,w,h' region.

        Raises:
            SyntaxException
            RequestException
        '''
        # BUGFIX: list() is required so len() works under Python 3, where
        # map() returns a lazy iterator.
        dimensions = list(map(float, self.uri_value.split(':')[1].split(',')))
        if len(dimensions) != 4:
            msg = 'Exactly (4) coordinates must be supplied'
            raise SyntaxException(http_status=400, message=msg)
        if any(n > 100.0 for n in dimensions):
            msg = 'Region percentages must be less than or equal to 100.'
            raise RequestException(http_status=400, message=msg)
        if any((n <= 0) for n in dimensions[2:]):
            msg = 'Width and Height Percentages must be greater than 0.'
            raise RequestException(http_status=400, message=msg)
        # decimals
        self.decimal_x, self.decimal_y, self.decimal_w, \
            self.decimal_h = map(RegionParameter.__pct_to_decimal, dimensions)
        # pixels (floor keeps us inside the image)
        self.pixel_x = int(floor(self.decimal_x * self.img_info.width))
        self.pixel_y = int(floor(self.decimal_y * self.img_info.height))
        self.pixel_w = int(floor(self.decimal_w * self.img_info.width))
        self.pixel_h = int(floor(self.decimal_h * self.img_info.height))

    def __populate_slots_from_pixels(self):
        '''Populate the pixel and decimal slots from an 'x,y,w,h' region.

        Raises:
            RequestException
            SyntaxException
        '''
        # BUGFIX: list() is required so len() works under Python 3.
        dimensions = list(map(int, self.uri_value.split(',')))
        # Length is now checked before the w/h sign check, matching the
        # order used in __populate_slots_from_pct.
        if len(dimensions) != 4:
            msg = 'Exactly (4) coordinates must be supplied'
            raise SyntaxException(http_status=400, message=msg)
        if any(n <= 0 for n in dimensions[2:]):
            msg = 'Width and height must be greater than 0'
            raise RequestException(http_status=400, message=msg)
        # pixels
        self.pixel_x, self.pixel_y, self.pixel_w, self.pixel_h = dimensions
        # decimals
        self.decimal_x = self.pixel_x / Decimal(str(self.img_info.width))
        self.decimal_y = self.pixel_y / Decimal(str(self.img_info.height))
        self.decimal_w = self.pixel_w / Decimal(str(self.img_info.width))
        self.decimal_h = self.pixel_h / Decimal(str(self.img_info.height))

    @staticmethod
    def __mode_from_region_segment(region_segment, img_info):
        '''
        Get the mode of the request from the region segment.

        Args:
            region_segment (str)
        Returns:
            PCT_MODE, FULL_MODE, or PIXEL_MODE
        Raises:
            SyntaxException if this can't be determined.
        '''
        comma_segments = region_segment.split(',')
        if region_segment == 'full':
            return FULL_MODE
        elif len(comma_segments) == 4 and all([
                comma_segments[0] == '0',
                comma_segments[1] == '0',
                comma_segments[2] == str(img_info.width),
                comma_segments[3] == str(img_info.height)]):
            # '0,0,w,h' covering the whole image is equivalent to 'full'.
            return FULL_MODE
        elif all([n.isdigit() for n in comma_segments]):
            return PIXEL_MODE
        elif region_segment.split(':')[0] == 'pct':
            return PCT_MODE
        else:
            msg = 'Region syntax "%s" is not valid' % (region_segment,)
            raise SyntaxException(http_status=400, message=msg)

    @staticmethod
    def __pct_to_decimal(n):
        # 'pct:' values are percentages; normalize to 0..1 Decimals.
        return Decimal(str(n)) / Decimal('100.0')
class SizeParameter(object):
    '''Internal representation of the size slice of an IIIF image URI.

    Slots:
        uri_value (str):
            The size slice of the URI.
        mode (str):
            One of 'full', 'pct', or 'pixel'
        canonical_uri_value (str):
            The uri_value after it has been normalized to the 'w,' form.
        force_aspect (bool):
            True if the aspect ratio of the image should not be preserved.
        w (int):
            The width.
        h (int):
            The height.
    '''
    __slots__ = ('uri_value', 'canonical_uri_value', 'mode', 'force_aspect',
                 'w', 'h')

    def __init__(self, uri_value, region_parameter):
        '''Parse the URI slice into an object.

        Args:
            uri_value (str):
                The size slice of an IIIF image URI.
            region_parameter (RegionParameter):
                The region parameter of the request.
        Raises:
            SyntaxException
            RequestException
        '''
        self.uri_value = uri_value
        self.mode = SizeParameter.__mode_from_size_segment(self.uri_value)
        logger.debug('Size mode is "%s" (from "%s")' % (self.mode, uri_value))
        if self.mode == FULL_MODE:
            # 'full' keeps the (possibly clamped) region dimensions.
            self.force_aspect = False
            self.w = region_parameter.pixel_w
            self.h = region_parameter.pixel_h
            self.canonical_uri_value = FULL_MODE
        else:
            if self.mode == PCT_MODE:
                self.__populate_slots_from_pct(region_parameter)
            else:  # self.mode == PIXEL_MODE
                self.__populate_slots_from_pixels(region_parameter)
            # Canonical form: 'w,h' only when the aspect ratio is forced,
            # otherwise the width-only 'w,' form.
            if self.force_aspect:
                self.canonical_uri_value = '%d,%d' % (self.w, self.h)
            else:
                self.canonical_uri_value = '%d,' % (self.w,)
            logger.debug('canonical uri_value for size: %s' % (self.canonical_uri_value,))
        logger.debug('w %d', self.w)
        logger.debug('h %d', self.h)
        # Idiom fix: `is not None` replaces the original `!= None`.
        if any((dim <= 0 and dim is not None) for dim in (self.w, self.h)):
            msg = 'Width and height must both be positive numbers'
            raise RequestException(http_status=400, message=msg)

    def __populate_slots_from_pct(self, region_parameter):
        '''Populate w/h from a 'pct:n' size by scaling the region's pixel
        dimensions.

        Raises:
            RequestException
        '''
        self.force_aspect = False
        pct_decimal = Decimal(str(self.uri_value.split(':')[1])) * Decimal('0.01')
        logger.debug('pct_decimal: %s', pct_decimal)
        if pct_decimal <= Decimal('0'):
            msg = 'Percentage supplied is less than 0 (%s).' % (self.uri_value,)
            raise RequestException(http_status=400, message=msg)
        w_decimal = region_parameter.pixel_w * pct_decimal
        h_decimal = region_parameter.pixel_h * pct_decimal
        logger.debug('w_decimal %s', w_decimal)
        logger.debug('h_decimal %s', h_decimal)
        # Teeny, tiny requests that would scale a dimension below one pixel
        # still get 1px rather than 0.
        self.w = 1 if 0 < w_decimal < 1 else int(w_decimal)
        self.h = 1 if 0 < h_decimal < 1 else int(h_decimal)

    def __populate_slots_from_pixels(self, region_parameter):
        '''Populate w/h from a pixel-based size: 'w,', ',h', 'w,h' or '!w,h'.
        '''
        if self.uri_value.endswith(','):
            # 'w,': derive h from the region's aspect ratio.
            self.force_aspect = False
            self.w = int(self.uri_value[:-1])
            reduce_by = Decimal(str(self.w)) / region_parameter.pixel_w
            # BUGFIX: int() cast added; the original left self.h a Decimal
            # here, unlike every other branch.
            self.h = int(region_parameter.pixel_h * reduce_by)
        elif self.uri_value.startswith(','):
            # ',h': derive w from the region's aspect ratio.
            self.force_aspect = False
            self.h = int(self.uri_value[1:])
            reduce_by = Decimal(str(self.h)) / region_parameter.pixel_h
            # BUGFIX: int() cast added (see above).
            self.w = int(region_parameter.pixel_w * reduce_by)
        elif self.uri_value[0] == '!':
            # '!w,h': best fit inside a w x h box, preserving aspect ratio.
            self.force_aspect = False
            request_w, request_h = map(int, self.uri_value[1:].split(','))
            ratio_w = Decimal(str(request_w)) / region_parameter.pixel_w
            ratio_h = Decimal(str(request_h)) / region_parameter.pixel_h
            ratio = min(ratio_w, ratio_h)
            self.w = int(region_parameter.pixel_w * ratio)
            self.h = int(region_parameter.pixel_h * ratio)
        else:
            # 'w,h': exact size; the aspect ratio may be distorted.
            self.force_aspect = True
            self.w, self.h = map(int, self.uri_value.split(','))

    @staticmethod
    def __mode_from_size_segment(size_segment):
        '''
        Get the mode of the request from the size segment.

        Args:
            size_segment (str)
        Returns:
            PCT_MODE, FULL_MODE, or PIXEL_MODE
        Raises:
            SyntaxException if this can't be determined.
        '''
        # TODO: wish this were cleaner.
        if size_segment.split(':')[0] == 'pct':
            return PCT_MODE
        elif size_segment == 'full':
            return FULL_MODE
        elif not ',' in size_segment:
            msg = 'Size syntax "%s" is not valid' % (size_segment,)
            raise SyntaxException(http_status=400, message=msg)
        elif all([(n.isdigit() or n == '') for n in size_segment.split(',')]):
            # 'w,h', 'w,' or ',h'
            return PIXEL_MODE
        elif all([n.isdigit() for n in size_segment[1:].split(',')]) and \
                len(size_segment.split(',')) == 2:
            # '!w,h'
            return PIXEL_MODE
        else:
            msg = 'Size syntax "%s" is not valid' % (size_segment,)
            raise SyntaxException(http_status=400, message=msg)

    def __str__(self):
        return self.uri_value
class RotationParameter(object):
    '''Internal representation of the rotation slice of an IIIF image URI.

    Slots:
        uri_value (str)
        canonical_uri_value (str)
        mirror (bool): True when the image should be mirrored before
            rotation (leading '!' in the URI).
        rotation (str): the numeric portion of the URI value.
    '''
    # Optional leading '!' (mirror flag) followed by one decimal number.
    # BUGFIX: the previous pattern ('^!?[\\d.]+$') also matched inputs such
    # as '1.2.3', which then crashed float() with an uncaught ValueError
    # instead of producing a 400 SyntaxException.
    ROTATION_REGEX = re.compile(r'^!?(\d+(\.\d+)?|\.\d+)$')

    __slots__ = ('uri_value', 'canonical_uri_value', 'mirror', 'rotation')

    def __init__(self, uri_value):
        '''Parse and normalize the rotation slice of the request URI.

        (Docstring corrected: no rounding to the nearest 90 is performed;
        the value is only validated and canonicalized with '%g'.)

        Args:
            uri_value (str): the rotation slice of the request URI.
        Raises:
            SyntaxException:
                If the argument is not a number, is < 0, or > 360
        '''
        if not RotationParameter.ROTATION_REGEX.match(uri_value):
            msg = 'Rotation "%s" is not a number' % (uri_value,)
            raise SyntaxException(http_status=400, message=msg)
        if uri_value[0] == '!':
            self.mirror = True
            self.rotation = uri_value[1:]
            # '%g' drops trailing zeros, e.g. '!90.0' -> '!90'
            self.canonical_uri_value = '!%g' % (float(self.rotation),)
        else:
            self.mirror = False
            self.rotation = uri_value
            self.canonical_uri_value = '%g' % (float(self.rotation),)
        if not 0.0 <= float(self.rotation) <= 360.0:
            msg = 'Rotation argument "%s" is not between 0 and 360' % (uri_value,)
            raise SyntaxException(http_status=400, message=msg)
        logger.debug('canonical rotation is %s' % (self.canonical_uri_value,))
| bsd-2-clause |
sonaht/ansible | lib/ansible/modules/network/nxos/nxos_aaa_server_host.py | 59 | 11475 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# BUG FIX in the encrypt_type description: "O" (letter) corrected to "0"
# (zero), matching the declared choices ['0', '7'].
DOCUMENTATION = '''
---
module: nxos_aaa_server_host
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages AAA server host-specific configuration.
description:
    - Manages AAA server host-specific configuration.
author: Jason Edelman (@jedelman8)
notes:
    - Changes to the AAA server host key (shared secret) are not idempotent.
    - If C(state=absent) removes the whole host configuration.
options:
    server_type:
        description:
            - The server type is either radius or tacacs.
        required: true
        choices: ['radius', 'tacacs']
    address:
        description:
            - Address or name of the radius or tacacs host.
        required: true
    key:
        description:
            - Shared secret for the specified host.
        required: false
        default: null
    encrypt_type:
        description:
            - The state of encryption applied to the entered key.
              0 for clear text, 7 for encrypted. Type-6 encryption is
              not supported.
        required: false
        default: null
        choices: ['0', '7']
    host_timeout:
        description:
            - Timeout period for specified host, in seconds. Range is 1-60.
        required: false
        default: null
    auth_port:
        description:
            - Alternate UDP port for RADIUS authentication.
        required: false
        default: null
    acct_port:
        description:
            - Alternate UDP port for RADIUS accounting.
        required: false
        default: null
    tacacs_port:
        description:
            - Alternate TCP port TACACS Server.
        required: false
        default: null
    state:
        description:
            - Manage the state of the resource.
        required: false
        default: present
        choices: ['present','absent']
'''
# BUG FIX: several example tasks had broken Jinja2 expressions
# (e.g. ``host: inventory_hostname }}`` and ``username: un }}``); the
# variable references are now properly wrapped and quoted.
EXAMPLES = '''
# Radius Server Host Basic settings
- name: "Radius Server Host Basic settings"
  nxos_aaa_server_host:
    state: present
    server_type: radius
    address: 1.2.3.4
    acct_port: 2084
    host_timeout: 10
    host: "{{ inventory_hostname }}"
    username: "{{ un }}"
    password: "{{ pwd }}"

# Radius Server Host Key Configuration
- name: "Radius Server Host Key Configuration"
  nxos_aaa_server_host:
    state: present
    server_type: radius
    address: 1.2.3.4
    key: hello
    encrypt_type: 7
    host: "{{ inventory_hostname }}"
    username: "{{ un }}"
    password: "{{ pwd }}"

# TACACS Server Host Configuration
- name: "Tacacs Server Host Configuration"
  nxos_aaa_server_host:
    state: present
    server_type: tacacs
    tacacs_port: 89
    host_timeout: 10
    address: 5.6.7.8
    host: "{{ inventory_hostname }}"
    username: "{{ un }}"
    password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"address": "1.2.3.4", "auth_port": "2084",
"host_timeout": "10", "server_type": "radius"}
existing:
description:
- k/v pairs of existing configuration
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"address": "1.2.3.4", "auth_port": "2084",
"host_timeout": "10", "server_type": "radius"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["radius-server host 1.2.3.4 auth-port 2084 timeout 10"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module, command_type='cli_show'):
    '''Run a show command on the device and return its parsed output.

    Over the CLI transport, ``| json`` is appended to non-``show run``
    commands so the device returns structured output.
    '''
    transport = module.params['transport']
    if transport == 'cli':
        if 'show run' not in command:
            command += ' | json'
        body = run_commands(module, [command])
    elif transport == 'nxapi':
        body = run_commands(module, [command])
    return body
def flatten_list(command_lists):
    '''Flatten one level of nesting: lists are spliced in, scalars kept.'''
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def _match_dict(match_list, key_map):
no_blanks = []
match_dict = {}
for match_set in match_list:
match_set = tuple(v for v in match_set if v)
no_blanks.append(match_set)
for info in no_blanks:
words = info[0].strip().split()
length = len(words)
alt_key = key_map.get(words[0])
first = alt_key or words[0]
last = words[length - 1]
match_dict[first] = last.replace('\"', '')
return match_dict
def get_aaa_host_info(module, server_type, address):
    '''Return the on-device configuration for one AAA server host.

    Scrapes ``show run`` for the ``<type>-server host <address>`` line and
    extracts the optional port/timeout/key settings into a dict.  Returns
    an empty dict when the host is not configured or parsing fails.
    '''
    aaa_host_info = {}
    command = 'show run | inc {0}-server.host.{1}'.format(server_type, address)

    body = execute_show_command(command, module, command_type='cli_show_ascii')

    if not body:
        return {}
    try:
        # FIX: raw strings so '\d' / '\w' reach the regex engine verbatim
        # instead of relying on deprecated unknown-escape passthrough.
        pattern = (r'(acct-port \d+)|(timeout \d+)|(auth-port \d+)|'
                   r'(key 7 "\w+")|( port \d+)')
        raw_match = re.findall(pattern, body[0])
        aaa_host_info = _match_dict(raw_match, {'acct-port': 'acct_port',
                                                'auth-port': 'auth_port',
                                                'port': 'tacacs_port',
                                                'timeout': 'host_timeout'})
        if aaa_host_info:
            aaa_host_info['server_type'] = server_type
            aaa_host_info['address'] = address
    except TypeError:
        return {}

    return aaa_host_info
def config_aaa_host(server_type, address, params, clear=False):
    '''Build the CLI commands configuring one AAA server host.

    When *clear* is True, a removal command is emitted first.  A ``key``
    in *params* produces a separate key-setting command ahead of the main
    host command.
    '''
    commands = []
    if clear:
        commands.append('no {0}-server host {1}'.format(server_type, address))

    host_command = '{0}-server host {1}'.format(server_type, address)

    # Optional suffixes, appended in the device's expected order.
    option_templates = (
        (' auth-port {0}', params.get('auth_port')),
        (' acct-port {0}', params.get('acct_port')),
        (' port {0}', params.get('tacacs_port')),
        (' timeout {0}', params.get('host_timeout')),
    )
    for template, value in option_templates:
        if value:
            host_command += template.format(value)

    key = params.get('key')
    if key:
        commands.append('{0}-server host {1} key {2} {3}'.format(
            server_type, address, params.get('encrypt_type', ''), key))

    commands.append(host_command)
    return commands
def main():
    '''Module entry point: diff proposed vs. existing AAA host config,
    push the delta to the device, and report results.'''
    argument_spec = dict(
        server_type=dict(choices=['radius', 'tacacs'], required=True),
        address=dict(type='str', required=True),
        key=dict(type='str'),
        encrypt_type=dict(type='str', choices=['0', '7']),
        host_timeout=dict(type='str'),
        auth_port=dict(type='str'),
        acct_port=dict(type='str'),
        tacacs_port=dict(type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    server_type = module.params['server_type']
    address = module.params['address']
    key = module.params['key']
    encrypt_type = module.params['encrypt_type']
    host_timeout = module.params['host_timeout']
    auth_port = module.params['auth_port']
    acct_port = module.params['acct_port']
    tacacs_port = module.params['tacacs_port']
    state = module.params['state']

    args = dict(server_type=server_type, address=address, key=key,
                encrypt_type=encrypt_type, host_timeout=host_timeout,
                auth_port=auth_port, acct_port=acct_port,
                tacacs_port=tacacs_port)

    # Only user-supplied values participate in the diff.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    changed = False

    # Cross-parameter validation.
    if encrypt_type and not key:
        module.fail_json(msg='encrypt_type must be used with key')

    if tacacs_port and server_type != 'tacacs':
        module.fail_json(
            msg='tacacs_port can only be used with server_type=tacacs')

    if (auth_port or acct_port) and server_type != 'radius':
        # BUG FIX: the adjacent string literals were missing a separating
        # space and rendered as "...can only be usedwhen server_type=radius".
        module.fail_json(msg='auth_port and acct_port can only be used '
                             'when server_type=radius')

    existing = get_aaa_host_info(module, server_type, address)
    end_state = existing

    commands = []
    if state == 'present':
        host_timeout = proposed.get('host_timeout')
        if host_timeout:
            try:
                if int(host_timeout) < 1 or int(host_timeout) > 60:
                    raise ValueError
            except ValueError:
                module.fail_json(
                    msg='host_timeout must be an integer between 1 and 60')

        # Push only settings that differ from the device's current state.
        delta = dict(
            set(proposed.items()).difference(existing.items()))
        if delta:
            union = existing.copy()
            union.update(delta)
            command = config_aaa_host(server_type, address, union)
            if command:
                commands.append(command)

    elif state == 'absent':
        intersect = dict(
            set(proposed.items()).intersection(existing.items()))
        if intersect.get('address') and intersect.get('server_type'):
            command = 'no {0}-server host {1}'.format(
                intersect.get('server_type'), intersect.get('address'))
            commands.append(command)

    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report the would-be change, touch nothing.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_aaa_host_info(module, server_type, address)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    results['end_state'] = end_state

    module.exit_json(**results)
# Entry point when Ansible executes the module directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
esakellari/root | interpreter/llvm/src/tools/clang/bindings/python/examples/cindex/cindex-includes.py | 110 | 1644 | #!/usr/bin/env python
#===- cindex-includes.py - cindex/Python Inclusion Graph -----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
A simple command line tool for dumping a Graphviz description (dot) that
describes include dependencies.
"""
def main():
    '''Parse a source file with libclang and emit its include graph as
    a Graphviz "dot" digraph on stdout.'''
    import sys
    from clang.cindex import Index

    from optparse import OptionParser

    # NOTE(review): the usage placeholder appeared garbled in the source;
    # restored to the upstream wording.
    parser = OptionParser("usage: %prog [options] {<source-file>} [clang-args*]")
    parser.disable_interspersed_args()
    (opts, args) = parser.parse_args()
    if len(args) == 0:
        # BUG FIX: message read 'invalid number arguments'.
        parser.error('invalid number of arguments')

    # FIXME: Add an output file option
    out = sys.stdout

    index = Index.create()
    tu = index.parse(None, args)
    if not tu:
        parser.error("unable to load input")

    # A helper function for generating the node name.
    def name(f):
        if f:
            return "\"" + f.name + "\""

    # Generate the include graph
    out.write("digraph G {\n")
    for i in tu.get_includes():
        line = " "
        if i.is_input_file:
            # Always write the input file as a node just in case it doesn't
            # actually include anything. This would generate a 1 node graph.
            line += name(i.include)
        else:
            line += '%s->%s' % (name(i.source), name(i.include))
        line += "\n"
        out.write(line)
    out.write("}\n")
# Entry point when run as a command-line script.
if __name__ == '__main__':
    main()
| lgpl-2.1 |
diamondman/proteusisc | proteusisc/frame.py | 1 | 7853 | import collections
from .primitive import DeviceTarget, Primitive
class Frame(collections.MutableSequence):
    '''A "column" of primitives executed together across the scan chain.

    A Frame holds either one device specific primitive per device in the
    chain (all sharing the same type and group type), or exactly one non
    device specific primitive stored in slot 0.
    '''
    def __init__(self, chain, *prims, fill=False):
        '''
        Args:
            chain: scan chain whose device count defines the frame width.
            *prims: optional primitives to add immediately (see add()).
            fill (bool): when True, pad empty slots with placeholders.
        '''
        self._chain = chain
        self._prims = [None] * len(chain._devices)
        self._valid_prim = None  # first prim added; defines the frame type
        self._layer = None
        self._dev_specific = True
        if prims:
            self.add(*prims)
        if fill:
            self.fill()

    def add(self, *args: Primitive):
        '''Add primitives, enforcing frame compatibility rules.

        Raises:
            ValueError: for a None prim, a second non device specific
                prim, or a prim whose type/group does not match the frame.
        '''
        for prim in args:
            if prim is None:
                raise ValueError("None is not a valid prim. "
                                 "Maybe you called add(*frame) "
                                 "for a frame that has non device "
                                 "specific prims.")#pragma: no cover
            elif not self._valid_prim:
                # The first prim establishes the frame's type and layer.
                self._valid_prim = prim
                self._layer = type(prim)._layer
                self._dev_specific = isinstance(prim, DeviceTarget)
                if self._dev_specific:
                    self[prim._device_index] = prim
                else:
                    self[0] = prim
            elif not self._dev_specific:
                raise ValueError("Only one non device specific prim "
                                 "allowed in a Frame at once.")
            elif self._group_type == prim._group_type\
                 and self._prim_type == type(prim):
                self[prim._device_index] = prim
            else:
                raise ValueError("Incompatible primitives")
        return self

    def fill(self):
        '''Replace empty slots with placeholder prims for their device.'''
        if not self._valid_prim:
            raise ValueError("No valid primitives inserted before fill.")
        if self._dev_specific:
            for i, p in enumerate(self):
                if p is None:
                    self[i] = self._valid_prim.get_placeholder_for_dev(
                        self._chain._devices[i])
        return self

    @property
    def _group_type(self):
        return self._valid_prim._group_type

    @property
    def _prim_type(self):
        return type(self._valid_prim)

    def __len__(self):
        return len(self._prims)

    def __delitem__(self, index):
        self._prims.__delitem__(index)

    def insert(self, index, value):
        self._prims.insert(index, value)

    def __setitem__(self, index, value):
        self._prims.__setitem__(index, value)

    def __getitem__(self, index):
        return self._prims.__getitem__(index)

    def __repr__(self):
        return "<Frame%s>"%self._prims #pragma: no cover

    @classmethod
    def from_prim(cls, chain, *prim):
        '''Build a frame from prims and immediately fill placeholders.'''
        return cls(chain, *prim, fill=True)

    def expand_macro(self, sm):
        '''Expand this frame's macro prims into a FrameSequence.'''
        cls = type(self._valid_prim)
        if issubclass(cls, DeviceTarget):
            return cls.expand_frame(self, sm)
        else:
            prims = self._valid_prim.expand(self._chain, sm)
            if self.debug:
                print(self, prims)
            if prims is None: #TODO REMOVE AFTER DEV
                #Will cause infinite loop in real system
                return FrameSequence(self._chain, self)
            return FrameSequence(self._chain,
                                 *[Frame.from_prim(self._chain, prim)
                                   for prim in prims])

    def signature(self):
        return self._valid_prim.signature()

    def merge(self, t):
        '''Return a new merged Frame, or None when the frames can't merge.'''
        if self._layer != t._layer or self._prim_type != t._prim_type:
            return None
        if self._dev_specific:
            resl = [self[i].merge(t[i])
                    for i in range(len(self))]
            if all(resl):
                return Frame(self._chain, *resl)
        else:
            res = self._valid_prim.merge(t._valid_prim)
            if res:
                return Frame.from_prim(self._chain, res)

    def mergable(self, targetf):
        '''True when every prim in this frame can merge with targetf's.'''
        # BUG FIX: was ``len(self) is not len(targetf)`` — identity
        # comparison of ints is unreliable (only works by accident for
        # small cached values); use value inequality.
        if len(self) != len(targetf):
            return False
        return all(self[i].mergable(targetf[i])
                   for i in range(len(self)))

    @property
    def debug(self):
        if self._chain:
            return self._chain._debug
        return False
class FrameSequence(collections.MutableSequence):
    '''An ordered list of Frames built by aligning streams of primitives.

    Additional primitive streams are merged in with a longest-common-
    subsequence alignment so that compatible primitives from different
    devices end up sharing a Frame.
    '''
    def __init__(self, chain, *init_prims_lists):
        # Accepts either ready-made Frames, or one-or-more lists of prims;
        # extra prim lists are aligned into the sequence via addstream().
        self._chain = chain
        self._frames = []
        if init_prims_lists:
            if isinstance(init_prims_lists[0], Frame):
                for frame in init_prims_lists:
                    self._frames.append(frame)
            else:
                for p in init_prims_lists[0]:
                    self._frames.append(Frame(self._chain, p))
                for ps in init_prims_lists[1:]:
                    self.addstream(ps)

    # Longest-common-subsequence dynamic program (Needleman-Wunsch style
    # table fill plus a backwards trace to recover the subsequence).
    def _lcs(self, prims):
        '''Return the common subsequence of frame/prim signatures,
        restricted to prims that can actually join the matching frame.'''
        #Initialize with 0
        lengths = [[0 for j in range(len(prims)+1)]
                   for i in range(len(self)+1)]
        for i, f in enumerate(self):
            for j, p in enumerate(prims):
                if f.signature() == p.signature() and\
                   p.can_join_frame(f):
                    lengths[i+1][j+1] = lengths[i][j] + 1
                else:
                    lengths[i+1][j+1] = max(lengths[i+1][j],
                                            lengths[i][j+1])
        # Trace the DP table backwards to recover the subsequence.
        result = []
        x, y = len(self), len(prims)
        while x != 0 and y != 0:
            if lengths[x][y] == lengths[x-1][y]:
                x -= 1
            elif lengths[x][y] == lengths[x][y-1]:
                y -= 1
            else:
                result = [self[x-1].signature()] + result
                x -= 1
                y -= 1
        return result

    def addstream(self, prims):
        '''Align *prims* against the existing frames and merge them in.

        Prims whose signature matches the current LCS element join the
        existing frame; the rest are inserted as new single-prim frames.
        Returns self.
        '''
        i1, i2, selfoffset = 0, 0, 0
        for c in self._lcs(prims):
            while True:
                canjoin = prims[i2].can_join_frame(self[i1])
                if self[i1].signature() ==\
                   prims[i2].signature() == c and\
                   canjoin:
                    # Both streams match the LCS element: share the frame.
                    self._frames[i1].add(prims[i2])
                    i1 += 1
                    i2 += 1
                    break
                elif self[i1].signature() == c and canjoin:
                    #s2 does not match.
                    self.insert(i1+selfoffset,
                                Frame(self._chain,prims[i2]))
                    i2 += 1
                    selfoffset += 1
                elif (type(prims[i2]),prims[i2]._group_type) == c:
                    #s1 does not match.
                    i1 += 1
                else: #NEITHER IN SEQUENCE
                    i1 += 1
                    self.insert(i1+selfoffset,
                                Frame(self._chain,prims[i2]))
                    i2 += 1
                    selfoffset += 1
        # Any prims left over after the alignment become new frames.
        for p in prims[i2:]:
            self.append(Frame(self._chain, p))
        return self

    def finalize(self):
        '''Fill placeholder prims into every frame; returns self.'''
        for f in self._frames:
            f.fill()
        return self

    def __len__(self):
        return len(self._frames)

    def __delitem__(self, index):
        self._frames.__delitem__(index)

    def insert(self, index, value):
        self._frames.insert(index, value)

    def __setitem__(self, index, value):
        self._frames.__setitem__(index, value)

    def __getitem__(self, index):
        return self._frames.__getitem__(index)

    def snapshot(self): #pragma: no cover
        # One track (list of prim snapshots) per device in the chain.
        tracks = [[] for i in range(len(self._chain._devices))]
        for frame in self:
            if frame._dev_specific:
                for p in frame:
                    tracks[p._device_index].append(p.snapshot())
            else:
                # Non device specific prims live in slot 0; pad the other
                # tracks so the columns stay aligned.
                tracks[0].append(frame[0].snapshot())
                for i, p in enumerate(frame[1:]):
                    tracks[i+1].append({'valid':False})
        return tracks
| lgpl-2.1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.