code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
import unittest
import pytest
import cupy
from cupy import testing
from cupy_tests.core_tests.fusion_tests import fusion_utils
@testing.gpu
@testing.slow
@pytest.mark.skipif(
    cupy.cuda.runtime.is_hip, reason='HIP does not support this')
class TestFusionExample(unittest.TestCase):
    """Fusion smoke test: a batch-normalization forward pass run through
    cupy fusion (via fusion_utils.check_fusion) with numpy/cupy as ``xp``."""

    def generate_inputs(self, xp):
        # Build a 4-D activation tensor (N, C, H, W) plus per-channel
        # batchnorm parameters; consumed by fusion_utils.check_fusion.
        shape = (8, 64, 112, 112)
        _, chan, _, _ = shape
        x = testing.shaped_random(shape, xp, 'float32', scale=10, seed=0)
        gamma = xp.ones(chan)
        beta = xp.zeros(chan)
        running_mean = xp.zeros(chan)
        running_var = xp.ones(chan)
        size = x.size // gamma.size          # elements averaged per channel
        adjust = size / max(size - 1., 1.)   # unbiased-variance correction
        return (x, gamma, beta, running_mean, running_var, size, adjust), {}

    @fusion_utils.check_fusion()
    def test_batchnorm(self, xp):
        def batchnorm(x, gamma, beta, running_mean, running_var, size, adjust):
            decay = 0.9
            eps = 2e-5
            # Broadcast the per-channel vectors over the (N, C, H, W) layout.
            expander = (None, slice(None), None, None)
            gamma = gamma[expander]
            beta = beta[expander]
            mean = xp.sum(x, axis=(0, 2, 3)) / size
            diff = x - mean[expander]
            var = xp.sum(diff * diff, axis=(0, 2, 3)) / size
            inv_std = 1. / xp.sqrt(var + eps)
            y = gamma * diff * inv_std[expander] + beta
            # Update running statistics in place (exponential moving average).
            running_mean *= decay
            running_mean += (1 - decay) * mean
            running_var *= decay
            running_var += (1 - decay) * adjust * var
            return y
        return batchnorm
|
cupy/cupy
|
tests/cupy_tests/core_tests/fusion_tests/test_example.py
|
Python
|
mit
| 1,541
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the standalone Comment model and adds
    'comment' and 'action' text fields directly on Ticket."""

    def forwards(self, orm):
        # Deleting model 'Comment'
        db.delete_table('tickets_comment')

        # Adding field 'Ticket.comment'
        db.add_column('tickets_ticket', 'comment',
                      self.gf('django.db.models.fields.TextField')(blank=True, default=''),
                      keep_default=False)

        # Adding field 'Ticket.action'
        db.add_column('tickets_ticket', 'action',
                      self.gf('django.db.models.fields.TextField')(blank=True, default=''),
                      keep_default=False)

    def backwards(self, orm):
        # Adding model 'Comment'
        db.create_table('tickets_comment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('ticket', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tickets.Ticket'])),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.User'])),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('comment', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('tickets', ['Comment'])

        # Deleting field 'Ticket.comment'
        db.delete_column('tickets_ticket', 'comment')

        # Deleting field 'Ticket.action'
        db.delete_column('tickets_ticket', 'action')

    # Frozen ORM snapshot used by South to construct the `orm` argument above.
    models = {
        'accounts.user': {
            'Meta': {'object_name': 'User'},
            'apartment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'null': 'True', 'to': "orm['buildings.Apartment']"}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '160', 'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'original_email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '160'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'phone': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '20'}),
            'role': ('django.db.models.fields.CharField', [], {'max_length': '10', 'default': "'resident'"}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'null': 'True', 'to': "orm['sites.Site']"})
        },
        'amenities.amenity': {
            'Meta': {'object_name': 'Amenity'},
            'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['buildings.Building']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '60'})
        },
        'buildings.apartment': {
            'Meta': {'object_name': 'Apartment', 'unique_together': "(('building', 'number'),)"},
            'area': ('django.db.models.fields.FloatField', [], {'blank': 'True', 'null': 'True'}),
            'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['buildings.Building']"}),
            'floor': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_rooms': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'owner_email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
            'owner_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '60'}),
            'owner_phone': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '12'})
        },
        'buildings.building': {
            'Meta': {'object_name': 'Building', 'unique_together': "(('address_1', 'address_2', 'city', 'postcode', 'country'),)"},
            'address_1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'address_2': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '100'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'default': "'FI'"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'blank': 'True', 'null': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'blank': 'True', 'null': 'True'}),
            'num_floors': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'year': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True', 'null': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'tickets.ticket': {
            'Meta': {'object_name': 'Ticket'},
            'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'amenity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'null': 'True', 'to': "orm['amenities.Amenity']"}),
            'apartment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'null': 'True', 'to': "orm['buildings.Apartment']"}),
            'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['buildings.Building']"}),
            'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'reporter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.User']"}),
            'status': ('model_utils.fields.StatusField', [], {'max_length': '100', 'default': "'new'", 'no_check_for_status': 'True'})
        }
    }

    complete_apps = ['tickets']
|
danjac/ownblock
|
ownblock/ownblock/apps/tickets/migrations/0002_auto__del_comment__add_field_ticket_comment__add_field_ticket_action.py
|
Python
|
mit
| 7,671
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generate and process one artificial benchmark series: 1024 daily points,
# moving-average trend, no cycle, quantization transform, zero noise,
# 20 exogenous variables, no autoregression.
dataset_params = dict(
    N=1024,
    FREQ='D',
    seed=0,
    trendtype="MovingAverage",
    cycle_length=0,
    transform="Quantization",
    sigma=0.0,
    exog_count=20,
    ar_order=0,
)
art.process_dataset(**dataset_params)
|
antoinecarme/pyaf
|
tests/artificial/transf_Quantization/trend_MovingAverage/cycle_0/ar_/test_artificial_1024_Quantization_MovingAverage_0__20.py
|
Python
|
bsd-3-clause
| 272
|
#!/usr/bin/python2
# Copyright (c) 2013, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import errno
import getopt
import os
import sys
import roslib
roslib.load_manifest('tools')
import rospy
import std_msgs.msg
import dataflow
from baxter_msgs.msg import (
UpdateSources,
UpdateStatus,
)
class Updater(object):
    """
    Control the updater on the robot.

    Signals:
        status_changed: Fired when the update status changes. Passes
            the current UpdateStatus message.
    """

    def __init__(self):
        # Signal re-fired from _on_update_status whenever the status changes.
        self.status_changed = dataflow.Signal()
        self._status = UpdateStatus()
        self._avail_updates = UpdateSources()
        # Tracks the list of updates available on attached USB media.
        self._update_sources = rospy.Subscriber(
            '/usb/update_sources',
            UpdateSources,
            self._on_update_sources)
        self._updater_status_sub = rospy.Subscriber(
            '/updater/status',
            UpdateStatus,
            self._on_update_status)
        self._updater_start = rospy.Publisher(
            '/updater/start',
            std_msgs.msg.String)
        self._updater_stop = rospy.Publisher(
            '/updater/stop',
            std_msgs.msg.Empty)
        # Block until the first update_sources message arrives (uuid is set),
        # so list()/command_update() have data to work with.
        dataflow.wait_for(
            lambda: self._avail_updates.uuid != '',
            timeout = 1.0,
            timeout_msg = "Failed to get list of available updates")

    def _on_update_sources(self, msg):
        # Cache the latest source list; uuid identifies the list version.
        if msg.uuid != self._avail_updates.uuid:
            self._avail_updates = msg

    def _on_update_status(self, msg):
        # Only notify listeners on an actual change of status.
        if self._status != msg:
            self._status = msg
            self.status_changed(msg)

    def list(self):
        """
        Return a list of tuples (version, uuid) of all available updates
        """
        return [(u.version, u.uuid) for u in self._avail_updates.sources]

    def command_update(self, uuid):
        """
        Command the robot to launch the update with the given uuid.

        @param uuid - uuid of the update to start.
        Raises OSError(EINVAL) if uuid is not among the available updates.
        """
        if not any([u.uuid == uuid for u in self._avail_updates.sources]):
            raise OSError(errno.EINVAL, "Invalid update uuid '%s'" % (uuid,))
        self._updater_start.publish(std_msgs.msg.String(uuid))

    def stop_update(self):
        """
        Stop the current update process, if any.
        """
        self._updater_stop.publish()
def run_update(updater, uuid):
    """
    Run and monitor the progress of an update.

    @param updater - Instance of Updater to use.
    @param uuid - update uuid.
    Returns a process exit code: 0 on success (robot rebooting), 1 otherwise.
    """
    # Work around lack of a nonlocal keyword in python 2.x
    class NonLocal(object): pass
    # NOTE(review): nl is bound to the class object itself, not an instance;
    # this still works because attributes are set directly on the class.
    nl = NonLocal
    nl.rc = 1
    nl.done = False

    def on_update_status(msg):
        # Translate updater status messages into console output and
        # completion/exit-code state on nl.
        if msg.status == UpdateStatus.STS_IDLE:
            nl.done = True
        elif msg.status == UpdateStatus.STS_INVALID:
            print ("Invalid update uuid, '%s'." % (uuid,))
            nl.done = True
        elif msg.status == UpdateStatus.STS_BUSY:
            print ("Update already in progress (may be shutting down).")
            nl.done = True
        elif msg.status == UpdateStatus.STS_CANCELLED:
            print ("Update cancelled.")
            nl.done = True
        elif msg.status == UpdateStatus.STS_ERR:
            print ("Update failed: %s." % (msg.long_description,))
            nl.done = True
            nl.rc = 1
        elif msg.status == UpdateStatus.STS_LOAD_KEXEC:
            # Success path: the robot reboots into the new image.
            print ("Robot will now reboot to finish updating...")
            nl.rc = 0
        else:
            print ("Updater: %s" % (msg.long_description))

    def on_shutdown():
        # Cancel the update if this node is shut down mid-update.
        updater.stop_update()
    rospy.on_shutdown(on_shutdown)
    updater.status_changed.connect(on_update_status)
    try:
        updater.command_update(uuid)
    except OSError, e:
        # EINVAL means the uuid was not in the available-updates list.
        if e.errno == errno.EINVAL:
            print(str(e))
            return 1
        raise
    try:
        dataflow.wait_for(
            lambda: nl.done == True,
            timeout = 5 * 60,
            timeout_msg = "Timeout waiting for update to succeed")
    except Exception, e:
        # ESHUTDOWN is expected when the robot kexecs into the update image.
        if not (hasattr(e, 'errno') and e.errno == errno.ESHUTDOWN):
            print (str(e))
            nl.rc = 1
    return nl.rc
def usage():
    # Print command-line help, substituting this script's basename.
    print """
%s [ARGUMENTS]
    -h, --help          This screen
    -l, --list          List available updates
    -u, --update [UUID] Launch the given update
    """ % (os.path.basename(sys.argv[0]),)
def main():
    # Parse arguments, then either list available updates or launch one.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hlu:',
                                   ['help', 'list', 'update=',])
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    cmd = None
    uuid = ''
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit(0)
        elif o in ('-l', '--list'):
            cmd = 'list'
        elif o in ('-u', '--update'):
            cmd = 'update'
            uuid = a
    # Node must be initialized before constructing Updater (it subscribes).
    rospy.init_node('update_robot', anonymous=True)
    updater = Updater()
    if cmd == 'list':
        updates = updater.list()
        if not len(updates):
            print ("No available updates")
        else:
            # Columnized (version, uuid) listing.
            print ("%-30s%s" % ("Version", "UUID"))
            for update in updates:
                print("%-30s%s" % (update[0], update[1]))
        sys.exit(0)
    elif cmd == 'update':
        if uuid == '':
            print "Error: no update uuid specified"
            sys.exit(1)
        # Exit with run_update's return code (0 = rebooting to update).
        sys.exit(run_update(updater, uuid))


if __name__ == '__main__':
    main()
|
abestick/ee125
|
baxter/tools/src/update_robot.py
|
Python
|
bsd-3-clause
| 7,023
|
#!/usr/bin/env python2
# Using class variables incorrectly
# What do you expect in the print statements below?
class A(object):
    x = 1 # class variable

class B(A):
    pass

class C(A):
    pass

# Lookup walks B/C's class dict first, then falls back to A's.
print A.x, B.x, C.x  # 1 1 1 -- all resolve to A.x
B.x = 2              # gives B its own x, shadowing A.x
print A.x, B.x, C.x  # 1 2 1
A.x = 3              # C still delegates to A; B keeps its own copy
print A.x, B.x, C.x  # 3 2 1
# answer:
# The class variable x is looked up by Python in the dictionary of the
# subclass, and each superclass, until it is found. In the case of the last
# line of the example code, class C doesn't have its own copy of x, so Python
# looks to class A for x. Thus, when you change the base class value at
# runtime, it will change the value for all the subclasses that haven't
# changed it.
|
enrimatta/RU_Python_IV
|
challenges/challenge02.py
|
Python
|
gpl-2.0
| 688
|
from django.contrib import auth
from django.core import validators
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models.manager import EmptyManager
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
import datetime
import urllib
# Sentinel stored in User.password when no usable password is set
# (see User.set_unusable_password / has_usable_password).
UNUSABLE_PASSWORD = '!' # This will never be a valid hash
try:
    set
except NameError:
    from sets import Set as set # Python 2.3 fallback
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and salt
    using the given algorithm ('md5', 'sha1' or 'crypt').

    Raises ValueError for an unknown algorithm, or for 'crypt' on platforms
    without the crypt module.
    """
    raw_password, salt = smart_str(raw_password), smart_str(salt)
    if algorithm == 'crypt':
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" password algorithm not supported in this environment')
        return crypt.crypt(raw_password, salt)
    # The rest of the supported algorithms are supported by hashlib, but
    # hashlib is only available in Python 2.5.
    try:
        import hashlib
    except ImportError:
        # Pre-2.5 fallback: the deprecated md5/sha modules.
        if algorithm == 'md5':
            import md5
            return md5.new(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            import sha
            return sha.new(salt + raw_password).hexdigest()
    else:
        if algorithm == 'md5':
            return hashlib.md5(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            return hashlib.sha1(salt + raw_password).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
def check_password(raw_password, enc_password):
    """
    Returns a boolean of whether the raw_password was correct. Handles
    encryption formats behind the scenes.
    """
    # Stored format is '[algo]$[salt]$[hexdigest]'; recompute and compare.
    algorithm, salt, expected = enc_password.split('$')
    return get_hexdigest(algorithm, salt, raw_password) == expected
class SiteProfileNotAvailable(Exception):
    """Raised by User.get_profile when AUTH_PROFILE_MODULE is unset or the
    configured profile model cannot be loaded."""
    pass
class Permission(models.Model):
    """The permissions system provides a way to assign permissions to specific users and groups of users.

    The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows:
        - The "add" permission limits the user's ability to view the "add" form and add an object.
        - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.

    Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date."

    Three basic permissions -- add, change and delete -- are automatically created for each Django model.
    """
    name = models.CharField(_('name'), max_length=50)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)

    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per content type, not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type', 'codename')

    def __unicode__(self):
        return u"%s | %s | %s" % (self.content_type.app_label, self.content_type, self.name)
class Group(models.Model):
    """Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups.

    A user in a group automatically has all the permissions granted to that group. For example, if the group Site editors has the permission can_edit_home_page, any user in that group will have that permission.

    Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only e-mail messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission, verbose_name=_('permissions'), blank=True, filter_interface=models.HORIZONTAL)

    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
        ordering = ('name',)

    class Admin:
        # Admin-site options (old pre-newforms-admin inner-class syntax).
        search_fields = ('name',)

    def __unicode__(self):
        return self.name
class UserManager(models.Manager):
    """Manager providing the standard create_user / create_superuser helpers."""

    def create_user(self, username, email, password=None):
        "Creates and saves a User with the given username, e-mail and password."
        now = datetime.datetime.now()
        # Positional construction matching User's field order; 'placeholder'
        # is immediately overwritten by set_password/set_unusable_password.
        user = self.model(None, username, '', '', email.strip().lower(), 'placeholder', False, True, False, now, now)
        if password:
            user.set_password(password)
        else:
            user.set_unusable_password()
        user.save()
        return user

    def create_superuser(self, username, email, password):
        # NOTE(review): unlike create_user, this returns None.
        u = self.create_user(username, email, password)
        u.is_staff = True
        u.is_active = True
        u.is_superuser = True
        u.save()

    def make_random_password(self, length=10, allowed_chars='abcdefghjkmnpqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ23456789'):
        "Generates a random password with the given length and given allowed_chars"
        # Note that default value of allowed_chars does not have "I" or letters
        # that look like it -- just to avoid confusion.
        from random import choice
        return ''.join([choice(allowed_chars) for i in range(length)])
class User(models.Model):
    """Users within the Django authentication system are represented by this model.

    Username and password are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True, validator_list=[validators.isAlphaNumeric], help_text=_("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."))
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('e-mail address'), blank=True)
    # Stored as '[algo]$[salt]$[hexdigest]' (set_password), as a legacy
    # unsalted md5 hexdigest (see check_password), or as UNUSABLE_PASSWORD.
    password = models.CharField(_('password'), max_length=128, help_text=_("Use '[algo]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
    is_staff = models.BooleanField(_('staff status'), default=False, help_text=_("Designates whether the user can log into this admin site."))
    is_active = models.BooleanField(_('active'), default=True, help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
    is_superuser = models.BooleanField(_('superuser status'), default=False, help_text=_("Designates that this user has all permissions without explicitly assigning them."))
    last_login = models.DateTimeField(_('last login'), default=datetime.datetime.now)
    date_joined = models.DateTimeField(_('date joined'), default=datetime.datetime.now)
    groups = models.ManyToManyField(Group, verbose_name=_('groups'), blank=True,
        help_text=_("In addition to the permissions manually assigned, this user will also get all permissions granted to each group he/she is in."))
    user_permissions = models.ManyToManyField(Permission, verbose_name=_('user permissions'), blank=True, filter_interface=models.HORIZONTAL)
    objects = UserManager()

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        ordering = ('username',)

    class Admin:
        # Admin-site options (old pre-newforms-admin inner-class syntax).
        fields = (
            (None, {'fields': ('username', 'password')}),
            (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
            (_('Permissions'), {'fields': ('is_staff', 'is_active', 'is_superuser', 'user_permissions')}),
            (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
            (_('Groups'), {'fields': ('groups',)}),
        )
        list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
        list_filter = ('is_staff', 'is_superuser')
        search_fields = ('username', 'first_name', 'last_name', 'email')

    def __unicode__(self):
        return self.username

    def get_absolute_url(self):
        return "/users/%s/" % urllib.quote(smart_str(self.username))

    def is_anonymous(self):
        "Always returns False. This is a way of comparing User objects to anonymous users."
        return False

    def is_authenticated(self):
        """Always return True. This is a way to tell if the user has been authenticated in templates.
        """
        return True

    def get_full_name(self):
        "Returns the first_name plus the last_name, with a space in between."
        full_name = u'%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def set_password(self, raw_password):
        # Generate a fresh 5-char salt and store 'sha1$salt$hexdigest'.
        import random
        algo = 'sha1'
        salt = get_hexdigest(algo, str(random.random()), str(random.random()))[:5]
        hsh = get_hexdigest(algo, salt, raw_password)
        self.password = '%s$%s$%s' % (algo, salt, hsh)

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        encryption formats behind the scenes.
        """
        # Backwards-compatibility check. Older passwords won't include the
        # algorithm or salt.
        if '$' not in self.password:
            is_correct = (self.password == get_hexdigest('md5', '', raw_password))
            if is_correct:
                # Convert the password to the new, more secure format.
                self.set_password(raw_password)
                self.save()
            return is_correct
        return check_password(raw_password, self.password)

    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = UNUSABLE_PASSWORD

    def has_usable_password(self):
        return self.password != UNUSABLE_PASSWORD

    def get_group_permissions(self):
        """
        Returns a set of permission strings that this user has through
        his/her groups. This method queries all available auth backends.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self))
        return permissions

    def get_all_permissions(self):
        # Union of permission strings from every backend that exposes
        # get_all_permissions.
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_all_permissions"):
                permissions.update(backend.get_all_permissions(self))
        return permissions

    def has_perm(self, perm):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general.
        """
        # Inactive users have no permissions.
        if not self.is_active:
            return False
        # Superusers have all permissions.
        if self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        for backend in auth.get_backends():
            if hasattr(backend, "has_perm"):
                if backend.has_perm(self, perm):
                    return True
        return False

    def has_perms(self, perm_list):
        """Returns True if the user has each of the specified permissions."""
        for perm in perm_list:
            if not self.has_perm(perm):
                return False
        return True

    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app
        label. Uses pretty much the same logic as has_perm, above.
        """
        if not self.is_active:
            return False
        if self.is_superuser:
            return True
        for backend in auth.get_backends():
            if hasattr(backend, "has_module_perms"):
                if backend.has_module_perms(self, app_label):
                    return True
        return False

    def get_and_delete_messages(self):
        # Drain this user's queued admin messages, returning their text.
        messages = []
        for m in self.message_set.all():
            messages.append(m.message)
            m.delete()
        return messages

    def email_user(self, subject, message, from_email=None):
        "Sends an e-mail to this User."
        from django.core.mail import send_mail
        send_mail(subject, message, from_email, [self.email])

    def get_profile(self):
        """
        Returns site-specific profile for this user. Raises
        SiteProfileNotAvailable if this site does not allow profiles.
        """
        # Cached per-instance after the first lookup.
        if not hasattr(self, '_profile_cache'):
            from django.conf import settings
            if not settings.AUTH_PROFILE_MODULE:
                raise SiteProfileNotAvailable
            try:
                app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
                model = models.get_model(app_label, model_name)
                self._profile_cache = model._default_manager.get(user__id__exact=self.id)
            except (ImportError, ImproperlyConfigured):
                raise SiteProfileNotAvailable
        return self._profile_cache
class Message(models.Model):
    """
    The message system is a lightweight way to queue messages for given
    users. A message is associated with a User instance (so it is only
    applicable for registered users). There's no concept of expiration or
    timestamps. Messages are created by the Django admin after successful
    actions. For example, "The poll Foo was created successfully." is a
    message.

    Messages are consumed (and deleted) by User.get_and_delete_messages.
    """
    user = models.ForeignKey(User)
    message = models.TextField(_('message'))

    def __unicode__(self):
        return self.message
class AnonymousUser(object):
    """Stand-in user for unauthenticated requests: no id, no permissions,
    empty groups/user_permissions, and save/delete/password ops disabled."""
    id = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    _groups = EmptyManager()
    _user_permissions = EmptyManager()

    def __init__(self):
        pass

    def __unicode__(self):
        return 'AnonymousUser'

    def __str__(self):
        return unicode(self).encode('utf-8')

    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return 1 # instances always return the same hash value

    def save(self):
        raise NotImplementedError

    def delete(self):
        raise NotImplementedError

    def set_password(self, raw_password):
        raise NotImplementedError

    def check_password(self, raw_password):
        raise NotImplementedError

    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)

    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)

    def has_perm(self, perm):
        return False

    def has_module_perms(self, module):
        return False

    def get_and_delete_messages(self):
        return []

    def is_anonymous(self):
        return True

    def is_authenticated(self):
        return False
|
paulsmith/geodjango
|
django/contrib/auth/models.py
|
Python
|
bsd-3-clause
| 15,774
|
#!/usr/bin/env python
import argparse
import redis
import flask
import calendar
import dateutil.parser
from gevent.wsgi import WSGIServer
from flask import Flask, jsonify
from flask_cors import CORS, cross_origin
# Flask app with permissive CORS; REDIS_POOL is initialized in main().
app = Flask(__name__)
CORS(app)
REDIS_POOL = None
@app.route('/')
@cross_origin()
def hello_world():
    # Root endpoint: returns a plain 'OK' (serves as a connectivity check).
    return 'OK'
@app.route('/search', methods=["POST", 'GET'])
@cross_origin()
def search():
    # Grafana /search endpoint: every redis key is a selectable target.
    redis_client = redis.Redis(connection_pool=REDIS_POOL)
    return jsonify(redis_client.keys())
def process_targets(targets, redis_client):
    """Expand glob-style target names into concrete key names.

    Any target containing '*' is expanded via the redis KEYS command;
    literal targets are passed through unchanged. Order is preserved.
    """
    expanded = []
    for pattern in targets:
        matches = redis_client.keys(pattern) if '*' in pattern else [pattern]
        expanded.extend(matches)
    return expanded
@app.route('/query', methods=["POST", 'GET'])
def query():
    # Grafana /query endpoint: fetch datapoints for each requested target
    # over the requested time range.
    request = flask.request.get_json()  # NOTE: local shadows the name 'request'
    response = []
    # Convert the ISO-8601 range bounds to epoch seconds (UTC).
    stime = calendar.timegm(dateutil.parser.parse(request['range']['from']).timetuple())
    etime = calendar.timegm(dateutil.parser.parse(request['range']['to']).timetuple())
    redis_client = redis.Redis(connection_pool=REDIS_POOL)
    targets = process_targets([t['target'] for t in request['targets']], redis_client)
    for target in targets:
        # 'ts.range' appears to be a redis time-series module command
        # returning (timestamp, value) pairs -- TODO confirm against the
        # deployed module.
        args = ['ts.range', target, int(stime), int(etime)]
        if 'intervalMs' in request and request['intervalMs'] > 0 and request['intervalMs']/1000 > 1:
            # Downsample by averaging over the interval Grafana suggests.
            args += ['avg', int(round(request['intervalMs']/1000))]
        print(args)
        redis_resp = redis_client.execute_command(*args)
        # Grafana expects [value, timestamp_ms] datapoint pairs.
        datapoints = [(x2.decode("ascii"), x1*1000) for x1, x2 in redis_resp]
        response.append(dict(target=target, datapoints=datapoints))
    return jsonify(response)
@app.route('/annotations')
def annotations():
    # Grafana /annotations endpoint: none supported, always empty.
    return jsonify([])
def main():
    """Parse CLI options, build the shared redis connection pool, and serve
    the Grafana datasource API until interrupted."""
    global REDIS_POOL
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", help="server address to listen to", default="0.0.0.0")
    parser.add_argument("--port", help="port number to listen to", default=8080, type=int)
    parser.add_argument("--redis-server", help="redis server address", default="localhost")
    parser.add_argument("--redis-port", help="redis server port", default=6379, type=int)
    args = parser.parse_args()
    REDIS_POOL = redis.ConnectionPool(host=args.redis_server, port=args.redis_port)
    # Fix: honor --host. It was parsed but ignored -- the server always bound
    # '' (all interfaces). The default "0.0.0.0" preserves the old behavior.
    http_server = WSGIServer((args.host, args.port), app)
    http_server.serve_forever()


if __name__ == '__main__':
    main()
|
danni-m/redis-tsdb
|
tools/GrafanaDatastoreServer.py
|
Python
|
agpl-3.0
| 2,440
|
# Copyright 2012 John Kleint
# This is free software, licensed under the MIT License; see LICENSE.txt.
"""BlameThrower analyzer unit tests."""
import unittest
from test import AnalyneTest
class AnalyzerTests(AnalyneTest):
    """Each case delegates to AnalyneTest.assert_analynes_equal for one
    (analyzer, fixture-project) pair."""

    def test_httpbin_pylint(self):
        self.assert_analynes_equal('analyzers', 'pylint', 'httpbin')

    def test_shove_pylint(self):
        self.assert_analynes_equal('analyzers', 'pylint', 'shove')

    def test_apricot_jslint(self):
        self.assert_analynes_equal('analyzers', 'jslint', 'apricot')

    def test_os_utils_findbugs(self):
        self.assert_analynes_equal('analyzers', 'findbugs', 'os-utils')


if __name__ == "__main__":
    unittest.main()
|
jkleint/blamethrower
|
test/analyzers/test_analyzers.py
|
Python
|
mit
| 696
|
"""
SoftLayer.tests.CLI.modules.server_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a series of integration tests designed to test the complete
command line interface.
:license: MIT, see LICENSE for more details.
"""
# Resolve the name of the builtins module for use with mock.patch:
# 'builtins' on Python 3, '__builtin__' on Python 2.
try:
    # Python 3.x compatibility
    import builtins  # NOQA
    builtins_name = 'builtins'
except ImportError:
    builtins_name = '__builtin__'
import mock
from SoftLayer.CLI import exceptions
from SoftLayer.CLI.server import create
from SoftLayer import testing
import json
import tempfile
class ServerCLITests(testing.TestCase):
    """Integration tests for the ``server`` CLI module.

    Each test drives the full command line via ``run_command`` and checks
    the exit code, the JSON (or raw) output, and — where mocks are used —
    the exact arguments passed to the underlying SoftLayer managers.
    """
    def test_server_cancel_reasons(self):
        result = self.run_command(['server', 'cancel-reasons'])
        output = json.loads(result.output)
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(len(output), 10)
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    def test_server_create_options(self, packages):
        # One known chassis package; the fixture data drives the options.
        packages.return_value = [(999, 'Chassis 999')]
        result = self.run_command(['server', 'create-options', '999'])
        expected = {
            'cpu': [
                {'Description': 'Dual Quad Core Pancake 200 - 1.60GHz',
                 'ID': 723},
                {'Description': 'Dual Quad Core Pancake 200 - 1.80GHz',
                 'ID': 724}],
            'datacenter': ['RANDOM_LOCATION'],
            'disk': ['250_SATA_II', '500_SATA_II'],
            'disk_controllers': ['None', 'RAID0'],
            'dual nic': ['1000_DUAL', '100_DUAL', '10_DUAL'],
            'memory': [4, 6],
            'os (CENTOS)': ['CENTOS_6_64_LAMP', 'CENTOS_6_64_MINIMAL'],
            'os (REDHAT)': ['REDHAT_6_64_LAMP', 'REDHAT_6_64_MINIMAL'],
            'os (UBUNTU)': ['UBUNTU_12_64_LAMP', 'UBUNTU_12_64_MINIMAL'],
            'os (WIN)': [
                'WIN_2008-DC_64',
                'WIN_2008-ENT_64',
                'WIN_2008-STD-R2_64',
                'WIN_2008-STD_64',
                'WIN_2012-DC-HYPERV_64'],
            'single nic': ['100', '1000']}
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), expected)
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    def test_server_create_options_with_invalid_chassis(self, packages):
        # Requested chassis id (999) is not among available packages.
        packages.return_value = [(998, 'Legacy Chassis')]
        result = self.run_command(['server', 'create-options', '999'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    @mock.patch('SoftLayer.HardwareManager.get_bare_metal_package_id')
    def test_server_create_options_for_bmc(self, bmpi, packages):
        # Bare Metal Instance packages take a different options path
        # (combined memory/cpu entries, no disk controllers).
        packages.return_value = [(1099, 'Bare Metal Instance')]
        bmpi.return_value = '1099'
        result = self.run_command(['server', 'create-options', '1099'])
        expected = {
            'memory/cpu': [
                {'cpu': ['2'], 'memory': '2'},
                {'cpu': ['2', '4'], 'memory': '4'},
            ],
            'datacenter': ['RANDOM_LOCATION'],
            'disk': ['250_SATA_II', '500_SATA_II'],
            'dual nic': ['1000_DUAL', '100_DUAL', '10_DUAL'],
            'os (CENTOS)': ['CENTOS_6_64_LAMP', 'CENTOS_6_64_MINIMAL'],
            'os (REDHAT)': ['REDHAT_6_64_LAMP', 'REDHAT_6_64_MINIMAL'],
            'os (UBUNTU)': ['UBUNTU_12_64_LAMP', 'UBUNTU_12_64_MINIMAL'],
            'os (WIN)': [
                'WIN_2008-DC_64',
                'WIN_2008-ENT_64',
                'WIN_2008-STD-R2_64',
                'WIN_2008-STD_64',
                'WIN_2012-DC-HYPERV_64'],
            'single nic': ['100', '1000']}
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), expected)
    def test_server_details(self):
        result = self.run_command(['server', 'detail', '1234',
                                   '--passwords', '--price'])
        expected = {
            'status': 'ACTIVE',
            'datacenter': 'TEST00',
            'created': '2013-08-01 15:23:45',
            'notes': 'These are test notes.',
            'hostname': 'hardware-test1.test.sftlyr.ws',
            'public_ip': '172.16.1.100',
            'private_ip': '10.1.0.2',
            'ipmi_ip': '10.1.0.3',
            'price rate': 1.54,
            'memory': 2048,
            'cores': 2,
            'ptr': '2.0.1.10.in-addr.arpa',
            'os': 'Ubuntu',
            'id': 1000,
            'tags': ['test_tag'],
            'users': ['root abc123'],
            'vlans': [{'id': 9653, 'number': 1800, 'type': 'PRIVATE'},
                      {'id': 19082, 'number': 3672, 'type': 'PUBLIC'}],
            'owner': 'chechu'
        }
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), expected)
    def test_list_servers(self):
        result = self.run_command(['server', 'list', '--tag=openstack'])
        expected = [
            {
                'datacenter': 'TEST00',
                'primary_ip': '172.16.1.100',
                'host': 'hardware-test1.test.sftlyr.ws',
                'memory': 2048,
                'cores': 2,
                'id': 1000,
                'backend_ip': '10.1.0.2',
                'active_transaction': 'TXN_NAME',
                'owner': 'chechu'
            },
            {
                'datacenter': 'TEST00',
                'primary_ip': '172.16.4.94',
                'host': 'hardware-test2.test.sftlyr.ws',
                'memory': 4096,
                'cores': 4,
                'id': 1001,
                'backend_ip': '10.1.0.3',
                'active_transaction': None,
                'owner': 'chechu'
            },
            {
                'datacenter': 'TEST00',
                'primary_ip': '172.16.4.95',
                'host': 'hardware-bad-memory.test.sftlyr.ws',
                'memory': 0,
                'cores': 4,
                'id': 1002,
                'backend_ip': '10.1.0.4',
                'active_transaction': None,
                'owner': 'chechu'
            }
        ]
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), expected)
    @mock.patch('SoftLayer.CLI.formatting.no_going_back')
    @mock.patch('SoftLayer.HardwareManager.reload')
    def test_server_reload(self, reload_mock, ngb_mock):
        ngb_mock.return_value = False
        # Check the positive case
        result = self.run_command(['--really', 'server', 'reload', '12345',
                                   '--key=4567'])
        self.assertEqual(result.exit_code, 0)
        reload_mock.assert_called_with(12345, None, [4567])
        # Now check to make sure we properly call CLIAbort in the negative case
        result = self.run_command(['server', 'reload', '12345'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    @mock.patch('SoftLayer.CLI.formatting.no_going_back')
    @mock.patch('SoftLayer.HardwareManager.cancel_hardware')
    def test_cancel_server(self, cancel_mock, ngb_mock):
        ngb_mock.return_value = False
        # Check the positive case
        result = self.run_command(['--really', 'server', 'cancel', '12345',
                                   '--reason=Test', '--comment=Test'])
        self.assertEqual(result.exit_code, 0)
        cancel_mock.assert_called_with(12345, "Test", "Test", False)
        # Test
        result = self.run_command(['server', 'cancel', '12345',
                                   '--reason=Test', '--comment=Test'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_server_power_off(self, confirm_mock):
        # Check the positive case
        result = self.run_command(['--really', 'server', 'power-off', '12345'])
        self.assert_called_with('SoftLayer_Hardware_Server', 'powerOff',
                                identifier=12345)
        # Now check to make sure we properly call CLIAbort in the negative case
        confirm_mock.return_value = False
        result = self.run_command(['server', 'power-off', '12345'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    def test_server_reboot_default(self):
        result = self.run_command(['--really', 'server', 'reboot', '12345'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server', 'rebootDefault',
                                identifier=12345)
    def test_server_reboot_soft(self):
        result = self.run_command(['--really', 'server', 'reboot', '12345',
                                   '--soft'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server', 'rebootSoft',
                                identifier=12345)
    def test_server_reboot_hard(self):
        result = self.run_command(['--really', 'server', 'reboot', '12345',
                                   '--hard'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server', 'rebootHard',
                                identifier=12345)
    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_server_reboot_negative(self, confirm_mock):
        confirm_mock.return_value = False
        result = self.run_command(['server', 'reboot', '12345'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    def test_server_power_on(self):
        result = self.run_command(['--really', 'server', 'power-on', '12345'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server', 'powerOn',
                                identifier=12345)
    def test_server_power_cycle(self):
        result = self.run_command(['--really', 'server', 'power-cycle',
                                   '12345'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server', 'powerCycle',
                                identifier=12345)
    @mock.patch('SoftLayer.CLI.formatting.confirm')
    def test_server_power_cycle_negative(self, confirm_mock):
        confirm_mock.return_value = False
        result = self.run_command(['server', 'power-cycle', '12345'])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.CLIAbort)
    def test_nic_edit_server(self):
        result = self.run_command(['server', 'nic-edit', '12345', 'public',
                                   '--speed=100'])
        self.assertEqual(result.exit_code, 0)
        self.assert_called_with('SoftLayer_Hardware_Server',
                                'setPublicNetworkInterfaceSpeed',
                                args=(100,),
                                identifier=12345)
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    def test_list_chassis_server(self, packages):
        packages.return_value = [(1, 'Chassis 1', 'Some chassis'),
                                 (2, 'Chassis 2', 'Another chassis')]
        result = self.run_command(['server', 'list-chassis'])
        expected = [{'chassis': 'Chassis 1', 'code': 1},
                    {'chassis': 'Chassis 2', 'code': 2}]
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output), expected)
    @mock.patch('SoftLayer.HardwareManager.verify_order')
    def test_create_server_test_flag(self, verify_mock):
        # --test verifies the order (no purchase) and prints an
        # itemized price table in raw format.
        verify_mock.return_value = {
            'prices': [
                {
                    'recurringFee': 0.0,
                    'setupFee': 0.0,
                    'item': {'description': 'First Item'},
                },
                {
                    'recurringFee': 25.0,
                    'setupFee': 0.0,
                    'item': {'description': 'Second Item'},
                }
            ]
        }
        result = self.run_command(['server', 'create',
                                   '--chassis=999',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--network=100',
                                   '--disk=250_SATA_II',
                                   '--disk=250_SATA_II',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--memory=4',
                                   '--controller=RAID0',
                                   '--test',
                                   '--key=1234',
                                   '--key=456',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   '--postinstall='
                                   'http://somescript.foo/myscript.sh',
                                   ],
                                  fmt='raw')
        self.assertEqual(result.exit_code, 0)
        self.assertIn("First Item", result.output)
        self.assertIn("Second Item", result.output)
        self.assertIn("Total monthly cost", result.output)
    @mock.patch('SoftLayer.HardwareManager.verify_order')
    def test_create_server_test_no_disk(self, verify_mock):
        verify_mock.return_value = {
            'prices': [
                {
                    'recurringFee': 0.0,
                    'setupFee': 0.0,
                    'item': {'description': 'First Item'},
                },
                {
                    'recurringFee': 25.0,
                    'setupFee': 0.0,
                    'item': {'description': 'Second Item'},
                }
            ]
        }
        result = self.run_command(['server', 'create',
                                   '--chassis=999',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--network=100',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--memory=4',
                                   '--controller=RAID0',
                                   '--test',
                                   '--key=1234',
                                   '--key=456',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   '--postinstall='
                                   'http://somescript.foo/myscript.sh',
                                   ],
                                  fmt='raw')
        self.assertEqual(result.exit_code, 0)
    @mock.patch('SoftLayer.HardwareManager.verify_order')
    def test_create_server_test_no_disk_no_raid(self, verify_mock):
        verify_mock.return_value = {
            'prices': [
                {
                    'recurringFee': 0.0,
                    'setupFee': 0.0,
                    'item': {'description': 'First Item'},
                },
                {
                    'recurringFee': 25.0,
                    'setupFee': 0.0,
                    'item': {'description': 'Second Item'},
                }
            ]
        }
        result = self.run_command(['server', 'create',
                                   '--chassis=999',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--network=100',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--memory=4',
                                   '--test',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   ],
                                  fmt='raw')
        self.assertEqual(result.exit_code, 0)
    @mock.patch('SoftLayer.HardwareManager.place_order')
    def test_create_server(self, order_mock):
        order_mock.return_value = {
            'orderId': 98765,
            'orderDate': '2013-08-02 15:23:47'
        }
        result = self.run_command(['--really', 'server', 'create',
                                   '--chassis=999',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--network=100',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--memory=4',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   ])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output),
                         {'id': 98765, 'created': '2013-08-02 15:23:47'})
    def test_create_server_missing_required(self):
        # This is missing a required argument
        result = self.run_command(['server', 'create',
                                   # Note: no chassis id
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--network=100',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--memory=4',
                                   ])
        self.assertEqual(result.exit_code, 1)
        self.assertIsInstance(result.exception, SystemExit)
    @mock.patch('SoftLayer.CLI.template.export_to_template')
    def test_create_server_with_export(self, export_to_template):
        result = self.run_command(['server', 'create',
                                   # Note: no chassis id
                                   '--chassis=999',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--memory=4',
                                   '--network=100',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--key=1234',
                                   '--export=/path/to/test_file.txt',
                                   ])
        self.assertEqual(result.exit_code, 0)
        self.assertIn("Successfully exported options to a template file.",
                      result.output)
        export_to_template.assert_called_with('/path/to/test_file.txt',
                                              {'domain': 'example.com',
                                               'san': False,
                                               'dedicated': False,
                                               'private': False,
                                               'disk': (),
                                               'userdata': None,
                                               'network': '100',
                                               'billing': 'monthly',
                                               'userfile': None,
                                               'hostname': 'test',
                                               'template': None,
                                               'memory': 4,
                                               'test': False,
                                               'postinstall': None,
                                               'controller': None,
                                               'chassis': '999',
                                               'key': ('1234',),
                                               'vlan_private': None,
                                               'wait': None,
                                               'datacenter': 'TEST00',
                                               'os': 'UBUNTU_12_64_MINIMAL',
                                               'cpu': 4,
                                               'vlan_public': None},
                                              exclude=['wait', 'test'])
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    @mock.patch('SoftLayer.HardwareManager.get_bare_metal_package_id')
    @mock.patch('SoftLayer.HardwareManager.verify_order')
    def test_create_server_test_for_bmc(self, verify_mock, bmpi, packages):
        packages.return_value = [(1099, 'Bare Metal Instance', 'BMC')]
        bmpi.return_value = '1099'
        # First, test the --test flag
        verify_mock.return_value = {
            'prices': [
                {
                    'recurringFee': 0.0,
                    'setupFee': 0.0,
                    'item': {'description': 'First Item'},
                },
                {
                    'recurringFee': 25.0,
                    'setupFee': 0.0,
                    'item': {'description': 'Second Item'},
                }
            ]
        }
        result = self.run_command(['server', 'create',
                                   '--chassis=1099',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=2',
                                   '--memory=2',
                                   '--network=100',
                                   '--disk=250_SATA_II',
                                   '--disk=250_SATA_II',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   '--key=1234',
                                   '--key=456',
                                   '--test',
                                   '--postinstall='
                                   'http://somescript.foo/myscript.sh',
                                   '--billing=hourly',
                                   ])
        self.assertEqual(result.exit_code, 0)
        self.assertIn("First Item", result.output)
        self.assertIn("Second Item", result.output)
        self.assertIn("Total monthly cost", result.output)
    @mock.patch('SoftLayer.HardwareManager'
                '.get_available_dedicated_server_packages')
    @mock.patch('SoftLayer.HardwareManager.get_bare_metal_package_id')
    @mock.patch('SoftLayer.HardwareManager.place_order')
    def test_create_server_for_bmc(self, order_mock, bmpi, packages):
        order_mock.return_value = {
            'orderId': 98765,
            'orderDate': '2013-08-02 15:23:47'
        }
        result = self.run_command(['--really', 'server', 'create',
                                   '--chassis=1099',
                                   '--hostname=test',
                                   '--domain=example.com',
                                   '--datacenter=TEST00',
                                   '--cpu=4',
                                   '--memory=4',
                                   '--network=100',
                                   '--disk=250_SATA_II',
                                   '--disk=250_SATA_II',
                                   '--os=UBUNTU_12_64_MINIMAL',
                                   '--vlan-public=10234',
                                   '--vlan-private=20468',
                                   '--key=1234',
                                   '--key=456',
                                   '--billing=hourly',
                                   ])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(json.loads(result.output),
                         {'id': 98765, 'created': '2013-08-02 15:23:47'})
    def test_edit_server_userdata_and_file(self):
        # Test both userdata and userfile at once
        with tempfile.NamedTemporaryFile() as userfile:
            result = self.run_command(['server', 'edit', '1000',
                                       '--hostname=hardware-test1',
                                       '--domain=test.sftlyr.ws',
                                       '--userdata=My data',
                                       '--userfile=%s' % userfile.name])
        self.assertEqual(result.exit_code, 2)
        self.assertIsInstance(result.exception, exceptions.ArgumentError)
    def test_edit_server_userdata(self):
        result = self.run_command(['server', 'edit', '1000',
                                   '--hostname=hardware-test1',
                                   '--domain=test.sftlyr.ws',
                                   '--userdata=My data'])
        self.assertEqual(result.exit_code, 0)
        self.assertEqual(result.output, "")
        self.assert_called_with('SoftLayer_Hardware_Server', 'editObject',
                                args=({'domain': 'test.sftlyr.ws',
                                       'hostname': 'hardware-test1'},),
                                identifier=1000)
    @mock.patch('SoftLayer.HardwareManager.edit')
    def test_edit_server_failed(self, edit_mock):
        edit_mock.return_value = False
        result = self.run_command(['server', 'edit', '1000',
                                   '--hostname=hardware-test1',
                                   '--domain=test.sftlyr.ws',
                                   '--userdata=My data'])
        self.assertEqual(result.exit_code, 2)
        self.assertEqual(result.output, "")
        edit_mock.assert_called_with(1000,
                                     userdata='My data',
                                     domain='test.sftlyr.ws',
                                     hostname='hardware-test1')
    def test_edit_server_userfile(self):
        with tempfile.NamedTemporaryFile() as userfile:
            userfile.write(b"some data")
            userfile.flush()
            result = self.run_command(['server', 'edit', '1000',
                                       '--userfile=%s' % userfile.name])
            self.assertEqual(result.exit_code, 0)
            self.assertEqual(result.output, "")
            self.assert_called_with('SoftLayer_Hardware_Server',
                                    'setUserMetadata',
                                    args=(['some data'],),
                                    identifier=1000)
    def test_get_default_value_returns_none_for_unknown_category(self):
        option_mock = {'categories': {'cat1': []}}
        output = create._get_default_value(option_mock, 'nope')
        self.assertEqual(None, output)
|
cloudify-cosmo/softlayer-python
|
SoftLayer/tests/CLI/modules/server_tests.py
|
Python
|
mit
| 27,613
|
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from config_common.rhn_log import log_debug, die
import handler_base
class Handler(handler_base.HandlerBase):
    # Lists the files managed in the config channel, one per line, with a
    # "DoFoS" column marking each entry as (F)ile, (D)irectory or (S)ymlink.
    def run(self):
        log_debug(2)
        r = self.repository
        files = r.list_files()
        if not files:
            die(1, "No managed files.")
        label = "Config Channel"
        # Column width: widest channel name (or the header label itself),
        # plus two spaces of padding.
        maxlen = max(map(lambda s: len(s[0]), files))
        maxlen = max(maxlen, len(label)) + 2
        print "DoFoS %*s %s" % (maxlen, label, "File")
        for file in files:
            # checking to see if the filetype is in the 'file' entry,
            # and if it is and that type is '1', it is a file
            if (len(file) < 3) or file[2] == 1:
                print "F %*s %s" % (maxlen, file[0], file[1])
            elif file[2] == 2 :
                # the filetype is a directory
                print "D %*s %s" % (maxlen, file[0], file[1])
            else:
                # any other filetype is presumably a symlink — TODO confirm
                print "S %*s %s" % (maxlen, file[0], file[1])
|
colloquium/spacewalk
|
client/tools/rhncfg/config_client/rhncfgcli_list.py
|
Python
|
gpl-2.0
| 1,604
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# Filename: MAX7219fonts.py
# -----------------------------------------------------------
# Fonts data for use by the MAX7219array.py library
#
# v1.0
# JLC Archibald
# -----------------------------------------------------------
# Structure:
# - each font is a list of 256 characters
# - each character represented as an 8x8 binary bitmap:
# - each character's data comprises an 8-byte list
# - each byte represents one column of the character
# - the bytes are in column order left-to-right
# - the bits in each byte are in row order: MSB (bottom row)
# to LSB (top row)
# - some fonts only have non-zero (ie non-blank) data for
# characters in the range 0x20 to 0x7F
# -----------------------------------------------------------
# Each font's source is listed below, although some have had
# to be transposed to the above structure
# -----------------------------------------------------------
# Additional 8x8 fonts can be added as follows:
# - add additional list data at the bottom of this file
# - ensure that the file structure is maintained, and
# that the new font data is in the same form
# - include zero data for any non-represented characters, so
# that every font variable is a 256x8 nested list
# - import the variable names representing the additional
# fonts into the MAX7219array.py library, and into the
# main script where they will be used as arguments to
# the library functions
# -----------------------------------------------------------
#
# Fonts data begins here:
# -----------------------------------------------------------
# Bit patterns for the CP437 font
# See https://en.wikipedia.org/wiki/Code_page_437
# Source: max7219 module by RM Hull
# (see https://github.com/rm-hull/max7219)
# 8x8 column-major bitmaps (one byte per column, LSB = top row) covering
# a small Latin set plus Turkish letters (ç, ğ, ı, ö, ş, ü) and digits.
# NOTE(review): unlike the 256-entry list fonts in this file, this is a
# dict keyed by character, so lookups must handle missing keys.
AKTOS_TINY_FONT = {
    u"a": [0, 32, 84, 84, 84, 120, 0, 0],
    u"b": [0, 127, 80, 72, 72, 48, 0, 0],
    u"c": [0, 56, 68, 68, 68, 0, 0, 0],
    u"ç": [0, 56, 68, 196, 68, 0, 0, 0],
    u"d": [0, 48, 72, 72, 80, 127, 0, 0],
    u"e": [56, 84, 84, 84, 24, 0, 0, 0],
    u"f": [0, 16, 252, 18, 2, 4, 0, 0],
    u"g": [0, 24, 164, 164, 164, 124, 0, 0],
    u"ğ": [0, 24, 165, 165, 165, 125, 0, 0],
    u"h": [0, 0, 127, 16, 8, 8, 112, 0],
    u"ı": [0, 0, 0, 124, 0, 0, 0, 0],
    u"i": [0, 0, 0, 122, 0, 0, 0, 0],
    u"j": [0, 0, 128, 132, 125, 0, 0, 0],
    u"k": [0, 0, 127, 16, 40, 68, 0, 0],
    u"l": [0, 0, 1, 63, 64, 0, 0, 0],
    u"m": [124, 4, 24, 4, 120, 0, 0, 0],
    u"n": [0, 0, 124, 8, 4, 4, 120, 0],
    u"o": [0, 56, 68, 68, 68, 56, 0, 0],
    u"ö": [0, 56, 69, 68, 69, 56, 0, 0],
    u"p": [0, 0, 252, 36, 36, 24, 0, 0],
    u"r": [0, 0, 124, 8, 4, 4, 0, 0],
    u"s": [0, 72, 84, 84, 84, 32, 0, 0],
    u"ş": [0, 0, 0, 72, 84, 212, 84, 32],
    u"t": [0, 0, 4, 63, 68, 0, 0, 0],
    u"u": [0, 0, 60, 64, 64, 32, 124, 0],
    u"ü": [0, 0, 60, 65, 64, 33, 124, 0],
    u"v": [0, 28, 32, 64, 32, 28, 0, 0],
    u"w": [0, 60, 64, 48, 64, 60, 0, 0],
    u"x": [0, 68, 40, 16, 40, 68, 0, 0],
    u"y": [0, 28, 160, 160, 160, 124, 0, 0],
    u"z": [68, 100, 84, 76, 68, 0, 0, 0],
    u"A": [126, 9, 9, 9, 126, 0, 0, 0],
    u"E": [127, 73, 73, 65, 0, 0, 0, 0],
    u"H": [127, 8, 8, 127, 0, 0, 0, 0],
    u"0": [0, 62, 81, 73, 69, 62, 0, 0],
    u"1": [0, 0, 66, 127, 64, 0, 0, 0],
    u"2": [0, 0, 66, 97, 81, 73, 70, 0],
    u"3": [0, 0, 33, 65, 69, 75, 49, 0],
    u"4": [0, 24, 20, 18, 127, 16, 0, 0],
    u"5": [0, 0, 39, 69, 69, 69, 57, 0],
    u"6": [0, 60, 74, 73, 73, 48, 0, 0],
    u"7": [0, 0, 1, 113, 9, 5, 3, 0],
    u"8": [0, 0, 54, 73, 73, 73, 54, 0],
    u"9": [0, 0, 6, 73, 73, 41, 30, 0],
    }
# -----------------------------------------------------------
# Bit patterns for SINCLAIRS_FONT
# (based on the character set from the Sinclair ZX Spectrum)
# Source: www.henningkarlsen.com/electronics/r_fonts.php
# Transposed by JLCArchibald
# Note: Only contains characters 0x20 - 0x7E inclusive
# All others will appear as blanks
SINCLAIRS_FONT = [
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x00
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x01
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x02
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x03
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x04
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x05
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x06
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x07
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x08
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x09
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x10
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x11
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x12
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x13
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x14
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x15
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x16
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x17
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x18
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x19
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # ' '
[ 0x00, 0x00, 0x00, 0x00, 0x5F, 0x00, 0x00, 0x00 ], # '!'
[ 0x00, 0x00, 0x00, 0x03, 0x00, 0x03, 0x00, 0x00 ], # '"'
[ 0x00, 0x24, 0x7E, 0x24, 0x24, 0x7E, 0x24, 0x00 ], # '#'
[ 0x00, 0x2E, 0x2A, 0x7F, 0x2A, 0x3A, 0x00, 0x00 ], # '$'
[ 0x00, 0x46, 0x26, 0x10, 0x08, 0x64, 0x62, 0x00 ], # '%'
[ 0x00, 0x20, 0x54, 0x4A, 0x54, 0x20, 0x50, 0x00 ], # '&'
[ 0x00, 0x00, 0x00, 0x04, 0x02, 0x00, 0x00, 0x00 ], # '''
[ 0x00, 0x00, 0x00, 0x3C, 0x42, 0x00, 0x00, 0x00 ], # '('
[ 0x00, 0x00, 0x00, 0x42, 0x3C, 0x00, 0x00, 0x00 ], # ')'
[ 0x00, 0x10, 0x54, 0x38, 0x54, 0x10, 0x00, 0x00 ], # '*'
[ 0x00, 0x10, 0x10, 0x7C, 0x10, 0x10, 0x00, 0x00 ], # '+'
[ 0x00, 0x00, 0x00, 0x80, 0x60, 0x00, 0x00, 0x00 ], # '
[ 0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00 ], # '-'
[ 0x00, 0x00, 0x00, 0x60, 0x60, 0x00, 0x00, 0x00 ], # '.'
[ 0x00, 0x40, 0x20, 0x10, 0x08, 0x04, 0x00, 0x00 ], # '/'
[ 0x3C, 0x62, 0x52, 0x4A, 0x46, 0x3C, 0x00, 0x00 ], # '0'
[ 0x44, 0x42, 0x7E, 0x40, 0x40, 0x00, 0x00, 0x00 ], # '1'
[ 0x64, 0x52, 0x52, 0x52, 0x52, 0x4C, 0x00, 0x00 ], # '2'
[ 0x24, 0x42, 0x42, 0x4A, 0x4A, 0x34, 0x00, 0x00 ], # '3'
[ 0x30, 0x28, 0x24, 0x7E, 0x20, 0x20, 0x00, 0x00 ], # '4'
[ 0x2E, 0x4A, 0x4A, 0x4A, 0x4A, 0x32, 0x00, 0x00 ], # '5'
[ 0x3C, 0x4A, 0x4A, 0x4A, 0x4A, 0x30, 0x00, 0x00 ], # '6'
[ 0x02, 0x02, 0x62, 0x12, 0x0A, 0x06, 0x00, 0x00 ], # '7'
[ 0x34, 0x4A, 0x4A, 0x4A, 0x4A, 0x34, 0x00, 0x00 ], # '8'
[ 0x0C, 0x52, 0x52, 0x52, 0x52, 0x3C, 0x00, 0x00 ], # '9'
[ 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00 ], # ':'
[ 0x00, 0x00, 0x80, 0x64, 0x00, 0x00, 0x00, 0x00 ], # ';'
[ 0x00, 0x00, 0x10, 0x28, 0x44, 0x00, 0x00, 0x00 ], # '<'
[ 0x00, 0x28, 0x28, 0x28, 0x28, 0x28, 0x00, 0x00 ], # '='
[ 0x00, 0x00, 0x44, 0x28, 0x10, 0x00, 0x00, 0x00 ], # '>'
[ 0x00, 0x04, 0x02, 0x02, 0x52, 0x0A, 0x04, 0x00 ], # '?'
[ 0x00, 0x3C, 0x42, 0x5A, 0x56, 0x5A, 0x1C, 0x00 ], # '@'
[ 0x7C, 0x12, 0x12, 0x12, 0x12, 0x7C, 0x00, 0x00 ], # 'A'
[ 0x7E, 0x4A, 0x4A, 0x4A, 0x4A, 0x34, 0x00, 0x00 ], # 'B'
[ 0x3C, 0x42, 0x42, 0x42, 0x42, 0x24, 0x00, 0x00 ], # 'C'
[ 0x7E, 0x42, 0x42, 0x42, 0x24, 0x18, 0x00, 0x00 ], # 'D'
[ 0x7E, 0x4A, 0x4A, 0x4A, 0x4A, 0x42, 0x00, 0x00 ], # 'E'
[ 0x7E, 0x0A, 0x0A, 0x0A, 0x0A, 0x02, 0x00, 0x00 ], # 'F'
[ 0x3C, 0x42, 0x42, 0x52, 0x52, 0x34, 0x00, 0x00 ], # 'G'
[ 0x7E, 0x08, 0x08, 0x08, 0x08, 0x7E, 0x00, 0x00 ], # 'H'
[ 0x00, 0x42, 0x42, 0x7E, 0x42, 0x42, 0x00, 0x00 ], # 'I'
[ 0x30, 0x40, 0x40, 0x40, 0x40, 0x3E, 0x00, 0x00 ], # 'J'
[ 0x7E, 0x08, 0x08, 0x14, 0x22, 0x40, 0x00, 0x00 ], # 'K'
[ 0x7E, 0x40, 0x40, 0x40, 0x40, 0x40, 0x00, 0x00 ], # 'L'
[ 0x7E, 0x04, 0x08, 0x08, 0x04, 0x7E, 0x00, 0x00 ], # 'M'
[ 0x7E, 0x04, 0x08, 0x10, 0x20, 0x7E, 0x00, 0x00 ], # 'N'
[ 0x3C, 0x42, 0x42, 0x42, 0x42, 0x3C, 0x00, 0x00 ], # 'O'
[ 0x7E, 0x12, 0x12, 0x12, 0x12, 0x0C, 0x00, 0x00 ], # 'P'
[ 0x3C, 0x42, 0x52, 0x62, 0x42, 0x3C, 0x00, 0x00 ], # 'Q'
[ 0x7E, 0x12, 0x12, 0x12, 0x32, 0x4C, 0x00, 0x00 ], # 'R'
[ 0x24, 0x4A, 0x4A, 0x4A, 0x4A, 0x30, 0x00, 0x00 ], # 'S'
[ 0x02, 0x02, 0x02, 0x7E, 0x02, 0x02, 0x02, 0x00 ], # 'T'
[ 0x3E, 0x40, 0x40, 0x40, 0x40, 0x3E, 0x00, 0x00 ], # 'U'
[ 0x1E, 0x20, 0x40, 0x40, 0x20, 0x1E, 0x00, 0x00 ], # 'V'
[ 0x3E, 0x40, 0x20, 0x20, 0x40, 0x3E, 0x00, 0x00 ], # 'W'
[ 0x42, 0x24, 0x18, 0x18, 0x24, 0x42, 0x00, 0x00 ], # 'X'
[ 0x02, 0x04, 0x08, 0x70, 0x08, 0x04, 0x02, 0x00 ], # 'Y'
[ 0x42, 0x62, 0x52, 0x4A, 0x46, 0x42, 0x00, 0x00 ], # 'Z'
[ 0x00, 0x00, 0x7E, 0x42, 0x42, 0x00, 0x00, 0x00 ], # '['
[ 0x00, 0x04, 0x08, 0x10, 0x20, 0x40, 0x00, 0x00 ], # backslash
[ 0x00, 0x00, 0x42, 0x42, 0x7E, 0x00, 0x00, 0x00 ], # '
[ 0x00, 0x08, 0x04, 0x7E, 0x04, 0x08, 0x00, 0x00 ], # '^'
[ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00 ], # '_'
[ 0x3C, 0x42, 0x99, 0xA5, 0xA5, 0x81, 0x42, 0x3C ], # '`'
[ 0x00, 0x20, 0x54, 0x54, 0x54, 0x78, 0x00, 0x00 ], # 'a'
[ 0x00, 0x7E, 0x48, 0x48, 0x48, 0x30, 0x00, 0x00 ], # 'b'
[ 0x00, 0x00, 0x38, 0x44, 0x44, 0x44, 0x00, 0x00 ], # 'c'
[ 0x00, 0x30, 0x48, 0x48, 0x48, 0x7E, 0x00, 0x00 ], # 'd'
[ 0x00, 0x38, 0x54, 0x54, 0x54, 0x48, 0x00, 0x00 ], # 'e'
[ 0x00, 0x00, 0x00, 0x7C, 0x0A, 0x02, 0x00, 0x00 ], # 'f'
[ 0x00, 0x18, 0xA4, 0xA4, 0xA4, 0xA4, 0x7C, 0x00 ], # 'g'
[ 0x00, 0x7E, 0x08, 0x08, 0x08, 0x70, 0x00, 0x00 ], # 'h'
[ 0x00, 0x00, 0x00, 0x48, 0x7A, 0x40, 0x00, 0x00 ], # 'i'
[ 0x00, 0x00, 0x40, 0x80, 0x80, 0x7A, 0x00, 0x00 ], # 'j'
[ 0x00, 0x7E, 0x18, 0x24, 0x40, 0x00, 0x00, 0x00 ], # 'k'
[ 0x00, 0x00, 0x00, 0x3E, 0x40, 0x40, 0x00, 0x00 ], # 'l'
[ 0x00, 0x7C, 0x04, 0x78, 0x04, 0x78, 0x00, 0x00 ], # 'm'
[ 0x00, 0x7C, 0x04, 0x04, 0x04, 0x78, 0x00, 0x00 ], # 'n'
[ 0x00, 0x38, 0x44, 0x44, 0x44, 0x38, 0x00, 0x00 ], # 'o'
[ 0x00, 0xFC, 0x24, 0x24, 0x24, 0x18, 0x00, 0x00 ], # 'p'
[ 0x00, 0x18, 0x24, 0x24, 0x24, 0xFC, 0x80, 0x00 ], # 'q'
[ 0x00, 0x00, 0x78, 0x04, 0x04, 0x04, 0x00, 0x00 ], # 'r'
[ 0x00, 0x48, 0x54, 0x54, 0x54, 0x20, 0x00, 0x00 ], # 's'
[ 0x00, 0x00, 0x04, 0x3E, 0x44, 0x40, 0x00, 0x00 ], # 't'
[ 0x00, 0x3C, 0x40, 0x40, 0x40, 0x3C, 0x00, 0x00 ], # 'u'
[ 0x00, 0x0C, 0x30, 0x40, 0x30, 0x0C, 0x00, 0x00 ], # 'v'
[ 0x00, 0x3C, 0x40, 0x38, 0x40, 0x3C, 0x00, 0x00 ], # 'w'
[ 0x00, 0x44, 0x28, 0x10, 0x28, 0x44, 0x00, 0x00 ], # 'x'
[ 0x00, 0x1C, 0xA0, 0xA0, 0xA0, 0x7C, 0x00, 0x00 ], # 'y'
[ 0x00, 0x44, 0x64, 0x54, 0x4C, 0x44, 0x00, 0x00 ], # 'z'
[ 0x00, 0x08, 0x08, 0x76, 0x42, 0x42, 0x00, 0x00 ], # '{'
[ 0x00, 0x00, 0x00, 0x7E, 0x00, 0x00, 0x00, 0x00 ], # '|'
[ 0x00, 0x42, 0x42, 0x76, 0x08, 0x08, 0x00, 0x00 ], # '}'
[ 0x00, 0x00, 0x04, 0x02, 0x04, 0x02, 0x00, 0x00 ], # '~'
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x7F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x80
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x81
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x82
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x83
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x84
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x85
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x86
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x87
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x88
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x89
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x90
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x91
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x92
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x93
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x94
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x95
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x96
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x97
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x98
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x99
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xED
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFF
]; # end of SINCLAIRS_FONT
# -----------------------------------------------------------
# Bit patterns for LCD_FONT
# Source: www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69880
# Transposed by JLCArchibald
# Note: Only contains characters 0x20 - 0x7F inclusive
# All others will appear as blanks
# LCD_FONT: 256-entry glyph table, one list of 8 column bytes per code point.
# Only code points 0x20-0x7F carry pixel data; every other entry renders blank.
# Each defined glyph uses at most the first five columns, so the table is
# assembled from 5-byte column patterns padded with three zero columns.
_LCD_GLYPHS = [
    [0x00, 0x00, 0x00, 0x00, 0x00],  # ' '
    [0x00, 0x00, 0x5f, 0x00, 0x00],  # '!'
    [0x00, 0x03, 0x00, 0x03, 0x00],  # '"'
    [0x14, 0x7f, 0x14, 0x7f, 0x14],  # '#'
    [0x24, 0x2a, 0x7f, 0x2a, 0x12],  # '$'
    [0x23, 0x13, 0x08, 0x64, 0x62],  # '%'
    [0x36, 0x49, 0x55, 0x22, 0x50],  # '&'
    [0x00, 0x05, 0x03, 0x00, 0x00],  # "'"
    [0x00, 0x1c, 0x22, 0x41, 0x00],  # '('
    [0x00, 0x41, 0x22, 0x1c, 0x00],  # ')'
    [0x14, 0x08, 0x3e, 0x08, 0x14],  # '*'
    [0x08, 0x08, 0x3e, 0x08, 0x08],  # '+'
    [0x00, 0x50, 0x30, 0x00, 0x00],  # ','
    [0x08, 0x08, 0x08, 0x08, 0x08],  # '-'
    [0x00, 0x60, 0x60, 0x00, 0x00],  # '.'
    [0x20, 0x10, 0x08, 0x04, 0x02],  # '/'
    [0x3e, 0x51, 0x49, 0x45, 0x3e],  # '0'
    [0x00, 0x42, 0x7f, 0x40, 0x00],  # '1'
    [0x42, 0x61, 0x51, 0x49, 0x46],  # '2'
    [0x21, 0x41, 0x45, 0x4b, 0x31],  # '3'
    [0x18, 0x14, 0x12, 0x7f, 0x10],  # '4'
    [0x27, 0x45, 0x45, 0x45, 0x39],  # '5'
    [0x3c, 0x4a, 0x49, 0x49, 0x30],  # '6'
    [0x01, 0x71, 0x09, 0x05, 0x03],  # '7'
    [0x36, 0x49, 0x49, 0x49, 0x36],  # '8'
    [0x06, 0x49, 0x49, 0x29, 0x1e],  # '9'
    [0x00, 0x36, 0x36, 0x00, 0x00],  # ':'
    [0x00, 0x56, 0x36, 0x00, 0x00],  # ';'
    [0x08, 0x14, 0x22, 0x41, 0x00],  # '<'
    [0x14, 0x14, 0x14, 0x14, 0x14],  # '='
    [0x00, 0x41, 0x22, 0x14, 0x08],  # '>'
    [0x02, 0x01, 0x51, 0x09, 0x06],  # '?'
    [0x32, 0x49, 0x79, 0x41, 0x3e],  # '@'
    [0x7e, 0x11, 0x11, 0x11, 0x7e],  # 'A'
    [0x7f, 0x49, 0x49, 0x49, 0x36],  # 'B'
    [0x3e, 0x41, 0x41, 0x41, 0x22],  # 'C'
    [0x7f, 0x41, 0x41, 0x22, 0x1c],  # 'D'
    [0x7f, 0x49, 0x49, 0x49, 0x41],  # 'E'
    [0x7f, 0x09, 0x09, 0x09, 0x01],  # 'F'
    [0x3e, 0x41, 0x49, 0x49, 0x7a],  # 'G'
    [0x7f, 0x08, 0x08, 0x08, 0x7f],  # 'H'
    [0x00, 0x41, 0x7f, 0x41, 0x00],  # 'I'
    [0x20, 0x40, 0x41, 0x3f, 0x01],  # 'J'
    [0x7f, 0x08, 0x14, 0x22, 0x41],  # 'K'
    [0x7f, 0x40, 0x40, 0x40, 0x40],  # 'L'
    [0x7f, 0x02, 0x0c, 0x02, 0x7f],  # 'M'
    [0x7f, 0x04, 0x08, 0x10, 0x7f],  # 'N'
    [0x3e, 0x41, 0x41, 0x41, 0x3e],  # 'O'
    [0x7f, 0x09, 0x09, 0x09, 0x06],  # 'P'
    [0x3e, 0x41, 0x51, 0x21, 0x5e],  # 'Q'
    [0x7f, 0x09, 0x19, 0x29, 0x46],  # 'R'
    [0x46, 0x49, 0x49, 0x49, 0x31],  # 'S'
    [0x01, 0x01, 0x7f, 0x01, 0x01],  # 'T'
    [0x3f, 0x40, 0x40, 0x40, 0x3f],  # 'U'
    [0x1f, 0x20, 0x40, 0x20, 0x1f],  # 'V'
    [0x3f, 0x40, 0x38, 0x40, 0x3f],  # 'W'
    [0x63, 0x14, 0x08, 0x14, 0x63],  # 'X'
    [0x07, 0x08, 0x70, 0x08, 0x07],  # 'Y'
    [0x61, 0x51, 0x49, 0x45, 0x43],  # 'Z'
    [0x00, 0x7f, 0x41, 0x41, 0x00],  # '['
    [0x02, 0x04, 0x08, 0x10, 0x20],  # backslash
    [0x00, 0x41, 0x41, 0x7f, 0x00],  # ']'
    [0x04, 0x02, 0x01, 0x02, 0x04],  # '^'
    [0x40, 0x40, 0x40, 0x40, 0x40],  # '_'
    [0x00, 0x01, 0x02, 0x04, 0x00],  # '`'
    [0x20, 0x54, 0x54, 0x54, 0x78],  # 'a'
    [0x7f, 0x48, 0x44, 0x44, 0x38],  # 'b'
    [0x38, 0x44, 0x44, 0x44, 0x20],  # 'c'
    [0x38, 0x44, 0x44, 0x48, 0x7f],  # 'd'
    [0x38, 0x54, 0x54, 0x54, 0x18],  # 'e'
    [0x08, 0x7e, 0x09, 0x01, 0x02],  # 'f'
    [0x0c, 0x52, 0x52, 0x52, 0x3e],  # 'g'
    [0x7f, 0x08, 0x04, 0x04, 0x78],  # 'h'
    [0x00, 0x44, 0x7d, 0x40, 0x00],  # 'i'
    [0x20, 0x40, 0x44, 0x3d, 0x00],  # 'j'
    [0x7f, 0x10, 0x28, 0x44, 0x00],  # 'k'
    [0x00, 0x41, 0x7f, 0x40, 0x00],  # 'l'
    [0x7c, 0x04, 0x18, 0x04, 0x78],  # 'm'
    [0x7c, 0x08, 0x04, 0x04, 0x78],  # 'n'
    [0x38, 0x44, 0x44, 0x44, 0x38],  # 'o'
    [0x7c, 0x14, 0x14, 0x14, 0x08],  # 'p'
    [0x08, 0x14, 0x14, 0x18, 0x7c],  # 'q'
    [0x7c, 0x08, 0x04, 0x04, 0x08],  # 'r'
    [0x48, 0x54, 0x54, 0x54, 0x20],  # 's'
    [0x04, 0x3f, 0x44, 0x40, 0x20],  # 't'
    [0x3c, 0x40, 0x40, 0x20, 0x7c],  # 'u'
    [0x1c, 0x20, 0x40, 0x20, 0x1c],  # 'v'
    [0x3c, 0x40, 0x30, 0x40, 0x3c],  # 'w'
    [0x44, 0x28, 0x10, 0x28, 0x44],  # 'x'
    [0x0c, 0x50, 0x50, 0x50, 0x3c],  # 'y'
    [0x44, 0x64, 0x54, 0x4c, 0x44],  # 'z'
    [0x00, 0x08, 0x36, 0x41, 0x00],  # '{'
    [0x00, 0x00, 0x7f, 0x00, 0x00],  # '|'
    [0x00, 0x41, 0x36, 0x08, 0x00],  # '}'
    [0x10, 0x08, 0x08, 0x10, 0x08],  # '~'
    [0x00, 0x00, 0x02, 0x05, 0x02],  # 0x7F
]

# Assemble the full 256-entry table: 32 blank rows, the 96 defined glyphs
# (each padded to 8 columns), then 128 more blank rows. Every row is a
# fresh list, matching the original literal table entry-for-entry.
LCD_FONT = (
    [8 * [0x00] for _ in range(0x20)]                  # 0x00-0x1F: blank
    + [glyph + 3 * [0x00] for glyph in _LCD_GLYPHS]    # 0x20-0x7F: printable
    + [8 * [0x00] for _ in range(0x80)]                # 0x80-0xFF: blank
)  # end of LCD_FONT
# -----------------------------------------------------------
# Bit patterns for TINY_FONT
# Source: http://www.henningkarlsen.com/electronics/r_fonts.php
# Transposed by JLCArchibald
# Note: Only contains characters 0x20 - 0x7E inclusive
# All others will appear as blanks
TINY_FONT = [
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x00
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x01
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x02
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x03
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x04
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x05
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x06
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x07
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x08
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x09
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x0F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x10
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x11
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x12
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x13
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x14
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x15
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x16
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x17
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x18
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x19
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x1F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # ' '
[ 0x00, 0x00, 0x06, 0x5F, 0x5F, 0x06, 0x00, 0x00 ], # '!'
[ 0x00, 0x03, 0x07, 0x00, 0x00, 0x07, 0x03, 0x00 ], # '"'
[ 0x14, 0x7F, 0x7F, 0x14, 0x7F, 0x7F, 0x14, 0x00 ], # '#'
[ 0x00, 0x24, 0x2E, 0x6B, 0x6B, 0x3A, 0x12, 0x00 ], # '$'
[ 0x46, 0x66, 0x30, 0x18, 0x0C, 0x66, 0x62, 0x00 ], # '%'
[ 0x30, 0x7A, 0x4F, 0x5D, 0x37, 0x7A, 0x48, 0x00 ], # '&'
[ 0x00, 0x00, 0x04, 0x07, 0x03, 0x00, 0x00, 0x00 ], # '''
[ 0x00, 0x00, 0x1C, 0x3E, 0x63, 0x41, 0x00, 0x00 ], # '('
[ 0x00, 0x00, 0x41, 0x63, 0x3E, 0x1C, 0x00, 0x00 ], # ')'
[ 0x08, 0x2A, 0x3E, 0x1C, 0x1C, 0x3E, 0x2A, 0x08 ], # '*'
[ 0x00, 0x08, 0x08, 0x3E, 0x3E, 0x08, 0x08, 0x00 ], # '+'
[ 0x00, 0x00, 0x80, 0xE0, 0x60, 0x00, 0x00, 0x00 ], # '
[ 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00 ], # '-'
[ 0x00, 0x00, 0x00, 0x60, 0x60, 0x00, 0x00, 0x00 ], # '.'
[ 0x60, 0x30, 0x18, 0x0C, 0x06, 0x03, 0x01, 0x00 ], # '/'
[ 0x3E, 0x7F, 0x51, 0x49, 0x45, 0x7F, 0x3E, 0x00 ], # '0'
[ 0x00, 0x40, 0x42, 0x7F, 0x7F, 0x40, 0x40, 0x00 ], # '1'
[ 0x42, 0x63, 0x71, 0x59, 0x49, 0x6F, 0x66, 0x00 ], # '2'
[ 0x22, 0x63, 0x49, 0x49, 0x49, 0x7F, 0x36, 0x00 ], # '3'
[ 0x18, 0x1C, 0x16, 0x53, 0x7F, 0x7F, 0x50, 0x00 ], # '4'
[ 0x2F, 0x6F, 0x49, 0x49, 0x49, 0x79, 0x31, 0x00 ], # '5'
[ 0x3C, 0x7E, 0x4B, 0x49, 0x49, 0x78, 0x30, 0x00 ], # '6'
[ 0x03, 0x03, 0x71, 0x79, 0x0D, 0x07, 0x03, 0x00 ], # '7'
[ 0x36, 0x7F, 0x49, 0x49, 0x49, 0x7F, 0x36, 0x00 ], # '8'
[ 0x06, 0x4F, 0x49, 0x49, 0x69, 0x3F, 0x1E, 0x00 ], # '9'
[ 0x00, 0x00, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00 ], # ':'
[ 0x00, 0x00, 0x80, 0xE6, 0x66, 0x00, 0x00, 0x00 ], # ';'
[ 0x00, 0x00, 0x08, 0x1C, 0x36, 0x63, 0x41, 0x00 ], # '<'
[ 0x00, 0x24, 0x24, 0x24, 0x24, 0x24, 0x24, 0x00 ], # '='
[ 0x00, 0x41, 0x63, 0x36, 0x1C, 0x08, 0x00, 0x00 ], # '>'
[ 0x02, 0x03, 0x01, 0x59, 0x5D, 0x07, 0x02, 0x00 ], # '?'
[ 0x3E, 0x7F, 0x41, 0x5D, 0x5D, 0x1F, 0x1E, 0x00 ], # '@'
[ 0x7C, 0x7E, 0x0B, 0x09, 0x0B, 0x7E, 0x7C, 0x00 ], # 'A'
[ 0x41, 0x7F, 0x7F, 0x49, 0x49, 0x7F, 0x36, 0x00 ], # 'B'
[ 0x1C, 0x3E, 0x63, 0x41, 0x41, 0x63, 0x22, 0x00 ], # 'C'
[ 0x41, 0x7F, 0x7F, 0x41, 0x63, 0x3E, 0x1C, 0x00 ], # 'D'
[ 0x41, 0x7F, 0x7F, 0x49, 0x5D, 0x41, 0x63, 0x00 ], # 'E'
[ 0x41, 0x7F, 0x7F, 0x49, 0x1D, 0x01, 0x03, 0x00 ], # 'F'
[ 0x1C, 0x3E, 0x63, 0x41, 0x51, 0x33, 0x72, 0x00 ], # 'G'
[ 0x7F, 0x7F, 0x08, 0x08, 0x08, 0x7F, 0x7F, 0x00 ], # 'H'
[ 0x00, 0x00, 0x41, 0x7F, 0x7F, 0x41, 0x00, 0x00 ], # 'I'
[ 0x30, 0x70, 0x40, 0x41, 0x7F, 0x3F, 0x01, 0x00 ], # 'J'
[ 0x41, 0x7F, 0x7F, 0x08, 0x1C, 0x77, 0x63, 0x00 ], # 'K'
[ 0x41, 0x7F, 0x7F, 0x41, 0x40, 0x60, 0x70, 0x00 ], # 'L'
[ 0x7F, 0x7F, 0x0E, 0x1C, 0x0E, 0x7F, 0x7F, 0x00 ], # 'M'
[ 0x7F, 0x7F, 0x06, 0x0C, 0x18, 0x7F, 0x7F, 0x00 ], # 'N'
[ 0x3E, 0x7F, 0x41, 0x41, 0x41, 0x7F, 0x3E, 0x00 ], # 'O'
[ 0x41, 0x7F, 0x7F, 0x49, 0x09, 0x0F, 0x06, 0x00 ], # 'P'
[ 0x3E, 0x7F, 0x41, 0x41, 0xE1, 0xFF, 0xBE, 0x00 ], # 'Q'
[ 0x41, 0x7F, 0x7F, 0x09, 0x19, 0x7F, 0x66, 0x00 ], # 'R'
[ 0x22, 0x67, 0x4D, 0x49, 0x59, 0x73, 0x22, 0x00 ], # 'S'
[ 0x00, 0x07, 0x43, 0x7F, 0x7F, 0x43, 0x07, 0x00 ], # 'T'
[ 0x3F, 0x7F, 0x40, 0x40, 0x40, 0x7F, 0x3F, 0x00 ], # 'U'
[ 0x1F, 0x3F, 0x60, 0x40, 0x60, 0x3F, 0x1F, 0x00 ], # 'V'
[ 0x3F, 0x7F, 0x60, 0x38, 0x60, 0x7F, 0x3F, 0x00 ], # 'W'
[ 0x63, 0x77, 0x1C, 0x08, 0x1C, 0x77, 0x63, 0x00 ], # 'X'
[ 0x00, 0x07, 0x4F, 0x78, 0x78, 0x4F, 0x07, 0x00 ], # 'Y'
[ 0x47, 0x63, 0x71, 0x59, 0x4D, 0x67, 0x73, 0x00 ], # 'Z'
[ 0x00, 0x00, 0x7F, 0x7F, 0x41, 0x41, 0x00, 0x00 ], # '['
[ 0x01, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0x00 ], # backslash
[ 0x00, 0x00, 0x41, 0x41, 0x7F, 0x7F, 0x00, 0x00 ], # '
[ 0x08, 0x0C, 0x06, 0x03, 0x06, 0x0C, 0x08, 0x00 ], # '^'
[ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 ], # '_'
[ 0x00, 0x00, 0x01, 0x03, 0x06, 0x04, 0x00, 0x00 ], # '`'
[ 0x20, 0x74, 0x54, 0x54, 0x3C, 0x78, 0x40, 0x00 ], # 'a'
[ 0x41, 0x7F, 0x3F, 0x44, 0x44, 0x7C, 0x38, 0x00 ], # 'b'
[ 0x38, 0x7C, 0x44, 0x44, 0x44, 0x6C, 0x28, 0x00 ], # 'c'
[ 0x38, 0x7C, 0x44, 0x45, 0x3F, 0x7F, 0x40, 0x00 ], # 'd'
[ 0x38, 0x7C, 0x54, 0x54, 0x54, 0x5C, 0x18, 0x00 ], # 'e'
[ 0x48, 0x7E, 0x7F, 0x49, 0x09, 0x03, 0x02, 0x00 ], # 'f'
[ 0x98, 0xBC, 0xA4, 0xA4, 0xF8, 0x7C, 0x04, 0x00 ], # 'g'
[ 0x41, 0x7F, 0x7F, 0x08, 0x04, 0x7C, 0x78, 0x00 ], # 'h'
[ 0x00, 0x00, 0x44, 0x7D, 0x7D, 0x40, 0x00, 0x00 ], # 'i'
[ 0x00, 0x60, 0xE0, 0x80, 0x80, 0xFD, 0x7D, 0x00 ], # 'j'
[ 0x41, 0x7F, 0x7F, 0x10, 0x38, 0x6C, 0x44, 0x00 ], # 'k'
[ 0x00, 0x00, 0x41, 0x7F, 0x7F, 0x40, 0x00, 0x00 ], # 'l'
[ 0x7C, 0x7C, 0x0C, 0x78, 0x0C, 0x7C, 0x78, 0x00 ], # 'm'
[ 0x04, 0x7C, 0x78, 0x04, 0x04, 0x7C, 0x78, 0x00 ], # 'n'
[ 0x38, 0x7C, 0x44, 0x44, 0x44, 0x7C, 0x38, 0x00 ], # 'o'
[ 0x84, 0xFC, 0xF8, 0xA4, 0x24, 0x3C, 0x18, 0x00 ], # 'p'
[ 0x18, 0x3C, 0x24, 0xA4, 0xF8, 0xFC, 0x84, 0x00 ], # 'q'
[ 0x44, 0x7C, 0x78, 0x4C, 0x04, 0x0C, 0x08, 0x00 ], # 'r'
[ 0x48, 0x5C, 0x54, 0x54, 0x54, 0x74, 0x24, 0x00 ], # 's'
[ 0x04, 0x04, 0x3F, 0x7F, 0x44, 0x64, 0x20, 0x00 ], # 't'
[ 0x3C, 0x7C, 0x40, 0x40, 0x3C, 0x7C, 0x40, 0x00 ], # 'u'
[ 0x1C, 0x3C, 0x60, 0x40, 0x60, 0x3C, 0x1C, 0x00 ], # 'v'
[ 0x3C, 0x7C, 0x60, 0x38, 0x60, 0x7C, 0x3C, 0x00 ], # 'w'
[ 0x44, 0x6C, 0x38, 0x10, 0x38, 0x6C, 0x44, 0x00 ], # 'x'
[ 0x9C, 0xBC, 0xA0, 0xA0, 0xA0, 0xFC, 0x7C, 0x00 ], # 'y'
[ 0x00, 0x4C, 0x64, 0x74, 0x5C, 0x4C, 0x64, 0x00 ], # 'z'
[ 0x00, 0x08, 0x08, 0x3E, 0x77, 0x41, 0x41, 0x00 ], # '{'
[ 0x00, 0x00, 0x00, 0x7F, 0x7F, 0x00, 0x00, 0x00 ], # '|'
[ 0x00, 0x41, 0x41, 0x77, 0x3E, 0x08, 0x08, 0x00 ], # '}'
[ 0x02, 0x03, 0x01, 0x03, 0x02, 0x03, 0x01, 0x00 ], # '~'
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x7F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x80
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x81
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x82
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x83
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x84
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x85
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x86
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x87
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x88
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x89
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x8F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x90
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x91
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x92
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x93
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x94
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x95
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x96
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x97
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x98
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x99
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9A
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9B
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9C
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9D
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9E
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0x9F
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xA9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xAF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xB9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xBF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xC9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xCF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xD9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xDF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xE9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xED
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xEF
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF0
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF1
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF2
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF3
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF4
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF5
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF6
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF7
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF8
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xF9
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFA
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFB
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFC
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFD
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFE
[ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ], # 0xFF
]; # end of TINY_FONT
# -----------------------------------------------------------
|
ceremcem/aktos-led-panel
|
MAX7219fonts.py
|
Python
|
mit
| 51,355
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for sysctl checks."""
from absl import app
from grr_response_core.lib.parsers import linux_sysctl_parser
from grr_response_server.check_lib import checks_test_lib
from grr.test_lib import test_lib
class SysctlTests(checks_test_lib.HostCheckTest):
    """Host-check tests for the sysctl.yaml check definitions."""

    @classmethod
    def setUpClass(cls):
        super(SysctlTests, cls).setUpClass()
        # Load the check under test once for the whole class; the parser
        # turns raw /proc/sys file data into sysctl knowledge-base values.
        cls.LoadCheck("sysctl.yaml")
        cls.parser = linux_sysctl_parser.ProcSysParser()

    def testRPFilter(self):
        """Ensure rp_filter is set to Strict mode.

        rp_filter may be set to three values:
          0 - Disabled
          1 - Strict Reverse Path
          2 - Loose Reverse Path

        See https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
        """
        chk_id = "CIS-NET-RP-FILTER"
        # Loose reverse-path filtering (2) should trigger an anomaly.
        test_data = {"/proc/sys/net/ipv4/conf/default/rp_filter": "2"}
        host_data = self.GenFileData("LinuxProcSysHardeningSettings", test_data,
                                     self.parser)
        results = self.RunChecks(host_data)
        sym = "Found: System does not perform path filtering."
        found = ["net_ipv4_conf_default_rp_filter: 2"]
        self.assertCheckDetectedAnom(chk_id, results, sym, found)
        # Strict reverse-path filtering (1) is compliant; no anomaly expected.
        test_data = {"/proc/sys/net/ipv4/conf/default/rp_filter": "1"}
        host_data = self.GenFileData("LinuxProcSysHardeningSettings", test_data,
                                     self.parser)
        results = self.RunChecks(host_data)
        self.assertCheckUndetected(chk_id, results)
def main(argv):
    # Delegate to the GRR test runner.
    test_lib.main(argv)


if __name__ == "__main__":
    app.run(main)
|
google/grr
|
grr/server/grr_response_server/checks/sysctl_test.py
|
Python
|
apache-2.0
| 1,553
|
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.torrent.base import TorrentProvider
import traceback
import six
log = CPLog(__name__)
class TorrentShack(TorrentProvider):
    """CouchPotato torrent provider for the TorrentShack private tracker.

    Searches torrents.php via the advanced-search form and scrapes the
    resulting HTML table with BeautifulSoup.
    """

    # Endpoint templates; %s / %d placeholders are filled per request.
    urls = {
        'test': 'https://torrentshack.net/',
        'login': 'https://torrentshack.net/login.php',
        'login_check': 'https://torrentshack.net/inbox.php',
        'detail': 'https://torrentshack.net/torrent/%s',
        'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
        'download': 'https://torrentshack.net/%s',
    }

    # ([site category ids], [CouchPotato quality identifiers]) pairs.
    cat_ids = [
        ([970], ['bd50']),
        ([300], ['720p', '1080p']),
        ([350], ['dvdr']),
        ([400], ['brrip', 'dvdrip']),
    ]

    http_time_between_calls = 1  # seconds between requests (site rate limit)
    cat_backup_id = 400  # fallback category when the quality is unmapped

    def _searchOnTitle(self, title, movie, quality, results):
        """Query the tracker for `title` and append parsed hits to `results`."""
        scene_only = '1' if self.conf('scene_only') else ''

        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
        data = self.getHTMLData(url)

        if data:
            html = BeautifulSoup(data)

            try:
                result_table = html.find('table', attrs = {'id' : 'torrent_table'})
                if not result_table:
                    return

                entries = result_table.find_all('tr', attrs = {'class' : 'torrent'})
                for result in entries:
                    link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
                    url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')

                    results.append({
                        'id': link['href'].replace('torrents.php?torrentid=', ''),
                        # Strip soft hyphens (U+00AD) the site injects into long names.
                        'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
                        'url': self.urls['download'] % url['href'],
                        'detail_url': self.urls['download'] % link['href'],
                        'size': self.parseSize(result.find_all('td')[4].string),
                        'seeders': tryInt(result.find_all('td')[6].string),
                        'leechers': tryInt(result.find_all('td')[7].string),
                    })
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt; log message grammar corrected too.
                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))

    def getLoginParams(self):
        """POST payload for login.php."""
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'keeplogged': '1',
            'login': 'Login',
        }

    def loginSuccess(self, output):
        # A logout link is only rendered for authenticated sessions.
        return 'logout.php' in output.lower()

    loginCheckSuccess = loginSuccess
|
entomb/CouchPotatoServer
|
couchpotato/core/providers/torrent/torrentshack/main.py
|
Python
|
gpl-3.0
| 2,899
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
r_mask_vect.py
--------------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def processCommand(alg, parameters, context):
    """Build the GRASS command line, omitting the 'input' raster.

    The input layer is consumed by processOutputs instead, so it is
    removed from the algorithm's parameters before the command is built.
    """
    alg.removeParameter('input')
    alg.processCommand(parameters, context, True)
def processOutputs(alg, parameters, context):
    """Export the masked raster, keeping every band and the color table."""
    create_opt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_OPT, context)
    meta_opt = alg.parameterAsString(parameters, alg.GRASS_RASTER_FORMAT_META, context)

    # The raster must be exported with all of its bands and its color table.
    out_path = alg.parameterAsOutputLayer(parameters, 'output', context)
    out_format = Grass7Utils.getRasterFormatFromFilename(out_path)
    grass_name = alg.exportedLayers['input']
    alg.exportRasterLayer(grass_name, out_path, True,
                          out_format, create_opt, meta_opt)
|
CS-SI/QGIS
|
python/plugins/processing/algs/grass7/ext/r_mask_vect.py
|
Python
|
gpl-2.0
| 1,850
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure macro expansion of $(VCInstallDir) is handled, and specifically
always / terminated for compatibility.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    # Only meaningful on Windows, where $(VCInstallDir) exists.
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

    CHDIR = 'vs-macros'
    test.run_gyp('vcinstalldir.gyp', chdir=CHDIR)
    # This fails on VS because the trailing slash escapes the trailing quote.
    test.build('vcinstalldir.gyp', 'test_slash_trailing', chdir=CHDIR, status=1)
    test.build('vcinstalldir.gyp', 'test_slash_dir', chdir=CHDIR)
    test.pass_test()
|
Jet-Streaming/gyp
|
test/win/gyptest-macro-vcinstalldir.py
|
Python
|
bsd-3-clause
| 743
|
# -*- coding: utf-8 -*-
"""
@file
@brief Defines a way to reference a package or a page in this package.
"""
import sphinx
from docutils import nodes
from .import_object_helper import import_any_object
class epkg_node(nodes.TextElement):
    """
    Docutils node produced by the *epkg* role.

    Carries no behaviour of its own; the visit/depart handlers
    registered below are no-ops.
    """
    pass
class ClassStruct:
    """Lightweight record: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        """Store all keyword arguments as instance attributes."""
        self.__dict__.update(kwargs)
def epkg_role(role, rawtext, text, lineno, inliner, options=None, content=None):
    """
    Defines custom role *epkg*. A list of supported urls must be defined in the
    configuration file. It wants to replace something like:

    ::

        `to_html <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.to_html.html>`_

    By:

    ::

        :epkg:`pandas:DataFrame.to_html`

    It inserts in the configuration the variable:

    ::

        epkg_dictionary = {'pandas': ('http://pandas.pydata.org/pandas-docs/stable/generated/',
                                      ('http://pandas.pydata.org/pandas-docs/stable/generated/{0}.html', 1))
                                      # 1 for one parameter
                           '*py': ('https://docs.python.org/3/',
                                   ('https://docs.python.org/3/library/{0}.html#{0}.{1}', 2))
                           }

    If the module name starts with a '*', the anchor does not contain it.
    See also :ref:`l-sphinx-epkg`.

    If no template is found, the role will look into the list of options
    to see if there is one function. It must be the last one.

    ::

        def my_custom_links(input):
            return "string to display", "url"

        epkg_dictionary = {'weird_package': ('http://pandas.pydata.org/pandas-docs/stable/generated/',
                                             ('http://pandas.pydata.org/pandas-docs/stable/generated/{0}.html', 1),
                                             my_custom_links)

    However, it is impossible to use a function as a value
    in the configuration because :epkg:`*py:pickle` does not handle
    this scenario (see `PicklingError on environment when config option
    value is a callable <https://github.com/sphinx-doc/sphinx/issues/1424>`_),
    ``my_custom_links`` needs to be replaced by:
    ``("module_where_it_is_defined.function_name", None)``.
    The role *epkg* will import it based on its name.

    :param role: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    # It extracts the pieces of the text.
    spl = text.split(":")
    if len(spl) == 0:  # pragma: no cover
        msg = inliner.reporter.error("empty value for role epkg", line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    # Configuration.
    env = inliner.document.settings.env
    app = env.app
    config = app.config
    try:
        epkg_dictionary = config.epkg_dictionary
    except AttributeError as e:  # pragma: no cover
        ma = "\n".join(sorted(str(_) for _ in app.config))
        raise AttributeError(
            "unable to find 'epkg_dictionary' in configuration. Available:\n{0}"
            "".format(ma)) from e

    # Supported module?
    modname = spl[0]
    if modname not in epkg_dictionary:
        msg = inliner.reporter.error(
            "Unable to find module '{0}' in epkg_dictionary, existing={1}".format(
                modname, ", ".join(sorted(epkg_dictionary.keys())), line=lineno))
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]

    if len(spl) == 1:
        # No sub-component: link to the module landing page (first entry).
        value = epkg_dictionary[modname]
        if isinstance(value, tuple):
            if len(value) == 0:  # pragma: no cover
                msg = inliner.reporter.error(
                    "Empty values for module '{0}' in epkg_dictionary.".format(modname))
                prb = inliner.problematic(rawtext, rawtext, msg)
                return [prb], [msg]
            value = value[0]
        anchor, url = modname, value
    else:
        # Pick the template whose arity matches the number of components.
        value = epkg_dictionary[modname]
        expected = len(spl) - 1
        found = None
        for tu in value:
            if isinstance(tu, tuple) and len(tu) == 2 and tu[1] == expected:
                found = tu[0]
        if found is None:
            # Fall back to a callable (or a named callable to import).
            if callable(value[-1]):
                found = value[-1]
            elif isinstance(value[-1], tuple) and len(value[-1]) == 2 and value[-1][-1] is None:
                # It assumes the first parameter is a name of a function.
                namef = value[-1][0]
                if not hasattr(config, namef):
                    # It assumes its name is defined in a package.
                    found = import_any_object(namef)[0]
                else:
                    # Defined in the configuration.
                    found = getattr(config, namef)
        if found is None:  # pragma: no cover
            msg = inliner.reporter.error(
                "Unable to find a tuple with '{0}' parameters in epkg_dictionary['{1}']"
                "".format(expected, modname))
            prb = inliner.problematic(rawtext, rawtext, msg)
            return [prb], [msg]
        if callable(found):
            try:
                anchor, url = found(text)
            except TypeError:
                # `found` may be a class whose instances are callable.
                try:
                    anchor, url = found()(text)
                except Exception as e:  # pragma: no cover
                    raise ValueError(
                        "epkg accepts function or classes with __call__ overloaded. "
                        "Found '{0}'".format(found)) from e
        else:
            url = found.format(*tuple(spl[1:]))
            if spl[0].startswith("*"):
                anchor = ".".join(spl[1:])  # pragma: no cover
            else:
                anchor = ".".join(spl)

    # Render as an external reference and parse it back into docutils nodes.
    extref = "`{0} <{1}>`__".format(anchor, url)
    node = epkg_node(rawtext=rawtext)
    node['classes'] += ["epkg"]
    memo = ClassStruct(document=inliner.document, reporter=inliner.reporter,
                       language=inliner.language)
    processed, messages = inliner.parse(extref, lineno, memo, node)
    if len(messages) > 0:  # pragma: no cover
        msg = inliner.reporter.error(
            "unable to interpret '{0}', messages={1}".format(
                text, ", ".join(str(_) for _ in messages)), line=lineno)
        prb = inliner.problematic(rawtext, rawtext, msg)
        return [prb], [msg]
    node += processed
    return [node], []
def visit_epkg_node(self, node):
    """
    What to do when visiting a node *epkg*: nothing, the node's
    children (the parsed external reference) render themselves.
    """
    pass
def depart_epkg_node(self, node):
    """
    What to do when leaving a node *epkg*: nothing to clean up.
    """
    pass
def setup(app):
    """
    setup for ``epkg`` (sphinx): registers the node, its handlers for
    every supported builder, the role, and the ``epkg_dictionary``
    configuration value.
    """
    if hasattr(app, "add_mapping"):
        app.add_mapping('epkg', epkg_node)

    app.add_config_value('epkg_dictionary', {}, 'env')
    app.add_node(epkg_node,
                 html=(visit_epkg_node, depart_epkg_node),
                 epub=(visit_epkg_node, depart_epkg_node),
                 elatex=(visit_epkg_node, depart_epkg_node),
                 latex=(visit_epkg_node, depart_epkg_node),
                 rst=(visit_epkg_node, depart_epkg_node),
                 md=(visit_epkg_node, depart_epkg_node),
                 text=(visit_epkg_node, depart_epkg_node))
    app.add_role('epkg', epkg_role)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
sdpython/pyquickhelper
|
src/pyquickhelper/sphinxext/sphinx_epkg_extension.py
|
Python
|
mit
| 7,895
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for resource tracker claims."""
import uuid
import mock
from oslo_serialization import jsonutils
from nova.compute import claims
from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
class FakeResourceHandler(object):
    """Test double for the resource tracker's extension resource handler."""

    # Set to True once test_resources() has been invoked.
    test_called = False
    # True when the usage passed to test_resources() was a flavor
    # ("instance type").  The original declared a never-read
    # `usage_is_instance` class attribute instead, so reading
    # `usage_is_itype` (the name the tests actually check) before
    # test_resources() ran would raise AttributeError.
    usage_is_itype = False

    def test_resources(self, usage, limits):
        self.test_called = True
        self.usage_is_itype = usage.get('name') == 'fakeitype'
        return []
class DummyTracker(object):
    """Minimal stand-in for the compute resource tracker used by claims."""

    icalled = False  # set True when abort_instance_claim is called
    rcalled = False  # set True when drop_move_claim is called
    ext_resources_handler = FakeResourceHandler()

    def __init__(self):
        self.new_pci_tracker()

    def abort_instance_claim(self, *args, **kwargs):
        self.icalled = True

    def drop_move_claim(self, *args, **kwargs):
        self.rcalled = True

    def new_pci_tracker(self):
        # Fresh PCI device tracker scoped to a throwaway request context.
        ctxt = context.RequestContext('testuser', 'testproject')
        self.pci_tracker = pci_manager.PciDevTracker(ctxt)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
            return_value=objects.InstancePCIRequests(requests=[]))
class ClaimTestCase(test.NoDBTestCase):
    """Tests for claims.Claim: memory, disk, PCI and NUMA resource checks.

    The class-level mock.patch injects an empty PCI request list into every
    test method (hence the extra `mock_get` argument on each test).
    """

    def setUp(self):
        super(ClaimTestCase, self).setUp()
        self.resources = self._fake_resources()
        self.tracker = DummyTracker()

    def _claim(self, limits=None, overhead=None, **kwargs):
        """Build a claims.Claim against self.resources.

        kwargs override fields of the fake instance; `numa_topology` is
        popped out and surfaced through a faked instance_extra DB row.
        """
        numa_topology = kwargs.pop('numa_topology', None)
        instance = self._fake_instance(**kwargs)
        if numa_topology:
            db_numa_topology = {
                'id': 1, 'created_at': None, 'updated_at': None,
                'deleted_at': None, 'deleted': None,
                'instance_uuid': instance['uuid'],
                'numa_topology': numa_topology._to_json()
            }
        else:
            db_numa_topology = None
        if overhead is None:
            overhead = {'memory_mb': 0}
        with mock.patch.object(
                db, 'instance_extra_get_by_instance_uuid',
                return_value=db_numa_topology):
            return claims.Claim('context', instance, self.tracker,
                                self.resources, overhead=overhead,
                                limits=limits)

    def _fake_instance(self, **kwargs):
        # Plain dict standing in for an instance record; kwargs override.
        instance = {
            'uuid': str(uuid.uuid1()),
            'memory_mb': 1024,
            'root_gb': 10,
            'ephemeral_gb': 5,
            'vcpus': 1,
            'system_metadata': {},
            'numa_topology': None
        }
        instance.update(**kwargs)
        return instance

    def _fake_instance_type(self, **kwargs):
        # Flavor named 'fakeitype' -- FakeResourceHandler keys off this name.
        instance_type = {
            'id': 1,
            'name': 'fakeitype',
            'memory_mb': 1,
            'vcpus': 1,
            'root_gb': 1,
            'ephemeral_gb': 2
        }
        instance_type.update(**kwargs)
        return objects.Flavor(**instance_type)

    def _fake_resources(self, values=None):
        # Host with 2048 MB RAM, 20 GB disk, 2 vCPUs and two NUMA cells.
        resources = {
            'memory_mb': 2048,
            'memory_mb_used': 0,
            'free_ram_mb': 2048,
            'local_gb': 20,
            'local_gb_used': 0,
            'free_disk_gb': 20,
            'vcpus': 2,
            'vcpus_used': 0,
            'numa_topology': objects.NUMATopology(
                cells=[objects.NUMACell(id=1, cpuset=set([1, 2]), memory=512,
                                        memory_usage=0, cpu_usage=0,
                                        mempages=[], siblings=[],
                                        pinned_cpus=set([])),
                       objects.NUMACell(id=2, cpuset=set([3, 4]), memory=512,
                                        memory_usage=0, cpu_usage=0,
                                        mempages=[], siblings=[],
                                        pinned_cpus=set([]))]
                )._to_json()
        }
        if values:
            resources.update(values)
        return resources

    # Without limits, claims succeed regardless of requested size.
    def test_memory_unlimited(self, mock_get):
        self._claim(memory_mb=99999999)

    def test_disk_unlimited_root(self, mock_get):
        self._claim(root_gb=999999)

    def test_disk_unlimited_ephemeral(self, mock_get):
        self._claim(ephemeral_gb=999999)

    def test_memory_with_overhead(self, mock_get):
        # 2040 + 8 overhead == 2048 limit: exactly fits.
        overhead = {'memory_mb': 8}
        limits = {'memory_mb': 2048}
        self._claim(memory_mb=2040, limits=limits,
                    overhead=overhead)

    def test_memory_with_overhead_insufficient(self, mock_get):
        # 2040 + 9 overhead > 2048 limit: must be rejected.
        overhead = {'memory_mb': 9}
        limits = {'memory_mb': 2048}
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, overhead=overhead,
                          memory_mb=2040)

    def test_memory_oversubscription(self, mock_get):
        self._claim(memory_mb=4096)

    def test_memory_insufficient(self, mock_get):
        limits = {'memory_mb': 8192}
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim, limits=limits, memory_mb=16384)

    def test_disk_oversubscription(self, mock_get):
        limits = {'disk_gb': 60}
        self._claim(root_gb=10, ephemeral_gb=40,
                    limits=limits)

    def test_disk_insufficient(self, mock_get):
        limits = {'disk_gb': 45}
        self.assertRaisesRegex(
            exception.ComputeResourcesUnavailable,
            "disk",
            self._claim, limits=limits, root_gb=10, ephemeral_gb=40)

    def test_disk_and_memory_insufficient(self, mock_get):
        # Both failures should be reported in one message, memory first.
        limits = {'disk_gb': 45, 'memory_mb': 8192}
        self.assertRaisesRegex(
            exception.ComputeResourcesUnavailable,
            "memory.*disk",
            self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
            memory_mb=16384)

    @pci_fakes.patch_pci_whitelist
    def test_pci_pass(self, mock_get):
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': 0,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict])
        claim = self._claim()
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        # _test_pci returns None on success.
        self.assertIsNone(claim._test_pci())

    @pci_fakes.patch_pci_whitelist
    def test_pci_fail(self, mock_get):
        # Only a 'v1' device is available but 'v' is requested.
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v1',
            'numa_node': 1,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict])
        claim = self._claim()
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        claim._test_pci()

    @pci_fakes.patch_pci_whitelist
    def test_pci_pass_no_requests(self, mock_get):
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': 0,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict])
        claim = self._claim()
        self.assertIsNone(claim._test_pci())

    def test_ext_resources(self, mock_get):
        self._claim()
        self.assertTrue(self.tracker.ext_resources_handler.test_called)
        # Plain claims test against the instance, not the flavor.
        self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)

    def test_numa_topology_no_limit(self, mock_get):
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2]), memory=512)])
        self._claim(numa_topology=huge_instance)

    def test_numa_topology_fails(self, mock_get):
        # 5 CPUs / 2048 MB in one cell cannot fit a 2-CPU / 512 MB host cell.
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2, 3, 4, 5]), memory=2048)])
        limit_topo = objects.NUMATopologyLimits(
            cpu_allocation_ratio=1, ram_allocation_ratio=1)
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim,
                          limits={'numa_topology': limit_topo},
                          numa_topology=huge_instance)

    def test_numa_topology_passes(self, mock_get):
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2]), memory=512)])
        limit_topo = objects.NUMATopologyLimits(
            cpu_allocation_ratio=1, ram_allocation_ratio=1)
        self._claim(limits={'numa_topology': limit_topo},
                    numa_topology=huge_instance)

    @pci_fakes.patch_pci_whitelist
    def test_numa_topology_with_pci(self, mock_get):
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': 1,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict])
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2]), memory=512)])
        self._claim(numa_topology=huge_instance)

    @pci_fakes.patch_pci_whitelist
    def test_numa_topology_with_pci_fail(self, mock_get):
        # Two devices requested but they live on different NUMA nodes than
        # the single instance cell can satisfy.
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': 1,
            'status': 'available'}
        dev_dict2 = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': 2,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict, dev_dict2])
        request = objects.InstancePCIRequest(count=2,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2]), memory=512)])
        self.assertRaises(exception.ComputeResourcesUnavailable,
                          self._claim,
                          numa_topology=huge_instance)

    @pci_fakes.patch_pci_whitelist
    def test_numa_topology_with_pci_no_numa_info(self, mock_get):
        # Devices without NUMA affinity must not block the claim.
        dev_dict = {
            'compute_node_id': 1,
            'address': 'a',
            'product_id': 'p',
            'vendor_id': 'v',
            'numa_node': None,
            'status': 'available'}
        self.tracker.new_pci_tracker()
        self.tracker.pci_tracker._set_hvdevs([dev_dict])
        request = objects.InstancePCIRequest(count=1,
            spec=[{'vendor_id': 'v', 'product_id': 'p'}])
        mock_get.return_value = objects.InstancePCIRequests(
            requests=[request])
        huge_instance = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([1, 2]), memory=512)])
        self._claim(numa_topology=huge_instance)

    def test_abort(self, mock_get):
        claim = self._abort()
        # Aborting a plain claim must call abort_instance_claim.
        self.assertTrue(claim.tracker.icalled)

    def _abort(self):
        """Enter a claim context manager and abort it via an exception."""
        claim = None
        try:
            with self._claim(memory_mb=4096) as claim:
                raise test.TestingException("abort")
        except test.TestingException:
            pass
        return claim
class MoveClaimTestCase(ClaimTestCase):
    """Same checks as ClaimTestCase, but for claims.MoveClaim (resize/migrate)."""

    def setUp(self):
        super(MoveClaimTestCase, self).setUp()
        self.instance = self._fake_instance()
        self.get_numa_constraint_patch = None

    def _claim(self, limits=None, overhead=None, **kwargs):
        # Here kwargs configure the target flavor rather than the instance.
        instance_type = self._fake_instance_type(**kwargs)
        # NOTE(review): numa_topology is popped *after* being forwarded to
        # the flavor above -- looks intentional upstream, but verify.
        numa_constraint = kwargs.pop('numa_topology', None)
        if overhead is None:
            overhead = {'memory_mb': 0}
        with mock.patch(
                'nova.virt.hardware.numa_get_constraints',
                return_value=numa_constraint):
            return claims.MoveClaim('context', self.instance, instance_type,
                                    {}, self.tracker, self.resources,
                                    overhead=overhead, limits=limits)

    def _set_pci_request(self, claim):
        # Stash a serialized PCI request in the instance system metadata.
        request = [{'count': 1,
                    'spec': [{'vendor_id': 'v', 'product_id': 'p'}],
                    }]
        claim.instance.update(
            system_metadata={'new_pci_requests': jsonutils.dumps(request)})

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_ext_resources(self, mock_get):
        self._claim()
        self.assertTrue(self.tracker.ext_resources_handler.test_called)
        # Move claims test against the flavor ("itype"), not the instance.
        self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)

    @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
                return_value=objects.InstancePCIRequests(requests=[]))
    def test_abort(self, mock_get):
        claim = self._abort()
        # Aborting a move claim must call drop_move_claim on the tracker.
        self.assertTrue(claim.tracker.rcalled)
|
takeshineshiro/nova
|
nova/tests/unit/compute/test_claims.py
|
Python
|
apache-2.0
| 14,665
|
import warnings
from great_expectations.dataset.util import create_multiple_expectations
from great_expectations.profile.base import DatasetProfiler
# This particular file should be immune to the new changes
class ColumnsExistProfiler(DatasetProfiler):
    """Profiler adding an ``expect_column_to_exist`` expectation per column."""

    @classmethod
    def _profile(cls, dataset, configuration=None):
        """
        This function will take a dataset and add expectations that each column present exists.

        Args:
            dataset (great_expectations.dataset): The dataset to profile and to which to add expectations.
            configuration: Configuration for select profilers.

        Returns:
            The dataset's expectation suite (with warnings suppressed).

        Raises:
            NotImplementedError: If the dataset cannot report its columns.
        """
        # "No get_table_columns method" and "get_table_columns() returned
        # None" were handled by two duplicated warn+raise blocks; fold them
        # into one check with identical messages.
        table_columns = (
            dataset.get_table_columns()
            if hasattr(dataset, "get_table_columns")
            else None
        )
        if table_columns is None:
            warnings.warn("No columns list found in dataset; no profiling performed.")
            raise NotImplementedError(
                "ColumnsExistProfiler._profile is not implemented for data assests without the table_columns property"
            )

        create_multiple_expectations(dataset, table_columns, "expect_column_to_exist")

        return dataset.get_expectation_suite(suppress_warnings=True)
|
great-expectations/great_expectations
|
great_expectations/profile/columns_exist.py
|
Python
|
apache-2.0
| 1,449
|
"""
String Utils.
Bruce Wernick
10 June 2021
"""
import random
import string
from textdistance import hamming
__all__ = ['rand_str', 'pword', 'SameText', 'fsig']
def rand_str(n=12, chars=string.ascii_letters + string.digits):
    """Return a random string of length *n* drawn from *chars*.

    Bug fixed: the original comprehension was ``for n in range(n)``,
    shadowing the length parameter with the loop index.
    """
    return ''.join(random.choice(chars) for _ in range(n))
def pword(size=12, chars=string.ascii_letters + string.digits):
    """Generate a random password of *size* characters taken from *chars*."""
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def SameText(a, b, strip=True):
    """Case-insensitive compare of two values.

    Strings compare equal ignoring case and (when *strip* is True)
    leading/trailing whitespace; non-string values fall back to plain
    equality.

    Bug fixed: the original guard was ``type(a) == 'str'`` -- comparing a
    type object to the literal string 'str', which is never true -- so the
    case-insensitive logic was unreachable, and the later strip/casefold
    calls could crash on non-string input.  The string-only comparisons are
    now inside an isinstance() guard.
    """
    if a == b:
        return True
    if isinstance(a, str) and isinstance(b, str):
        a = a.lower()
        b = b.lower()
        if a == b:
            return True
        if strip and (a.strip() == b.strip()):
            return True
        if a.casefold() == b.casefold():
            return True
    return False
def fsig(x, f='0.6g'):
    """Round a float via the format spec *f* (default: 6 significant digits)."""
    return float(format(x, f))
def TextMatch(str1, str2):
    """Return True if the two strings match exactly or nearly.

    Tries progressively looser comparisons: exact, lowercase,
    casefold, then a Hamming distance below 2 (i.e. at most one
    substituted character is tolerated).
    """
    if str1 == str2 or str1.lower() == str2.lower():
        return True
    if str1.casefold() == str2.casefold():
        return True
    # Tolerate a single differing character.
    return hamming(str1, str2) < 2
# ---------------------------------------------------------------------
if __name__ == '__main__':
    # Quick manual smoke test of the helpers above.
    print(fsig(123.456, '0.3f'))
    a = 'text_sample'
    b = 'text_simple'
    print(SameText(a, b))
    print(TextMatch(a, b))
|
bru32/magz
|
magz/ustring.py
|
Python
|
mit
| 1,668
|
import sys
import os
import base64
from db import *
from funcs import *
from routes_ips import *
from routes_data import *
##
# ROOT
##
@app.route('/')
def hello_world():
    # The bare root is deliberately hidden: always answer 404.
    return abort(404)
##
# MAIN
##
@app.route('/'+app.config['RNG_ID']+'/')
def get_endpoints():
    """List every available API endpoint, keyed by HTTP method.

    All routes live under a random-id prefix (RNG_ID) as a light
    obscurity measure; this index is the discovery entry point.
    """
    data = {
        'endpoints':
        {
            'GET':
            [
                '/',
                '/getdb/',
                '/data/get/<file_path>',
                '/data/del/<file_path>',
                '/data/list/all/[<pi_id>]/',
                '/data/list/last/[<pi_id>]/',
                '/data/list/filepath/[<pi_id>]/',
                '/data/list/filename/[<pi_id>]/',
                '/data/list/ids/',
                '/data/show/all/[<pi_id>]/',
                '/data/show/last/[<pi_id>]/',
                '/ips/list/all/[<pi_id>]/',
                '/ips/list/last/[<pi_id>]/',
                '/ips/add/<pi_id>/',
                '/ips/add/<pi_id>/<pi_internal_ip>/'
            ],
            'POST':
            [
                '/data/add/<pi_id>/'
            ],
            'error': False
        }
    }
    return jsonify(data), 200
##
# GETTERS
##
@app.route('/'+app.config['RNG_ID']+'/getdb/')
def get_db():
    """Download the raw database file, or 404 if it does not exist yet."""
    if not os.path.exists(app.config['DB_NAME']):
        return abort(404)
    # Earlier approach streamed the file manually; kept for reference.
    #content = get_file(app.config['DB_NAME'])
    ##return Response(content, mimetype=mimetype)
    #return Response(content)
    return send_from_directory('.', app.config['DB_NAME'], as_attachment=True, attachment_filename=app.config['DB_NAME'])
|
sauloalrpi/pifollow
|
routes.py
|
Python
|
mit
| 1,876
|
import sys
import xmlrpclib
import urllib
from certmaster import SSLCommon
class SSL_Transport(xmlrpclib.Transport):
    """XML-RPC transport (Python 2) that connects through a pyOpenSSL context."""

    user_agent = "pyOpenSSL_XMLRPC/%s - %s" % ('0.1', xmlrpclib.Transport.user_agent)

    def __init__(self, ssl_context, timeout=None, use_datetime=0):
        # Transport only grew an __init__ in Python 2.5; skip it on older.
        if sys.version_info[:3] >= (2, 5, 0):
            xmlrpclib.Transport.__init__(self, use_datetime)
        self.ssl_ctx = ssl_context
        self._timeout = timeout

    def make_connection(self, host):
        """Return an SSL connection object to *host* using our context."""
        # Handle username and password.
        try:
            host, extra_headers, x509 = self.get_host_info(host)
        except AttributeError:
            # Yay for Python 2.2
            pass
        _host, _port = urllib.splitport(host)
        # Transports with single_request (py >= 2.7) expect an
        # HTTPSConnection; older ones expect the HTTPS wrapper class.
        if hasattr(xmlrpclib.Transport, 'single_request'):
            cnx_class = SSLCommon.HTTPSConnection
        else:
            cnx_class = SSLCommon.HTTPS
        return cnx_class(_host, int(_port), ssl_context=self.ssl_ctx, timeout=self._timeout)
class SSLXMLRPCServerProxy(xmlrpclib.ServerProxy):
    # ServerProxy speaking XML-RPC over a mutually-authenticated SSL channel.
    def __init__(self, uri, pkey_file, cert_file, ca_cert_file, timeout=None):
        # Build the SSL context from our key/cert plus the CA used to verify
        # the server, then route all calls through the SSL-aware transport.
        self.ctx = SSLCommon.CreateSSLContext(pkey_file, cert_file, ca_cert_file)
        xmlrpclib.ServerProxy.__init__(self, uri, SSL_Transport(ssl_context=self.ctx, timeout=timeout), allow_none=True)
class FuncServer(SSLXMLRPCServerProxy):
    # Thin convenience subclass: same proxy, but the certificate paths are
    # optional keyword arguments and are remembered on the instance.
    def __init__(self, uri, pem=None, crt=None, ca=None, timeout=None):
        self.pem = pem
        self.crt = crt
        self.ca = ca
        self.timeout = timeout
        SSLXMLRPCServerProxy.__init__(self, uri, pem, crt, ca, timeout)
if __name__ == "__main__":
    # Manual smoke test: ping a local func/certmaster daemon over mutually
    # authenticated SSL using the default slave key/cert and CA certificate.
    s = SSLXMLRPCServerProxy('https://localhost:51234/', '/etc/pki/certmaster/slave.pem', '/etc/pki/certmaster/slave.cert', '/etc/pki/certmaster/ca/certmaster.crt')
    f = s.ping(1, 2)
    print f
|
caglar10ur/func
|
func/overlord/sslclient.py
|
Python
|
gpl-2.0
| 2,005
|
#!/usr/bin/env python3
from collections import OrderedDict
import itertools
import logging
import threading
import xcffib
import xcffib.xproto
import xcffib.randr
from barython import _BarSpawner
logger = logging.getLogger("barython")
def get_randr_screens():
    """Query the X server, via the RandR extension, for all detected outputs.

    :return: OrderedDict mapping output name -> (width, height, x, y)
    """
    conn = xcffib.connect()
    try:
        conn.randr = conn(xcffib.randr.key)
        window = conn.get_setup().roots[0].root
        resources = conn.randr.GetScreenResourcesCurrent(window).reply()
        outputs = OrderedDict()
        for rroutput in resources.outputs:
            try:
                cookie = conn.randr.GetOutputInfo(
                    rroutput, resources.config_timestamp
                )
                info = cookie.reply()
                name = "".join(map(chr, info.name))
                cookie = conn.randr.GetCrtcInfo(
                    info.crtc, resources.config_timestamp
                )
                info = cookie.reply()
                if info:
                    outputs[name] = (info.width, info.height, info.x, info.y)
            except Exception as e:
                # Disconnected outputs have no usable CRTC: skip them rather
                # than aborting the whole enumeration.
                logger.debug("Error when trying to fetch screens infos")
                logger.debug(e)
                continue
        return outputs
    finally:
        # The original leaked the X connection on every call; always close it.
        conn.disconnect()
class Screen(_BarSpawner):
    """One physical output: owns its widgets and, optionally, its own bar.

    Widgets are stored per alignment ("l", "c", "r") and serialized by
    gather() into one lemonbar-formatted status line.
    """
    #: override for the bspwm monitor name; None means "use self.name"
    _bspwm_monitor_name = None

    @property
    def geometry(self):
        """
        Return the screen geometry in a tuple
        """
        if self._geometry:
            return self._geometry
        elif self.name:
            try:
                # randr yields (width, height, x, y); the bar keeps the randr
                # width and position but imposes its own height (inherited
                # from the panel via __getattribute__).
                x, y, px, py = get_randr_screens().get(self.name, None)
                self._geometry = (x, self.height, px, py)
            except (ValueError, TypeError):
                # .get() returned None (unknown output) or unpacking failed
                logger.error(
                    "Properties of screen {} could not be fetched. Please "
                    "specify the geometry manually.".format(self.name)
                )
        return self._geometry

    @geometry.setter
    def geometry(self, value):
        self._geometry = value

    @property
    def bspwm_monitor_name(self):
        # Fall back on the X screen name when no explicit bspwm name is set.
        return (self.name if self._bspwm_monitor_name is None
                else self._bspwm_monitor_name)

    @bspwm_monitor_name.setter
    def bspwm_monitor_name(self, value):
        self._bspwm_monitor_name = value

    def add_widget(self, alignment, *widgets, index=None):
        """
        Add a widget to a screen

        :param alignment: where adding the widget (left, center, right)
        :param *widgets: widgets to add
        :param index: if set, will insert the widgets before the specified
                      index (default: None)
        """
        if alignment not in self._widgets.keys():
            raise ValueError("'alignement' might be either 'l', 'c' or 'r'")
        if index is None:
            self._widgets[alignment].extend(widgets)
        else:
            list_widgets = self._widgets[alignment]
            self._widgets[alignment] = (
                list_widgets[:index] + list(widgets) + list_widgets[index:]
            )
        # register ourselves on every widget and pick up their hooks
        for w in self._widgets[alignment]:
            w.screens.add(self)
            self.hooks.merge(w.hooks)

    def gather(self):
        """
        Gather all widgets content
        """
        return "".join(
            "%{{{}}}{}".format(
                alignment, "".join([
                    str(widget.content) if widget.content is not None
                    else "" for widget in widgets
                ])
            ) for alignment, widgets in self._widgets.items() if widgets
        )

    def update(self, *args, **kwargs):
        # One bar per screen: refresh ourselves; single global bar: delegate
        # to the panel that owns it.
        if self.panel.instance_per_screen:
            return super().update(*args, **kwargs)
        else:
            return self.panel.update(*args, **kwargs)

    def propage_hooks_changes(self):
        """
        Propage a change in the hooks pool
        """
        if getattr(self, "panel", None):
            self.panel.hooks.merge(self.hooks)

    def start(self):
        """
        Start the screen panel

        If the global panel set that there might be one instance per screen,
        starts a local lemonbar.
        Starts all widgets in there own threads. They will callback a screen
        update in case of any change.
        """
        super().start()
        attached_widgets = list(itertools.chain(*self._widgets.values()))
        if not self.panel.instance_per_screen and len(attached_widgets) == 0:
            # No widget attached, no need to keep this thread opened
            # TODO: Add a test for it
            self.content = ""
            self.stop()
            return
        self.update(no_wait=True)
        for widget in attached_widgets:
            threading.Thread(
                target=widget.start
            ).start()
        self._stop.wait()

    def stop(self, *args, **kwargs):
        super().stop(*args, **kwargs)
        if self.hooks.listen:
            try:
                self.hooks.stop()
            # narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate during shutdown
            except Exception:
                pass
        for widget in itertools.chain(*self._widgets.values()):
            try:
                widget.stop()
            except Exception:
                # narrowed from a bare `except:` -- best-effort widget shutdown
                logger.debug("Error when stopping widget")
                continue

    def __getattribute__(self, name):
        attr = super().__getattribute__(name)
        # attributes to inherit from panel
        panel_attr = ("height", "fg", "bg", "fonts", "refresh", "clickable")
        if name in panel_attr:
            # None / -1 mean "unset here": fall back on the panel's value
            if (attr is None or attr == -1) and self.panel:
                return getattr(self.panel, name, attr)
        return attr

    def __init__(self, name=None, refresh=-1, clickable=-1, geometry=None,
                 panel=None, bspwm_monitor_name=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        #: screen name
        self.name = name
        #: refresh rate
        self.refresh = refresh
        #: clickable items (for lemonbar)
        self.clickable = clickable
        self.panel = panel
        #: bar geometry, in a tuple (x, y, position_x, position_y)
        self.geometry = geometry
        #: widgets to show on this screen
        self._widgets = OrderedDict([("l", []), ("c", []), ("r", [])])
        #: only useful with bspwm. Used by Bspwm*DesktopWidget
        self.bspwm_monitor_name = bspwm_monitor_name
|
Anthony25/barython
|
barython/screen.py
|
Python
|
bsd-3-clause
| 6,190
|
#!/usr/bin/env python3
import collections
from itertools import groupby
from typing import List, MutableMapping
from .path import Path
# future: sorting & unsorting
# fixme: problems with the total sum/value at the root
# (not the same, as value at root can be more than the total of the next level)
# solution: do not allow empty roots to be given by the user
# thus the real (empy) root always carries the total sum of the entries
# and gets set by complete_pv
# to plot the innerst circle, bring back the draw_center_circle option
def complete_pv(pathvalues: MutableMapping[Path, float]) -> MutableMapping[Path, float]:
    """ Consider a pathvalue dictionary of the form Dict[Path, float] e.g.
    {1.1.1: 12.0} (here: only one entry). This function will dissect each path
    and assign its value to the truncated path: e.g. here 1, 1.1 and 1.1.1.
    Thus we get {1: 12.0, 1.1: 12.0, 1.1.1: 12.0}. For more items the values
    will be summed accordingly.

    Furthermore the total sum of the items of the topmost level will be
    assigned to the empty path. For this to make sense we require that no
    empty path is in the data beforehand.

    :param pathvalues: {path: value} dictionary
    :return: {path: value} dictionary
    """
    if Path(()) in pathvalues:
        # fixed message: the two fragments previously concatenated to
        # "...as itemin the data list."
        raise ValueError("This function does not allow the empty path as item "
                         "in the data list.")
    completed = collections.defaultdict(float)
    for path, value in pathvalues.items():
        # len(path) + 1 ensures that the full path itself is also credited;
        # starting at level 0 additionally credits the empty (root) path.
        for level in range(0, len(path) + 1):
            completed[path[:level]] += value
    return completed
def complete_paths(paths: List[Path]) -> List[Path]:
    """ Like complete_pv, only that it tries to preserve the order of paths.
    """
    completed = [Path(())]
    for path in paths:
        # walk over every proper ancestor of *path* (lengths 1 .. len-1)
        for cut in range(1, len(path)):
            ancestor = path[:cut]
            if ancestor not in paths and ancestor not in completed:
                # the ancestor never appears on its own later on: emit it
                # right before the first path that needs it
                completed.append(ancestor)
        completed.append(path)
    return completed
def structure_paths(paths: List[Path]) -> List[List[List[Path]]]:
    """ Takes a list of paths and groups the paths first by length (empty
    path length 0) and then by the parent (path[:len(path) - 1]).
    Example:
        [
            [ ["" ]],                           # the root
            [ [1, 2, 3] ],                      # level 1: only one group, because all
                                                # elements share the same parent
            [ [1.1, 1.2], [3.1, 3.2] ],         # grouped by parents
            [ [1.1.1, 1.1.2], [1.2.1, 1.2.2], [3.1.1], [3.2.1] ]
        ]
    :param paths: Paths
    :return: [[Paths grouped by parents] grouped by levels.]
    """
    # groupby only merges *adjacent* runs, so each key must be sorted on
    # before it is grouped by: first the level (= path length), then, within
    # each level, the parent path.
    paths.sort(key=len)
    structured = []
    for _, level_iter in groupby(paths, key=len):
        level_paths = sorted(level_iter, key=lambda p: p.parent())
        structured.append(
            [list(parent_group) for _, parent_group in
             groupby(level_paths, key=lambda p: p.parent())]
        )
    return structured
def pprint_structured_paths(structurized: List[List[List[Path]]]):
    """Pretty-print the output of structure_paths, one level per line."""
    print("[")
    for level_groups in structurized:
        print("\t", [[str(p) for p in parent_group] for parent_group in level_groups])
    print("]")
def pprint_paths(paths: List[Path]):
    """Pretty-print a flat list of paths, one per indented line."""
    print("[")
    for text in map(str, paths):
        print("\t", text)
    print("]")
# (theta1, theta2): start/end angle of one wedge, in degrees; produced by
# calculate_angles below.
Angles = collections.namedtuple('Angles', ['theta1', 'theta2'])
# note: path_values must be "completed" first -- see complete_pv().
def calculate_angles(structured_paths: List[List[List[Path]]],
                     path_values: MutableMapping[Path, float]) -> \
        MutableMapping[Path, Angles]:
    """Assign each path the wedge [theta1, theta2] (degrees) it spans.

    A wedge covers the fraction value/total of the full circle and starts
    where its previous sibling ended, or -- for the first child of a group --
    at its parent's start angle, so children stay aligned under their parent.
    """
    angles = {}  # return value
    # the total sum of all elements (on one level)
    value_sum = path_values[Path(())]
    for level_no, groups in enumerate(structured_paths):
        for group in groups:
            theta2 = None  # else pycharm complains about theta2 undefined
            for path_no, path in enumerate(group):
                # First we determine the starting angle (theta1) for the wedge
                # corresponding to the path.
                if level_no == 0:
                    # This corresponds to the inner circle (because level 0
                    # only contains the empty path, the root of the whole tree)
                    theta1 = 0
                elif path_no == 0:
                    # The first path of a group. Since the wedges must be
                    # aligned with the parent, we have to get this value
                    # from the parent.
                    theta1 = angles[path.parent()].theta1
                else:
                    # we continue the wedge where the previous one had stopped
                    theta1 = theta2
                # Now we determine the ending angle based on the fraction of
                # the value.
                theta2 = theta1 + 360 * path_values[path]/value_sum
                angles[path] = Angles(theta1, theta2)
    return angles
|
klieret/pyplot-hierarchical-pie
|
hpie/calc.py
|
Python
|
bsd-3-clause
| 5,593
|
# -*- coding: utf-8 -*-
# defining the basic object we will be working with
# adapted from :
# /media/KINGSTON/ARMOR/ARMOR/python/weatherPattern.py ,
# /media/KINGSTON/ARMOR/ARMOR/python/clustering.py ,
# /media/KINGSTON/ARMOR/2013/pythonJan2013/basics.py
# Yau Kwan Kiu, Room 801, 23-1-2013
##############################################################################################
#
#==== imports ================================================================================
# some of the stuff are to be moved to a submodule
import copy
import time
import os
import re
import numpy
import numpy as np
import numpy.ma as ma
#import matplotlib
import matplotlib.pyplot as plt
#import scipy.misc.pilutil as smp
#import numpy.fft as fft
#import shutil
#import sys
import pickle
from copy import deepcopy
try:
from scipy import signal
from scipy import interpolate
except ImportError:
#print "Scipy not installed"
pass
#==== setting up the global parameters========================================================
from defaultParameters import * #bad habits but all these variables are prefixed with "default"
# or at least i try to make them to
import colourbarQPESUMS # the colourbars for the Central Weather Bureau
import colourbarQPESUMSwhiteBackground # the same as above, with white backgrounds
#==== defining the classes ===================================================================
class DBZ(object): #python 2.7 (?) new style class, subclassing object
"""module predecessors: basics.py; weatherPattern.py
NOTE: a DBZ object can be loaded from data or generated in run time (e.g. by translation, or
other operations.) There is flexibility in this. In particular, the object is instantiated before
its values loaded (either from file or from other computations).
attributes (some to be defined at __init__, some afterwards):
DBZ.name - a string, the name of the instance, default = something like "DBZ20120612.0200"
DBZ.matrix - a numpy.ma.core.MaskedArray object
DBZ.datatime - a string like "20120612.0200"
DBZ.dataPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.outputPath - a string like "../data/dbz20120612.0200.dat"
can be relative (preferred) or absolute
DBZ.inputType - a string to record the type of input file, most common is "txt",
which should be 2-dim arrays in text, separated by " " and "\n",
readable by numpy or matlab
- convention: first row of data = bottom row of pixels
DBZ.image - I am not going to define this yet, since matplotlib.pyplot is pretty fast
DBZ.imagePath - a string like "../data/dbz20120612.0200.png"
can be relative (preferred) or absolute
default = "" (undefined)
DBZ.dt - time interval from the previous image (default=1; how about 10mins = 1/6 hour??)
DBZ.dy - grid size, latitudinal, in km (default =1; how about 0.0125 degree = how many kms?)
DBZ.dx - grid size, longitudinal, in km (same as above)
DBZ.timeStamp - time stamp when the object was created
DBZ.verbose - whether print out a lot of stuff when we work with this object
#################################################################
# DBZ.inputFolder - a string, self evident # <-- not used yet,
# DBZ.outputFolder - ditto # perhaps not here
# DBZ.outputFolderForImages - ditto #
#################################################################
DBZ.database - a string, pointing to the database, somehow, for future
methods:
DBZ.load - load into DBZ.matrix
DBZ.save
DBZ.saveImage
DBZ.printToScreen
use:
>>> from armor import pattern
>>> a = pattern.DBZ(dataTime="20120612.0200",name="", dt=1, dx=1, dy=1, dataPath="", imagePath="")
>>> a.load()
>>> a.printToScreen()
>>> import numpy as np
>>> import armor
>>> import armor.pattern as pattern
>>> dbz=pattern.DBZ
>>> a = dbz('20120612.0300')
DBZ20120612.0300initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> b = dbz('20120612.0330')
DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> a.load()
>>> b.load()
>>> c=a-b
DBZ20120612.0300_minus_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> c.show()
>>> d=a*b
DBZ20120612.0300_times_DBZ20120612.0330initialised. Use the command '___.load()' to load your data, and '__.printToScreen()' to print it to screen.
>>> d.show()
>>>
"""
    def __init__(self, dataTime="NoneGiven", matrix=-999, name="", dt=1, dx=1, dy=1,\
                 dataPath="",outputPath ="",imagePath="",\
                 cmap='hsv', vmin=-20, vmax=100, coordinateOrigin="default",\
                 coastDataPath="", relief100DataPath='', relief1000DataPath='',\
                 relief2000DataPath='', relief3000DataPath='',\
                 lowerLeftCornerLatitudeLongitude ='',\
                 upperRightCornerLatitudeLongitude ='',\
                 database="", verbose=False):
        # creation timestamp; used below to build unique output file names
        self.timeStamp = str(int(time.time()))
        """
        Notes:
        1. cmap = colourbar of the dbz plot, need to find out how to plot it with
        CWB's colour scheme as specified in the modules colourbarQPESUMS
        and colourbarQPESUMSwhiteBackground
        2. coordinateOrigin: normally either place at the centre of the picture
        or at Taichung Park
        (24.145056°N 120.683329°E)
        which translates to
        (492, 455) in our 881x921 grid
        reference:
        http://zh.wikipedia.org/wiki/%E8%87%BA%E4%B8%AD%E5%85%AC%E5%9C%92
        /media/KINGSTON/ARMOR/2013/python/testing/test104/test104.py
        """
        ########
        # Fill in defaults for every argument left at its sentinel value.
        if name == "":
            name = "DBZ" + dataTime
        # -999 (an int) marks "no matrix given": initialise with zeros
        if type(matrix)==type(-999):
            matrix = ma.zeros((defaultHeight, defaultWidth))
            matrix.fill_value = -999            # -999 for missing values always
        if isinstance(matrix, ma.MaskedArray):
            matrix.fill_value = -999
        # plain ndarrays are coerced into unmasked MaskedArrays
        if isinstance(matrix, np.ndarray) and not isinstance(matrix, ma.MaskedArray):
            matrix = matrix.view(ma.MaskedArray)
            matrix.mask = None
            matrix.fill_value = -999
        if dataPath =="":
            dataPath = defaultInputFolder + "COMPREF." + dataTime +".dat"
        if outputPath =="":
            outputPath = defaultOutputFolder + name + '_'+ self.timeStamp + ".dat"
        if imagePath =="":
            imagePath = defaultOutputFolderForImages + name + '_'+self.timeStamp + ".png"
        if coastDataPath == "":
            coastDataPath = defaultInputFolder + "taiwanCoast.dat"
        if relief100DataPath == "":
            relief100DataPath = defaultInputFolder + "relief100.dat"
        if relief1000DataPath == "":
            relief1000DataPath = defaultInputFolder + "relief1000Extended.dat"
        if relief2000DataPath == "":
            relief2000DataPath = defaultInputFolder + "relief2000Extended.dat"
        if relief3000DataPath == "":
            relief3000DataPath = defaultInputFolder + "relief3000Extended.dat"
        if lowerLeftCornerLatitudeLongitude =="":
            lowerLeftCornerLatitudeLongitude = defaultLowerLeftCornerLatitudeLongitude
        if upperRightCornerLatitudeLongitude=="":
            upperRightCornerLatitudeLongitude = defaultUpperRightCornerLatitudeLongitude
        if database =="":
            database = defaultDatabase
        ###############################################################################
        # if matrix shape = (881, 921) then by default the origin at Taichung Park
        #  (24.145056°N 120.683329°E)
        #  or (492, 455) in our grid
        # else the centre is the origin by default
        ###############################################################################
        if coordinateOrigin == "default":               #default
            if matrix.shape == (881, 921):
                coordinateOrigin = (492, 455)
            else:
                coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
        elif coordinateOrigin == "centre" or coordinateOrigin=="center":
            coordinateOrigin = (matrix.shape[0]//2, matrix.shape[1]//2)
        elif (coordinateOrigin == 'Taichung' or \
              coordinateOrigin == 'Taichung Park' or\
              coordinateOrigin == 'taichungpark') and matrix.shape==(881,921):
            coordinateOrigin = (492,455)
        #coordinateOrigin = (0,0)    # switch it off - will implement coordinate Origin later
        if verbose:
            print "------------------------------------------------------------------"
            print "armor.pattern.DBZ:\nname, dt, dx, dy, dataPath, imagePath ="
            print name, dt, dx, dy, dataPath, imagePath
        #
        ########
        # Store everything on the instance.
        self.matrix = matrix
        self.dataTime = dataTime
        self.name = name
        self.dt = dt        #retrospective
        self.dx = dx        #grid size
        self.dy = dy
        self.outputFolder= defaultOutputFolder
        self.dataPath = dataPath
        self.outputPath = outputPath
        self.imagePath = imagePath
        self.coastDataPath = coastDataPath
        self.relief100DataPath = relief100DataPath
        self.relief1000DataPath = relief1000DataPath
        self.relief2000DataPath = relief2000DataPath
        self.relief3000DataPath = relief3000DataPath
        self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
        self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
        self.database = database
        self.cmap = cmap
        self.vmin = vmin        # min and max for makeImage()
        self.vmax = vmax
        self.coordinateOrigin = coordinateOrigin
        self.O = self.coordinateOrigin      #alise, no guarentee
        self.verbose = verbose
        #self.matrix_backups = []   # for storage
        #if verbose:
        #    print(self.name + "initialised. Use the command '___.load()' to load your data, " +\
        #          "and '__.printToScreen()' to print it to screen.")
#################################################################################
# basic operator overloads
    def __call__(self, i=-999, j=-999, display=False):
        # With no arguments: print a small window around the centre of the
        # integer-filled matrix and return the whole filled integer matrix.
        if i ==-999 and j ==-999:
            height, width = self.matrix.shape
            h = int(height**.5 /2)
            w = int(width**.5 /2)
            print self.matrix.filled().astype(int)[height//2-h:height//2+h,\
                                                   width//2-w: width//2+w]
            return self.matrix.filled().astype(int)
        else:
            """
            returns interpolated value
            """
            # Bilinear interpolation between the four grid points surrounding
            # the fractional position (i, j).
            arr= self.matrix
            i0 = int(i)
            j0 = int(j)
            i1 = i0 + 1
            j1 = j0 + 1
            i_frac = i % 1
            j_frac = j % 1
            f00 = arr[i0,j0]
            f01 = arr[i0,j1]
            f10 = arr[i1,j0]
            f11 = arr[i1,j1]
            interpolated_value = (1-i_frac)*(1-j_frac) * f00 + \
                                 (1-i_frac)*( j_frac) * f01 + \
                                 ( i_frac)*(1-j_frac) * f10 + \
                                 ( i_frac)*( j_frac) * f11
            if display:
                print i_frac, j_frac, f00, f01, f10, f11
            return interpolated_value
def __add__(self, DBZ2):
"""defining the addition of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix+DBZ2.matrix,\
name=self.name+"_plus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_plus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_plus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_plus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
def __sub__(self, DBZ2):
"""defining the subtraction of two pattern.DBZ objects
c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
can move to CUDA in the future
"""
return DBZ(dataTime=self.dataTime, matrix=self.matrix-DBZ2.matrix,\
name=self.name+"_minus_"+DBZ2.name, \
dt=self.dt, dx=self.dx, dy=self.dy,\
dataPath =self.outputPath+"_minus_"+DBZ2.name+".dat",\
outputPath=self.outputPath+"_minus_"+DBZ2.name+".dat",\
imagePath =self.imagePath +"_minus_"+DBZ2.name+".png",\
database =self.database,\
cmap=self.cmap, verbose=self.verbose)
    def __mul__(self, M):
        """ defining multiplication
        c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
        can move to CUDA in the future
        """
        # M may be a scalar (int/float), an array of the same type as
        # self.matrix, or another DBZ.
        # NOTE(review): any other type leaves `matrix`/`name` unbound and the
        # DBZ(...) call below raises NameError -- confirm that is acceptable.
        if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
            matrix = self.matrix * M
            name=self.name+"_times_"+ str(M)
        if type(M)==type(self):
            matrix = self.matrix * M.matrix
            name=self.name+"_times_"+ M.name
        return DBZ(dataTime=self.dataTime, matrix=matrix,\
                   dt=self.dt, dx=self.dx, dy=self.dy,\
                   name =name,
                   dataPath =self.outputPath+"_times_"+str(M)+".dat",\
                   outputPath=self.outputPath+"_times_"+str(M)+".dat",\
                   imagePath =self.imagePath +"_times_"+str(M)+".png",\
                   database =self.database,\
                   cmap=self.cmap, verbose=self.verbose)
    def __rmul__(self, M):
        """ defining multiplication on the right
        c.f. http://docs.python.org/release/2.5.2/ref/numeric-types.html
        can move to CUDA in the future
        """
        # Mirror of __mul__ for `scalar * dbz` -- the same caveat applies:
        # NOTE(review): an unsupported M leaves `matrix`/`name` unbound and
        # raises NameError below.
        if type(M)==type(1) or type(M)==type(1.1) or type(M)==type(self.matrix) :
            matrix = self.matrix * M
            name=self.name+"_times_"+ str(M)
        if type(M)==type(self):
            matrix = self.matrix * M.matrix
            name=self.name+"_times_"+ M.name
        return DBZ(dataTime=self.dataTime, matrix=matrix,\
                   dt=self.dt, dx=self.dx, dy=self.dy,\
                   name =name,
                   dataPath =self.outputPath+"_times_"+str(M)+".dat",\
                   outputPath=self.outputPath+"_times_"+str(M)+".dat",\
                   imagePath =self.imagePath +"_times_"+str(M)+".png",\
                   database =self.database,\
                   cmap=self.cmap, verbose=self.verbose)
# end basic operator overloads
##################################
############################################################
# basic i/o's
    def load(self):
        """
        DBZ.load - load into DBZ.matrix
        adapted from basics.readToArray(path)
        """
        m = np.loadtxt(self.dataPath)
        self.matrix = ma.array(m)
        # setting the mask
        self.matrix.fill_value = -999           # -999 for missing values
        # self.matrix.fill_value = -20.1        # -20 for missing values
        self.matrix.mask = (m < -20)            # smaller than -20 considered no echo
        # 1 March 2013
        ##
        # THE FOLLOWING IS SKIPPED TO SAVE MEMORY
        # loading coastal data
        #try:
        #    self.coastData = np.loadtxt(self.coastDataPath)
        #except:
        #    print "Cannot load coast data from the path: ", self.coastDataPath
    # All five loaders below (coast line, relief contours at 100/1000/2000/
    # 3000 m) overwrite the SAME attribute, self.coastData, which is the
    # overlay consumed by showWithCoast()/show2()/drawCoast().
    def loadCoast(self):
        self.coastData = np.loadtxt(self.coastDataPath)

    def load100(self):
        self.coastData = np.loadtxt(self.relief100DataPath)

    def load1000(self):
        self.coastData = np.loadtxt(self.relief1000DataPath)

    def load2000(self):
        self.coastData = np.loadtxt(self.relief2000DataPath)

    def load3000(self):
        self.coastData = np.loadtxt(self.relief3000DataPath)
    def toArray(self):
        """Return a plain ndarray copy, masked cells filled with -999, for other uses.
        """
        return ma.filled(self.matrix)
    def save(self):
        """Write self.matrix to self.outputPath as a plain-text array.

        * We convert the masked array into a standard array with masked data filled by -999
        * adapted from basics.writeArrayToTxtFile(arr, path, as_integer=False):
                if as_integer:
                    np.savetxt(path, arr, fmt='%.0f')   # 0 decimal place
                else:
                    np.savetxt(path, arr, fmt='%.2f')   # two decimal places as default
        """
        np.savetxt(self.outputPath, self.toArray())
    def saveMatrix(self):
        """ alias for self.save()
        """
        self.save()
    def makeImage(self, matrix="", vmin=99999, vmax=-99999, cmap="", title="",\
                  showColourbar=True, closeAll=True):
        """
        requires: matplotlib
        to make the plot before you save/print it to screen
        *adapted from basics.printToScreen(m,cmap='gray'):
        which was in turn adapted from stackoverflow:
        http://stackoverflow.com/questions/7875688/how-can-i-create-a-standard-colorbar-for-a-series-of-plots-in-python
            def printToScreen(m,cmap='gray'):
                fig, axes = plt.subplots(nrows=1, ncols=1)
                # The vmin and vmax arguments specify the color limits
                im = axes.imshow(m, vmin=-20, vmax=100, cmap=cmap)
                cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
                fig.colorbar(im, cax=cax)
                plt.show()
        !!! TO DO: FIX THE AXES !!!
        """
        # "" / 99999 / -99999 are sentinel defaults meaning "use the values
        # stored on this object".
        if isinstance(matrix, str):
            matrix = self.matrix
        if title =="":
            title = self.name
        if cmap == "":
            cmap = self.cmap
        if vmin == 99999:
            vmin = self.vmin
        if vmax == -99999:
            vmax = self.vmax
        # clear the canvass
        if closeAll:
            #plt.clf()
            plt.close()
        # make the image
        fig, axes = plt.subplots(nrows=1, ncols=1)
        im = axes.imshow(matrix,            # or np.flipud(self.matrix)?
                         vmin=vmin, vmax=vmax, cmap=cmap)   # The vmin and vmax arguments
                                                            # specify the color limits
        plt.title(title)
        if showColourbar :
            cax = fig.add_axes([0.9, 0.1, 0.01, 0.8])
            fig.colorbar(im,cax=cax)
        #plt.show() # wait, don't show!
    def saveImage(self):
        # Render with the object's defaults and write to self.imagePath.
        self.makeImage()
        plt.savefig(self.imagePath, dpi=200)
    def printToScreen(self, matrix="", cmap=""):
        # Render, then pop up the matplotlib window.
        self.makeImage(matrix=matrix, cmap=cmap)
        plt.show()
    def show(self, matrix="", cmap=""):
        """alias to printToScreen()
        """
        self.printToScreen(matrix=matrix, cmap=cmap)
    def showWithFlip(self, cmap=""):
        """flip it upside down and show it
        """
        # NOTE(review): unlike showInverted(), the flip is NOT undone
        # afterwards -- self.matrix stays flipped. Confirm this is intended.
        self.matrix = np.flipud(self.matrix)
        self.printToScreen(cmap=cmap)
    def showWithCoast(self, matrix="", cmap='', intensity=9999):
        # Overlay the coast line (adding `intensity` onto coast pixels) and
        # show. Mutates self.matrix in place; showWithoutCoast() restores
        # the backup taken below.
        if matrix=="":
            matrix=self.matrix
        try:
            if self.showingWithCoast:       # if already showing coast: do nothing
                self.show(matrix=matrix)
                return None                 # just show and go
        except AttributeError:              # if it didn't happen before: default = False
            self.showingWithCoast = False   # just do something
        self.showingWithCoast = True
        self.matrix_backup = self.matrix.copy()
        if cmap != '':
            self.cmap_backup = self.cmap
            self.cmap = cmap
        else:
            self.cmap_backup = self.cmap
        try:
            if self.coastData == "" : print "haha"      #test for existence
        except AttributeError:
            # coast overlay not loaded yet: load it on demand
            self.loadCoast()
            print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
        for v in self.coastData:
            self.matrix[v[0], v[1]] += intensity
        self.show(matrix=matrix)
def show2(self, cmap='', intensity=99999):
""" adding the coastline and then flip it
"""
try:
if self.showingWithCoast: # if already showing coast: do nothing
self.show()
return None # just show and go
except AttributeError: # if it didn't happen before: default = False
self.showingWithCoast = False # just do something
self.showingWithCoast = True
self.matrix_backup = self.matrix.copy()
if cmap != '':
self.cmap_backup = self.cmap.copy()
self.cmap = cmap
else:
self.cmap_backup = self.cmap
try:
if self.coastData == "" : print "haha" #test for existence
except AttributeError:
self.loadCoast()
print "\n... coast data loaded from ", self.coastDataPath, "for ", self.name
for v in self.coastData:
self.matrix[v[0], v[1]] = intensity
self.matrix = np.flipud(self.matrix)
self.printToScreen(cmap=cmap)
    def showWithoutCoast(self):
        """resetting
        """
        # Undo showWithCoast()/show2(): restore the saved matrix and cmap,
        # then display the clean pattern.
        self.showingWithCoast = False
        self.cmap = self.cmap_backup
        self.matrix = self.matrix_backup
        self.show()
    def show3(self):
        """alias for showWithoutCoast()
        """
        self.showWithoutCoast()
    def showInverted(self):
        # Display upside down, then flip back so self.matrix is unchanged.
        self.matrix = np.flipud(self.matrix)
        self.printToScreen()
        self.matrix = np.flipud(self.matrix)
    def show0(self):
        """alias for showInverted()
        """
        self.showInverted()

    def show4(self):
        """alias for showInverted()
        """
        self.showInverted()
def backupMatrix(self, name=""):
"""backing up self.matrix for analysis
paired with self.restoreMatrix()
"""
try:
self.backupCount += 1
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
except AttributeError:
self.backupCount = 0
self.matrix_backups = {}
if name =="":
name = self.backupCount
self.matrix_backups[name] = self.matrix.copy()
    def restoreMatrix(self, name =""):
        """see self.backupMatrix() for comments
        """
        # default: restore the most recent unnamed backup
        if name =="":
            name = self.backupCount
        self.matrix = self.matrix_backups[name].copy()
# end basic i/o's
############################################################
#############################################################
# new objects from old
    def copy(self):
        """returning a copy of itself
        9 March 2013
        """
        # Deep-copies only the matrix; all other attributes are shared refs.
        return DBZ(dataTime =self.dataTime,
                   matrix   =self.matrix.copy(),
                   name     =self.name,
                   dt       =self.dt,
                   dx       =self.dx,
                   dy       =self.dy,
                   dataPath =self.dataPath,
                   outputPath=self.outputPath,
                   imagePath=self.imagePath,
                   coastDataPath=self.coastDataPath,
                   database =self.database,
                   cmap     =self.cmap,
                   vmin     =self.vmin,
                   vmax     =self.vmax,
                   coordinateOrigin= self.coordinateOrigin,
                   lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
                   upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
                   verbose  =self.verbose)
    def drawCross(self, i="", j="", radius=5, intensity=9999):
        """to draw a cross (+) at the marked point

        Returns a new DBZ; self is not modified.
        """
        # default: centre the cross on the coordinate origin
        if i=="" or j=="":
            i=self.coordinateOrigin[0]
            j=self.coordinateOrigin[1]
        # NOTE(review): recentreTaichungPark() stores float origins; float
        # indices would fail here -- confirm callers pass integer centres.
        matrix=self.matrix.copy()
        matrix[i-radius:i+radius+1, j ] = intensity
        matrix[i , j-radius:j+radius+1] = intensity
        return DBZ(dataTime =self.dataTime,
                   matrix   = matrix,
                   name     =self.name + \
                            ", cross at x,y=(%d,%d), radius=%d" %\
                            (j, i, radius),
                   dt       =self.dt,
                   dx       =self.dx,
                   dy       =self.dy,
                   dataPath =self.dataPath,
                   outputPath=self.outputPath,
                   imagePath=self.imagePath,
                   coastDataPath=self.coastDataPath,
                   database =self.database,
                   cmap     =self.cmap,
                   vmin     =self.vmin,
                   vmax     =self.vmax,
                   coordinateOrigin= self.coordinateOrigin,
                   lowerLeftCornerLatitudeLongitude = self.lowerLeftCornerLatitudeLongitude,
                   upperRightCornerLatitudeLongitude =self.upperRightCornerLatitudeLongitude,
                   verbose  =self.verbose)
    def drawCoast(self, intensity=9999, newCopy=False):
        """
        adapted from DBZ.show2()

        Stamp the coast-line pixels into the matrix at value `intensity`.
        With newCopy=True a copy is modified and returned; otherwise self is
        modified in place (and returned).
        """
        if newCopy:
            a = self.copy()         # no need for this i guess!!!
        else:
            a = self
        try:
            if a.coastData == "" : print "haha"     #test for existence
        except AttributeError:
            # coast overlay not loaded yet: load it on demand
            a.loadCoast()
            print "\n... coast data loaded from ", a.coastDataPath, "for ", a.name
        for v in a.coastData:
            a.matrix[v[0], v[1]] = intensity
        return a
    def recentreTaichungPark(self):
        """
        2013-08-27
        use:
            a = pattern.a
            a.showTaichungPark()
        takes as input
        attributes:
            lowerLeftCornerLatitudeLongitude
            upperRightCornerLatitudeLongitude
        constants:
            taichung park coordinates (24.145056°N 120.683329°E)
        changes:
            self.coordinateOrigin
            self.O
        returns:
            grid square for taichung park
        """
        #global taichungParkLatitude, taichungParkLongitude
        height, width = self.matrix.shape
        i0 = taichungParkLatitude       #defined in defaultParameters.py
        j0 = taichungParkLongitude
        # the above two lines dont work, here's a hack fix
        #import defaultParameters
        #j0 = defaultParameters.taichungParkLongitude
        #i0 = defaultParameters.taichungParkLatitude
        i1, j1 = self.lowerLeftCornerLatitudeLongitude
        i2, j2 = self.upperRightCornerLatitudeLongitude
        # linear map from (lat, long) to fractional grid coordinates;
        # note the results i3, j3 are floats, not integer grid indices
        i3 = 1.*(i0-i1)*height/(i2-i1)      # (latitudeTCP-latLowerleft) * grid per latitude
        j3 = 1.*(j0-j1)*width/(j2-j1)       # ditto for longitude
        self.coordinateOrigin = (i3,j3)
        self.O = (i3,j3)
        return i3, j3
def recentre(self):
    """Alias of recentreTaichungPark() (British spelling)."""
    return self.recentreTaichungPark()
def recenter(self):
    """Alias of recentreTaichungPark() (American spelling)."""
    return self.recentreTaichungPark()
def drawRectangle(self, bottom=0, left=0, height=100, width=100, intensity=9999):
    """ return a copy with a rectangle on the image

    The outline is burnt in at `intensity` (2-pixel-thick bottom/left edges,
    1-pixel top/right edges).
    NOTE(review): indices are not clipped -- a rectangle reaching past the
    matrix edge will wrap via negative indices or raise IndexError; confirm
    callers stay in bounds.
    """
    vmax = self.vmax                    # NOTE(review): unused local, kept as in the original
    matrix = self.matrix.copy()
    # vertical edges
    for i in range(bottom, bottom+height):
        matrix[i , left:left+2] = intensity
        matrix[i , left+width] = intensity
    # horizontal edges
    for j in range(left, left+width):
        matrix[bottom:bottom+2, j] = intensity
        matrix[bottom+height, j] = intensity
    # wrap in a new DBZ, carrying over all metadata
    return DBZ(dataTime  =self.dataTime,
               matrix    = matrix,
               name      =self.name + \
                 ", rectangle at x,y=(%d,%d), width=%d, height=%d" %\
                 (left, bottom, width, height),
               dt        =self.dt,
               dx        =self.dx,
               dy        =self.dy,
               dataPath  =self.dataPath,
               outputPath=self.outputPath,
               imagePath=self.imagePath,
               coastDataPath=self.coastDataPath,
               database  =self.database,
               cmap      =self.cmap,
               vmin      =self.vmin,
               vmax      =self.vmax,
               coordinateOrigin= self.coordinateOrigin,
               verbose   =self.verbose)
def getWindow(self, bottom=0, left=0, height=100, width=100):
    """return a dbz object, a window view of itself

    The window is the [bottom:bottom+height, left:left+width] sub-matrix
    (a copy, not a live view).
    """
    name = self.name +'_windowed' + '_bottom' + str(bottom) +\
           '_left' + str(left) + '_height' + str(height) + '_width' + str(width)
    matrix = self.matrix.copy()
    matrix = matrix[bottom:bottom+height, left:left+width]
    return DBZ(dataTime  =self.dataTime,
               matrix    = matrix,
               name      = name,
               dt        =self.dt,
               dx        =self.dx,
               dy        =self.dy,
               dataPath  =self.dataPath,
               outputPath=self.outputPath,
               imagePath=self.imagePath,
               coastDataPath=self.coastDataPath,
               database  =self.database,
               cmap      =self.cmap,
               vmin      =self.vmin,
               vmax      =self.vmax,
               coordinateOrigin = (height//2, width//2) ,  # hack: origin re-centred at the window middle
               verbose   =self.verbose)
def shiftMatrix(self,i,j):
    """Return a copy of self shifted by (i, j), with the wrapped edge masked.

    shifting the array/dbz pattern; masking the edge
    codes migrated from shiiba.py (now armor.shiiba.regression) to here
    i = shift in axis-0 = going up
    j = shift in axis-1 = going right
    Returns a new DBZ object; self is left untouched.
    """
    #1. copy the matrix
    matrix = self.matrix.copy()
    #2. shift the matrix (np.roll wraps around; step 3 masks the wrapped strip)
    matrix = np.roll(matrix, i,axis=0)
    matrix = np.roll(matrix, j,axis=1)
    #3. mask the edges
    if i>0: # up
        matrix.mask[ :i, : ] = 1        #mask the first (=bottom) i rows
    if i<0: # down
        matrix.mask[i: , : ] = 1        #mask the last (=top) rows; i<0
    if j>0: # right
        matrix.mask[ : , :j] = 1        #mask the first (=left) columns
    if j<0: # left
        matrix.mask[ : ,j: ] = 1        #mask the last (=right) columns
    #4. return an armor.pattern.DBZ object
    # FIX: coordinateOrigin used to be the nested tuple
    # (self.coordinateOrigin, self.coordinateOrigin); every sibling method
    # passes the 2-tuple itself, so do the same here.
    # NOTE(review): dataPath is derived from outputPath, as in the original
    # -- confirm that is intended.
    self_shifted_by_ij = DBZ(dataTime=self.dataTime, matrix=matrix,\
                        name=self.name+"shifted"+str((i,j)),\
                        dt=self.dt, dx=self.dx, dy=self.dy, \
                        dataPath  =self.outputPath+"shifted"+str((i,j))+".dat",\
                        outputPath =self.outputPath+"shifted"+str((i,j))+".dat",\
                        imagePath =self.imagePath +"shifted"+str((i,j))+".png",\
                        database  =self.database,\
                        cmap=self.cmap,
                        coordinateOrigin = self.coordinateOrigin,
                        verbose=self.verbose)
    return self_shifted_by_ij
def shift(self, i, j):
    """Alias of shiftMatrix()."""
    return self.shiftMatrix(i, j)
def smooth(self, ker=""):
    """
    ################################
    # smoothing the image by convolution with a kernal
    # uses SciPY
    # return : a DBZ object, smoothed
    # 8 March 2013
    #################################
    ker -- convolution kernel; default is a 5x5 Gaussian-like kernel whose
           weights sum to 1 (237/237)
    """
    if ker=="":
        ker = 1./237. * np.array( [[1, 4, 7, 4, 1],     # default kernel
                                   [4,16,26,16, 4],
                                   [7,26,41,26, 7],
                                   [4,16,26,16, 4],
                                   [1, 4, 7, 4, 1]])
    phi0 = self.matrix.copy()
    # fill masked cells with a huge negative number so they remain strongly
    # negative after convolution and get re-masked just below
    phi0.fill_value = -999999999
    phi0 = signal.convolve(phi0.filled(),ker)
    # re-mask anything contaminated by the negative fill value
    phi0 = ma.array(phi0, fill_value=-999, mask=(phi0<-80))
    # cutting it down to size (881,921): full convolution grows each side by 2
    return DBZ(name=self.name+'smoothed', matrix =phi0[2:-2, 2:-2],
               dt=self.dt, dx=self.dx, dy=self.dy,
               dataPath  =self.dataPath  +'smoothed.dat',
               outputPath=self.outputPath+'smoothed.dat',
               imagePath =self.imagePath +'smoothed.dat',   # NOTE(review): '.dat' suffix on an image path -- looks like a typo in the original
               coastDataPath=self.coastDataPath,
               database=self.database,
               cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
               coordinateOrigin = self.coordinateOrigin,
               verbose=self.verbose)
def coarser(self, scale=2):
    """
    ################################
    # returning a coarser image by averaging 4 nearby points
    #
    # return : a DBZ object
    # parameter "scale" not used yet
    # 8 March 2013
    # parameter "scale" implementation started on 12 march 2013
    #################################
    NOTE(review): despite the `scale` parameter, the averaging below is
    hard-coded to 2x2 blocks; only the trimming and the new coordinate
    origin honour `scale`.
    """
    phi = self.matrix.copy()
    # trim if dimensions not even
    height, width = phi.shape
    horizontal = width//scale
    vertical = height//scale
    phi = phi[0:vertical*scale, 0:horizontal*scale]     # trimming
    # getting the shifted copies
    #  0 1
    #  2 3
    phi.fill_value = -999999999
    phiList = []        #work to be continued here (parameter "scale" implementation)
    phi0 = phi[ ::2, ::2].flatten()
    phi1 = phi[ ::2,1::2].flatten()
    phi2 = phi[1::2, ::2].flatten()
    phi3 = phi[1::2,1::2].flatten()
    # stack the four phase-shifted copies and average them elementwise
    phi_mean= ma.vstack([phi0, phi1, phi2, phi3])
    phi_mean= ma.mean(phi_mean, axis=0)
    phi_mean= phi_mean.reshape(vertical, horizontal)
    # cutting it down to size (881,921)
    return DBZ(name=self.name+'coarser', matrix =phi_mean,
               dt=self.dt, dx=self.dx, dy=self.dy,
               dataPath  =self.dataPath  +'coarser.dat',
               outputPath=self.outputPath+'coarser.dat',
               imagePath =self.imagePath +'coarser.dat',    # NOTE(review): '.dat' suffix on an image path -- looks like a typo in the original
               coastDataPath=self.coastDataPath,
               database=self.database,
               cmap=self.cmap, vmin=self.vmin, vmax=self.vmax,
               coordinateOrigin = (self.coordinateOrigin[0] //scale,\
                                   self.coordinateOrigin[1] //scale ) ,
               verbose=self.verbose)
def coarser2(self):
    """Planned same-size variant of coarser(); not implemented yet.

    Intended algorithm (do it later when i have time): multiply self.matrix
    on both left and right by a block-"diagonal" of [[.5, .5], [.5, .5]]
    so the result stays the same size instead of shrinking.
    """
    nrows, ncols = self.matrix.shape    # placeholder: only reads the shape
    return None
def getPrediction(self, C):
    """Wrapper around armor.shiiba.regression2.getPrediction."""
    from armor.shiiba import regression2 as reg2
    return reg2.getPrediction(C, self)
def predict(self, *args, **kwargs):
    """Alias of getPrediction (wrapping it for the moment)."""
    return self.getPrediction(*args, **kwargs)
def advect(self, *args, **kwargs):
    """Wrapper around armor.advection.semiLagrangian.interpolate2 (for the moment)."""
    from armor.advection import semiLagrangian
    return semiLagrangian.interpolate2(self, *args, **kwargs)
def flipud(self):
    """Return a copy of self whose matrix is flipped upside-down (np.flipud)."""
    flipped = self.copy()
    flipped.matrix = np.flipud(flipped.matrix)
    return flipped
def fliplr(self):
    """Return a copy of self whose matrix is flipped left-right (np.fliplr)."""
    flipped = self.copy()
    flipped.matrix = np.fliplr(flipped.matrix)
    return flipped
def threshold(self, threshold=0):
    """getting a threshold image of itself with mask

    Values below `threshold` are added to the mask; the pre-existing mask is
    kept on the result as .oldMask so it can be restored.  Returns a new DBZ.
    """
    matrix= self.matrix.copy()
    name = self.name + " thresholded at " + str(threshold)
    oldMask = matrix.mask.copy()
    # boolean += acts as logical OR with the existing mask
    matrix.mask += (matrix < threshold)
    a_thres = DBZ(dataTime  =self.dataTime,
                  matrix    =matrix,
                  name      =name,
                  dt        =self.dt,
                  dx        =self.dx,
                  dy        =self.dy,
                  dataPath  =self.dataPath,
                  outputPath=self.outputPath + "_thresholded_" + str(threshold),
                  imagePath=self.imagePath + "_thresholded_" + str(threshold),
                  coastDataPath=self.coastDataPath,
                  database  =self.database,
                  cmap      =self.cmap,
                  vmin      =self.vmin,
                  vmax      =self.vmax,
                  coordinateOrigin= self.coordinateOrigin,
                  verbose   =self.verbose)
    a_thres.oldMask = oldMask
    return a_thres
# end new objects from old
#############################################################
############################################################
# functions on object
def cov(self, dbz2):
    """Covariance matrix of the two flattened images (wraps ma.cov)."""
    v1 = self.matrix.flatten()
    v2 = dbz2.matrix.flatten()
    return ma.cov(v1, v2)
def corr(self, dbz2):
    """Pearson correlation between the two flattened images (wraps ma.corrcoef).

    Returns a single coefficient, extracted from the 2x2 correlation matrix
    when ma.corrcoef returns one.
    """
    v1 = self.matrix.flatten()
    v2 = dbz2.matrix.flatten()
    coefficient = ma.corrcoef(v1, v2)
    if not isinstance(coefficient, (float, int)):
        coefficient = coefficient[0, 1]     # pick the off-diagonal entry
    return coefficient
def localCov(self, dbz2, windowSize=7):
    """plotting the local covariance of two dbz patterns
    a slow version of the function

    For every grid point, computes the correlation coefficient of the two
    patterns over a windowSize x windowSize neighbourhood (clipped at the
    borders).  Returns a masked array with the same shape as self.matrix.
    NOTE(review): E = (windowSize-1)/2 relies on Python 2 integer division;
    under Python 3 it becomes a float and the slicing below would break.
    sample session (retained from the original):
    >>> test.tic() ; x=a.localCov(b) ; test.toc()
    *************************
    time spent: 4091.93978906
    >>> xm=x.matrix
    >>> xm.min(), xm.max(), xm.mean()
    (-1.0000000000000002, 1.0000000000000002, 0.21721107449067339)
    """
    height, width = self.matrix.shape
    E = (windowSize-1)/2        #shorthand ; integer under Python 2
    # initialise: start fully masked, fill in as we go
    localcovar = ma.zeros((height,width))
    localcovar.mask = True
    for i in range(height):
        for j in range(width):
            # window clipped to the matrix bounds
            window1 = self.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
            window2 = dbz2.matrix[max(0,i-E):min(i+E+1, height),max(0,j-E):min(j+E+1,width)]
            localcovar[i,j] = ma.corrcoef(window1.flatten(), window2.flatten())[0,1]
    return localcovar
def shiiba(self, b, *args, **kwargs):
    """Wrapper around armor.analysis.shiiba; caches the result on self.shiibaResult."""
    from armor import analysis
    result = analysis.shiiba(self, b, *args, **kwargs)
    self.shiibaResult = result
    return result
def shiibaLocal(self, b, *args, **kwargs):
    """Wrapper around armor.analysis.shiibaLocal; caches and returns the result.

    FIX: the original ended with the bare expression statement
    ``self.shiibaLocalResult`` instead of ``return self.shiibaLocalResult``,
    so it always returned None (unlike the sibling wrappers shiiba/shiibaFree).
    """
    from armor import analysis
    self.shiibaLocalResult = analysis.shiibaLocal(self, b, *args, **kwargs)
    return self.shiibaLocalResult
def shiibaFree(self, b, *args, **kwargs):
    """Wrapper around armor.shiiba.regressionCFLfree.regressGlobal; caches the result."""
    from armor.shiiba import regressionCFLfree
    result = regressionCFLfree.regressGlobal(self, b, *args, **kwargs)
    self.shiibaFreeResult = result
    return result
def getVect(self, C):
    """Wrapper around armor.shiiba.regression2.convert."""
    from armor.shiiba import regression2 as reg2
    return reg2.convert(C, self)
def getKmeans(self, *args, **kwargs):
    """Wrapper around armor.kmeans.clustering.getKmeans() (8 April 2013)."""
    from armor.kmeans import clustering
    return clustering.getKmeans(self, *args, **kwargs)
def invariantMoments(self, **kwargs):
    """Hu invariant moments of self.matrix, normalised by moment degree.

    Wraps armor.geometry.moments.HuMoments; each of the seven moments is
    replaced by a sign-preserving root so all entries live on comparable
    scales.  The result is cached on self.invMom and returned.
    """
    from armor.geometry import moments
    hu = moments.HuMoments(self.matrix, **kwargs)
    # degree-normalisation exponent for each of the seven Hu moments
    exponents = (1./2, 1./4, 1./6, 1./6, 1./12, 1./8, 1./12)
    for k, e in enumerate(exponents):
        hu[k] = np.sign(hu[k]) * abs(hu[k]) ** e
    self.invMom = hu
    return hu
def spline(self):
    """Bivariate spline interpolant of self.matrix over its integer grid.

    Wraps scipy.interpolate.RectBivariateSpline:
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RectBivariateSpline.html
    """
    nrows, ncols = self.matrix.shape
    return interpolate.RectBivariateSpline(range(nrows), range(ncols), self.matrix)
# end function on object
############################################################
############################################################
# functions altering (attributes) of object
def findEdges(self, threshold=-9999):
    """Compute and cache the edge map of this pattern.

    threshold -- if different from the sentinel -9999, values below it are
                 masked out before edge detection.
    The result is stored on self.edges and returned.
    FIX: the original read the module-level demo object ``a``
    (``m = a.matrix.copy()`` / ``a.coastDataPath``-style bug), so it always
    operated on pattern.a regardless of self; it also had two identical
    if/else branches, which are merged here.
    """
    from armor.geometry import edges
    m = self.matrix.copy()
    if threshold != -9999:              # -9999 means "no threshold"
        m.mask += (m < threshold)
    m_edges = edges.find(DBZ(matrix=m))
    self.edges = m_edges
    return m_edges
# end functions altering (attributes) of object
############################################################
#####################################################
class VectorField(object):
    """wraps two masked arrays sharing the same mask (how can i make them share a mask?)

    U is the axis-0 (i / row) component, V the axis-1 (j / column) component.
    example:
    >>> from armor import pattern
    >>> a = pattern.DBZ(dataTime="20120612.0200")
    >>> a.load()
    >>> a.show()
    >>> b = pattern.VectorField(a.matrix, -a.matrix)
    >>> b.plot()
    >>> b.show()
    """
    def __init__(self, U, V, mask=False, name='vectorfield', dataPath="", outputPath="", imagePath="", \
                 key='vector field', title='title', gridSize=25):
        """ U = first = i-component; V=second=j-component

        mask     -- optional extra mask OR-ed onto the union of U.mask and V.mask
        gridSize -- arrow spacing (in grid points) used by plot()
        """
        U = U.view(ma.MaskedArray)
        V = V.view(ma.MaskedArray)
        # union of the two component masks plus any explicitly supplied mask
        mask = U.mask + V.mask + mask
        U.mask = mask.copy()
        V.mask = mask.copy()
        self.U = U
        self.V = V
        self.mask = mask
        #################################################
        # i don't know how to make this work; comment out
        #if not isinstance(mask, type(False)):  # if mask explicitly given, initialise with it
        #    self.U.mask = mask
        #    self.V.mask = mask
        #################################################
        self.name = name
        self.dataPath = dataPath
        self.outputPath = outputPath            # for the future
        self.imagePath = imagePath
        self.key = key
        self.title = title
        self.gridSize = gridSize
    def __sub__(self, vect2):
        """defining the subtraction of two vector fields

        vect2 may be another VectorField or a constant (x, y) tuple/list.
        Returns a new VectorField; self is not modified.
        """
        if isinstance(vect2, tuple) or isinstance(vect2, list):
            name = self.name + "_minus_" + str(vect2)
            U = self.U - vect2[0]   # we use (x,y) for external interface, not i,j
            V = self.V - vect2[1]
            mask = self.mask.copy()
            key = self.key + " minus " + str(vect2)
            title = self.title + " minus " + str(vect2)
            gridSize = self.gridSize
        else:
            name = self.name + "_minus_" + vect2.name
            U = self.U - vect2.U
            V = self.V - vect2.V
            mask = self.mask + vect2.mask.copy()
            key = self.key + " minus " + vect2.key
            title = self.title + " minus " + vect2.title
            gridSize = min(self.gridSize, vect2.gridSize)
        outputPath = self.outputPath + name + ".dat"
        dataPath = outputPath
        imagePath = self.imagePath + name + ".png"
        return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
                           imagePath=imagePath, key=key, title=title, gridSize=gridSize)
    def __add__(self, vect2):
        """defining the addition of two vector fields (mirror image of __sub__)"""
        if isinstance(vect2, tuple) or isinstance(vect2, list):
            name = self.name + "_plus_" + str(vect2)
            U = self.U + vect2[0]   # we use (x,y) for external interface, not i,j
            V = self.V + vect2[1]
            mask = self.mask.copy()
            key = self.key + " plus " + str(vect2)
            title = self.title + " plus " + str(vect2)
            gridSize = self.gridSize
        else:
            name = self.name + "_plus_" + vect2.name
            U = self.U + vect2.U
            V = self.V + vect2.V
            mask = self.mask + vect2.mask.copy()
            key = self.key + " plus " + vect2.key
            title = self.title + " plus " + vect2.title
            gridSize = min(self.gridSize, vect2.gridSize)
        outputPath = self.outputPath + name + ".dat"
        dataPath = outputPath
        imagePath = self.imagePath + name + ".png"
        return VectorField(U, V, mask=mask, name=name, dataPath=dataPath, outputPath=outputPath,\
                           imagePath=imagePath, key=key, title=title, gridSize=gridSize)
    def __mul__(self, s):
        """scalar for now, will extend later

        s may be a scalar or a per-component (sx, sy) tuple/list.
        """
        if isinstance(s, tuple) or isinstance(s, list):
            U = self.U * s[0]
            V = self.V * s[1]
        else:
            U = self.U * s
            V = self.V * s
        mask = self.mask.copy()
        name = self.name + "__times__" + str(s)
        dataPath = ''
        outputPath = self.outputPath + "__times__" + str(s)
        imagePath = self.imagePath + "__times__" + str(s)
        key = self.key + "__times__" + str(s)
        title = self.title + "__times__" + str(s)
        gridSize = self.gridSize
        return VectorField(U=U, V=V, mask=mask, name=name, dataPath=dataPath, \
                           outputPath=outputPath, imagePath=imagePath, \
                           key=key, title=title, gridSize=gridSize)
    def plot(self, key="", title="", gridSize=0, X=-1, Y=-1, closeAll=True, lowerLeftKey=False):
        """
        make the plot without showing it
        adapted from
        basics.plotVectorField(U, V, X=-1, Y=-1, gridSize=25, key="vector field",\
                               title="title", saveFileName="", outputToScreen=False):
        """
        # clear the canvass
        #plt.clf()
        if closeAll:
            plt.close()
        U = self.U.copy()
        V = self.V.copy()
        if key == "":
            key = self.key
        if title == "":
            title = self.title
        if gridSize == 0:
            gridSize = self.gridSize
        width = U.shape[1]
        height = U.shape[0]
        if type(X) == type(-1) or type(Y) == type(-1):
            X, Y = np.meshgrid(np.arange(0, width), np.arange(0, height))
        left = X[0, 0]
        bottom = Y[0, 0]
        #computing the length of the vector field at centre for reference
        r_centre = (U[height//2, width//2]**2 + V[height//2, width//2]**2) ** (0.5)
        # FIX: the original printed the literal string "r_centre" instead of the value
        print("==computing the length of the vector field at centre for reference:==\nr_centre= %s" % r_centre)
        if lowerLeftKey:
            # making a grid of standardardised vectors in the lower-left corner
            # for scale reference
            U[1:gridSize+1, 1:gridSize+1] = 1
            V[1:gridSize+1, 1:gridSize+1] = 0
        Q = plt.quiver( X[::gridSize, ::gridSize], Y[::gridSize, ::gridSize],\
                        U[::gridSize, ::gridSize], V[::gridSize, ::gridSize],\
                        color='r', units='x', linewidths=(2,), edgecolors=('k'),\
                        headaxislength=5 )
        qk = plt.quiverkey(Q, 0.7, 0.0, 1, 'length='+str(round(r_centre,5))+' at centre',\
                           fontproperties={'weight': 'bold'})
        if lowerLeftKey:
            qk = plt.quiverkey(Q, 0.3, 0.0, 1,\
                               key+',\nlength of the standard arrow in the lower-left corner=1',\
                               fontproperties={'weight': 'bold'})
        plt.axis([left, left+width-1, bottom, bottom+height-1])
        plt.title(title)
    def showPlot(self, **kwargs):
        """Build the plot and show it on screen."""
        self.plot(**kwargs)
        plt.show()
    def show(self, **kwargs):
        """alias for showPlot"""
        self.showPlot(**kwargs)
    def savePlot(self):
        """Build the plot and save it to self.imagePath (prompting if unset).

        NOTE(review): raw_input is Python 2; under Python 3 this would need input().
        """
        self.plot()
        if self.imagePath == "":
            self.imagePath = raw_input("Please enter imagePath:")
        plt.savefig(self.imagePath, dpi=200)
    def saveImage(self):
        """alias for savePlot"""
        self.savePlot()
    def toArray(self):
        """return normal arrays filled with -999 for missing values for other uses"""
        return ma.filled(self.U), ma.filled(self.V)
    def saveMatrix(self):
        """
        * We convert and save the masked arrays into standard arrays with masked data filled by -999
        """
        U, V = self.toArray()
        np.savetxt(self.outputPath+"U.dat", U, '%.4f')
        np.savetxt(self.outputPath+"V.dat", V, '%.4f')
    def pickle(self):
        """Pickle this VectorField to self.outputPath.

        FIX: the original called pickle.dump(self) with no file object, which
        unconditionally raised TypeError.  NOTE(review): self.outputPath is
        used elsewhere as a path prefix -- confirm this is the intended target.
        """
        pickleFile = open(self.outputPath, 'wb')
        try:
            pickle.dump(self, pickleFile)
        finally:
            pickleFile.close()
    #####################################################
    # functions from vector fields to values
    def corr(self, vect2, region1="", region2=""):
        """adapted from DBZ.corr():

        Correlates the U components and the V components separately over the
        given (bottom, left, height, width) regions (default: full field).
        Returns {'ucorr': ..., 'vcorr': ...}, each a 2x2 correlation matrix.
        """
        height, width = self.U.shape
        if region1 == "":
            region1 = (0, 0, height, width)
        if region2 == "":
            region2 = region1
        u1 = self.U[region1[0]:region1[0]+region1[2], \
                    region1[1]:region1[1]+region1[3]].flatten()
        u2 = vect2.U[region2[0]:region2[0]+region2[2], \
                     region2[1]:region2[1]+region2[3]].flatten()
        ucorr = ma.corrcoef(u1, u2)
        v1 = self.V[region1[0]:region1[0]+region1[2], \
                    region1[1]:region1[1]+region1[3]].flatten()
        v2 = vect2.V[region2[0]:region2[0]+region2[2], \
                     region2[1]:region2[1]+region2[3]].flatten()
        vcorr = ma.corrcoef(v1, v2)
        return {'ucorr': ucorr, 'vcorr': vcorr}
##############################################################################
# streams of DBZ objects, with basic operations, comparisons, etc
class DBZstream:
    """
    a stream of DBZ objects, with basic i/o facilities
    migrating some codes from armor.basicio.dataStream
    WE DO ASSUME THAT there are no two sets of data with the same dataTime
    or else we would need some extra logic to check for redundancies.
    """
    ###########################################################
    #
    # basic construction
    def __init__(self, dataFolder='../data_temp/', name="COMPREF.DBZ",
                 lowerLeftCornerLatitudeLongitude=defaultLowerLeftCornerLatitudeLongitude,
                 upperRightCornerLatitudeLongitude=defaultUpperRightCornerLatitudeLongitude,
                 outputFolder="",
                 imageFolder="",
                 preload=False):
        """
        construct the objects without loading them
        input: path of folder "/../../"
        process: parse the folder for files
        output: sequence of armor.pattern.DBZ objects
            DBZ(name, dataPath, dataTime)
        # parse the filename and look for clues
        """
        # fall back to the module-level default folders when not given
        if outputFolder =="":
            outputFolder = defaultOutputFolder
        if imageFolder =="":
            imageFolder = defaultImageFolder
        self.dataFolder = dataFolder
        self.lowerLeftCornerLatitudeLongitude = lowerLeftCornerLatitudeLongitude
        self.upperRightCornerLatitudeLongitude = upperRightCornerLatitudeLongitude
        self.outputFolder = outputFolder
        self.imageFolder = imageFolder
        dbzList = []
        dataFolder = re.sub(r'\\', '/' , dataFolder)    # standardise: g:\\ARMOR .. --> g:/ARMOR
        # default stream name: last two path components joined by '-'
        dataSource = '-'.join(dataFolder.split('/')[-2:]) + '-'
        if name != "":
            self.name = name
        else:
            self.name = dataSource
        L = os.listdir(dataFolder)
        L = [v for v in L if v.lower().endswith('.txt') or v.lower().endswith('.dat')]  # fetch the data files
        for fileName in L:
            # expect at least three 4-digit groups in the name: YYYY, MMDD, HHMM
            dataTime = re.findall(r'\d{4}', fileName)
            if len(dataTime)<3: # NOT DATED DBZ FILE, REJECT
                continue
            dataTime = dataTime[0] + dataTime[1] + '.' + dataTime[2]
            name = dataSource + fileName
            dataPath = dataFolder + fileName
            a = DBZ(dataTime=dataTime,
                    name=name,
                    dataPath=dataPath,
                    lowerLeftCornerLatitudeLongitude=lowerLeftCornerLatitudeLongitude,
                    upperRightCornerLatitudeLongitude=upperRightCornerLatitudeLongitude,
                    )
            if preload:
                a.load()
            dbzList.append(a)
        ## there you go! ######
        #
        self.list = dbzList
        #
        #######################
    def __call__(self, N=-999):
        """
        if N is an integer then return the N-th DBZ pattern in the stream
        else if N is a string then return those whose names or dataTimes contains N
        """
        if N == -999:
            return self.list
        elif isinstance(N, int):
            return self.list[N]
        elif isinstance(N, str):
            return [v for v in self.list if N in v.name or N in v.dataTime]
    def __getitem__(self, N=-999):
        """alias for self.list[] """
        return self.list[N]
    def __len__(self, dataTime=""):
        # NOTE(review): the built-in len() never passes arguments, so dataTime
        # is always "" there (counting the whole stream); the substring filter
        # only applies when __len__ is called directly.
        return len([v for v in self.list if dataTime in v.dataTime])
    ###########################################################
    #
    # stream operations
    def append(self, filePath):
        """
        to append a new member to the DBZstream list
        (not implemented yet)
        """
        pass
    def regrid(self, b):
        """
        wrapping armor.geometry.regrid.regrid()
        b is another DBZ object representing the grid pattern to be transformed to
        """
        from armor.geometry import regrid
        for i in range(len(self.list)):
            self.list[i] = regrid.regrid(self.list[i], b)
    def cutUnloaded(self):
        """
        cut the unloaded objects
        (an object counts as unloaded iff its matrix is identically zero)
        """
        i=0
        while i < len(self.list):
            dbzObject = self.list[i]
            if (dbzObject.matrix**2).sum()==0:
                del(self.list[i])
            else:
                i+=1
        return i    # length of the stream in the end
    ###########################################################
    #
    # basic I/O
    def load(self, N=-999, name="", verbose=False):
        """
        N - index of object to be loaded, if N==-999 : load all
        if N is a string, look through the list of dbz objects
        and load those whose dataTime string contain N
        and whose name contains name
        NOTE(review): the `name` parameter is currently not used in the body.
        """
        if N==-999:
            for img in self.list:
                img.load()
        elif isinstance(N, int):
            self.list[N].load()
        elif isinstance(N, str):
            for img in self.list:
                if N in img.dataTime or N in img.name:
                    img.load()
                    if verbose:
                        print img.name, '|',
    def setImageFolder(self, folder):
        # repoint every member's image path at the new folder
        for dbzPattern in self.list:
            dbzPattern.imageFolder = folder
            #dbzPattern.imagePath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".png"
            dbzPattern.imagePath = folder + dbzPattern.dataTime + ".png"
    def setOutputFolder(self, folder):
        # repoint every member's output path at the new folder
        for dbzPattern in self.list:
            dbzPattern.outputFolder = folder
            #dbzPattern.outputPath = folder + dbzPattern.name + '_'+dbzPattern.dataTime + ".dat"
            dbzPattern.outputPath = folder + dbzPattern.dataTime + ".dat"
    ###########################################################
    #
    # functions on streams, comparisons, etc
    def countLoaded(self):
        """
        return the number of loaded DBZ objects in the stream
        essentially computing those with matrix!=0
        """
        # iterates via __getitem__ (old-style iteration protocol)
        return len([v for v in self if (v.matrix**2).sum()!=0])
    def corr(self, ds2, verbose=False):
        """
        returns a list of correlation of the streams
        [(dataTime <str>, corr <float>),...]
        """
        ds1 = self  # alias
        # 1. get the list of common dataTimes
        dataTimeList1 = [v.dataTime for v in ds1.list]
        dataTimeList2 = [v.dataTime for v in ds2.list]
        dataTimeList = sorted(list(set(dataTimeList1).intersection(set(dataTimeList2))))
        if verbose:
            print dataTimeList
        # 2. compute the correlations with the built in DBZ.corr() method
        L = []
        for T in dataTimeList:
            a = ds1(T)[0]
            b = ds2(T)[0]
            L.append((T, a.corr(b)))
        return L
########################
# demo
# module-level demo / convenience objects (constructed at import time)
a = DBZ('20120612.0200')
b = DBZ('20120612.0210')
ds1 = DBZstream()
# throwaway note (a bare string literal; no runtime effect):
"""
exit()
python
from armor import pattern
"""
# probe possible locations for the SOULIK WRF data, keeping the last one
# that constructs successfully as ds2 (bare except: missing-drive tolerant)
try:
    print externalHardDriveRoot
    ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot,
                    lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
                    upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
                    )
except:
    print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot, "NOT FOUND"
try:
    ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %hardDriveRoot,
                    lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
                    upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
                    )
    print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "\nFOUND!!"
except:
    print 'HARD DRIVE %sdata/SOULIK/wrf_shue/' %hardDriveRoot, "NOT FOUND"
try:
    print externalHardDriveRoot2
    ds2 = DBZstream(dataFolder='%sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2,
                    lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
                    upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
                    )
    print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "\nFOUND!!"
except:
    print 'EXTERNAL HARD DRIVE %sdata/SOULIK/wrf_shue/' %externalHardDriveRoot2, "NOT FOUND"
# WRF simulation output stream
try:
    ds3 = DBZstream(dataFolder='../data_simulation/20120611_12/', name="WRFoutput",
                    lowerLeftCornerLatitudeLongitude=(17.7094,113.3272),
                    upperRightCornerLatitudeLongitude=(28.62909, 127.6353),
                    preload=False)
except:
    print '../data_simulation/20120611_12/ - NOT FOUND'
#a.load()
#b.load()
# grid geometry notes for mr. shue's data (bare string literal; no runtime effect):
"""
The following are constructed from data from mr. shue : https://mail.google.com/mail/u/0/?shva=1#search/azteque%40manysplendid.com/14070bb7d7aef48c
wd3
282x342
MaxLatF = 28.62909
MinLatF = 17.7094
MaxLonF = 127.6353
MinLonF = 113.3272
"""
c = DBZ(name='WRF20120612.0200', dataTime='20120612.0200',
        dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120200.txt',
        lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
        upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
        )
d = DBZ(name='WRF20120612.0210', dataTime='20120612.0210',
        dataPath= usbRoot + '/data_simulation/20120611_12/out_201206120210.txt',
        lowerLeftCornerLatitudeLongitude= (17.7094, 113.3272),
        upperRightCornerLatitudeLongitude= (28.62909,127.6353) ,
        )
|
yaukwankiu/armor
|
pattern_.py
|
Python
|
cc0-1.0
| 62,433
|
from furl import furl
from lxml import etree
from django.conf import settings
from share import Harvester
class PLOSHarvester(Harvester):
    """Harvester for the PLOS Search API (Solr-backed, paginated XML)."""
    url = 'http://api.plos.org/search'
    # Solr row cap used for pagination in fetch_rows
    MAX_ROWS_PER_REQUEST = 999
    def do_harvest(self, start_date, end_date):
        """Return a generator of (doc_id, raw_xml) for documents published in [start_date, end_date]."""
        if not settings.PLOS_API_KEY:
            raise Exception('PLOS api key not defined.')
        # Solr expects ISO-8601 timestamps without fractional seconds, 'Z'-suffixed
        start_date = start_date.isoformat().split('.')[0] + 'Z'
        end_date = end_date.isoformat().split('.')[0] + 'Z'
        # first request asks for 0 rows: fetch_rows only reads the total count from it
        return self.fetch_rows(furl(self.url).set(query_params={
            'q': 'publication_date:[{} TO {}]'.format(start_date, end_date),
            'rows': '0',
            'api_key': settings.PLOS_API_KEY
        }).url, start_date, end_date)
    def fetch_rows(self, url, start_date, end_date):
        """Yield (doc_id, raw_xml) for every result doc, paging MAX_ROWS_PER_REQUEST at a time."""
        resp = self.requests.get(url)
        total_rows = etree.XML(resp.content).xpath('//result/@numFound')
        total_rows = int(total_rows[0]) if total_rows else 0
        current_row = 0
        while current_row < total_rows:
            response = self.requests.get(furl(self.url).set(query_params={
                'q': 'publication_date:[{} TO {}]'.format(start_date, end_date),
                'start': current_row,
                'api_key': settings.PLOS_API_KEY,
                'rows': self.MAX_ROWS_PER_REQUEST
            }).url)
            docs = etree.XML(response.content).xpath('//doc')
            for doc in docs:
                # keep only docs carrying an abstract or a displayable author list
                if doc.xpath("arr[@name='abstract']") or doc.xpath("str[@name='author_display']"):
                    doc_id = doc.xpath("str[@name='id']")[0].text
                    doc = etree.tostring(doc)
                    yield (doc_id, doc)
            current_row += len(docs)
|
zamattiac/SHARE
|
providers/org/plos/harvester.py
|
Python
|
apache-2.0
| 1,721
|
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#
# This function was modified from the original code
# We can't use the multiprocessing module as it was included in python2.6
# and we support python 2.4
from __future__ import absolute_import, print_function, division
import os
import sys
def cpuCount():
    '''
    Returns the number of CPUs in the system, or -1 when it cannot be
    determined.

    Platform strategy: Windows reads the NUMBER_OF_PROCESSORS environment
    variable, macOS shells out to ``sysctl -n hw.ncpu``, and everything
    else asks os.sysconf for SC_NPROCESSORS_ONLN.
    '''
    if sys.platform == 'win32':
        try:
            return int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            return -1
    if sys.platform == 'darwin':
        try:
            return int(os.popen('sysctl -n hw.ncpu').read())
        except ValueError:
            return -1
    try:
        return os.sysconf('SC_NPROCESSORS_ONLN')
    except (ValueError, OSError, AttributeError):
        return -1
|
JazzeYoung/VeryDeepAutoEncoder
|
theano/misc/cpucount.py
|
Python
|
bsd-3-clause
| 2,307
|
# -*- coding: utf-8 -*-
"""
The module :mod:`openerp.tests.common` provides unittest test cases and a few
helpers and classes to write tests.
"""
import errno
import glob
import importlib
import json
import logging
import os
import select
import subprocess
import threading
import time
import itertools
import unittest
import urllib2
import xmlrpclib
from contextlib import contextmanager
from datetime import datetime, timedelta
from pprint import pformat
import werkzeug
import openerp
from openerp import api
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
# The openerp library is supposed already configured.
ADDONS_PATH = openerp.tools.config['addons_path']   # addons directories from the server config
HOST = '127.0.0.1'                                  # host used to reach the test HTTP/XML-RPC server
PORT = openerp.tools.config['xmlrpc_port']          # port of the test HTTP/XML-RPC server
# Useless constant, tests are aware of the content of demo data
ADMIN_USER_ID = openerp.SUPERUSER_ID
def get_db_name():
    """Return the name of the database under test.

    Prefers the name given on the command line; otherwise falls back to the
    per-thread ``dbname`` attribute (which means that if the name *is*
    provided on the command line, installing another database from XML-RPC
    will break).
    """
    db_name = openerp.tools.config['db_name']
    if db_name:
        return db_name
    current = threading.current_thread()
    if hasattr(current, 'dbname'):
        return current.dbname
    return db_name
# For backwards-compatibility - get_db_name() should be used instead
DB = get_db_name()  # snapshot taken at import time; may go stale if the thread dbname changes later
def at_install(flag):
    """ Sets the at-install state of a test, the flag is a boolean specifying
    whether the test should (``True``) or should not (``False``) run during
    module installation.

    By default, tests are run right after installing the module, before
    starting the installation of the next module.
    """
    def mark(test_obj):
        # annotate the decorated test object in place and hand it back
        test_obj.at_install = flag
        return test_obj
    return mark
def post_install(flag):
    """ Sets the post-install state of a test. The flag is a boolean
    specifying whether the test should or should not run after a set of
    module installations.

    By default, tests are *not* run after installation of all modules in the
    current installation set.
    """
    def mark(test_obj):
        # annotate the decorated test object in place and hand it back
        test_obj.post_install = flag
        return test_obj
    return mark
class BaseCase(unittest.TestCase):
    """
    Subclass of TestCase for common OpenERP-specific code.

    This class is abstract and expects self.registry, self.cr and self.uid to be
    initialized by subclasses.
    """
    def cursor(self):
        # open a fresh database cursor from the registry
        return self.registry.cursor()
    def ref(self, xid):
        """ Returns database ID for the provided :term:`external identifier`,
        shortcut for ``get_object_reference``

        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: registered id
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        _, id = self.registry('ir.model.data').get_object_reference(self.cr, self.uid, module, xid)
        return id
    def browse_ref(self, xid):
        """ Returns a record object for the provided
        :term:`external identifier`

        :param xid: fully-qualified :term:`external identifier`, in the form
                    :samp:`{module}.{identifier}`
        :raise: ValueError if not found
        :returns: :class:`~openerp.models.BaseModel`
        """
        assert "." in xid, "this method requires a fully qualified parameter, in the following form: 'module.identifier'"
        module, xid = xid.split('.')
        return self.registry('ir.model.data').get_object(self.cr, self.uid, module, xid)
    @contextmanager
    def _assertRaises(self, exception):
        """ Context manager that clears the environment upon failure. """
        with super(BaseCase, self).assertRaises(exception) as cm:
            with self.env.clear_upon_failure():
                yield cm
    def assertRaises(self, exception, func=None, *args, **kwargs):
        # mirror unittest's dual API: direct call form or context-manager form
        if func:
            with self._assertRaises(exception):
                func(*args, **kwargs)
        else:
            return self._assertRaises(exception)
    def shortDescription(self):
        # collapse the test docstring to a single line for test reports
        doc = self._testMethodDoc
        return doc and ' '.join(filter(None, map(str.strip, doc.splitlines()))) or None
class TransactionCase(BaseCase):
    """ TestCase in which each test method is run in its own transaction,
    and with its own cursor. The transaction is rolled back and the cursor
    is closed after each test.
    """
    def setUp(self):
        self.registry = RegistryManager.get(get_db_name())
        #: current transaction's cursor
        self.cr = self.cursor()
        self.uid = openerp.SUPERUSER_ID
        #: :class:`~openerp.api.Environment` for the current test case
        self.env = api.Environment(self.cr, self.uid, {})
        # Registered as a cleanup (not tearDown) so it runs even when a
        # subclass's setUp fails part-way through.
        @self.addCleanup
        def reset():
            # rollback and close the cursor, and reset the environments
            self.registry.clear_caches()
            self.env.reset()
            self.cr.rollback()
            self.cr.close()
    def patch_order(self, model, order):
        # Temporarily override a model's _order, restored on cleanup, on both
        # the new-API model class and the old-style registry model.
        m_e = self.env[model]
        m_r = self.registry(model)
        old_order = m_e._order
        @self.addCleanup
        def cleanup():
            m_r._order = type(m_e)._order = old_order
        m_r._order = type(m_e)._order = order
class SingleTransactionCase(BaseCase):
    """ TestCase in which all test methods are run in the same transaction,
    the transaction is started with the first test method and rolled back at
    the end of the last.
    """
    @classmethod
    def setUpClass(cls):
        # Cursor and environment are shared by every test in the class;
        # individual tests must not commit.
        cls.registry = RegistryManager.get(get_db_name())
        cls.cr = cls.registry.cursor()
        cls.uid = openerp.SUPERUSER_ID
        cls.env = api.Environment(cls.cr, cls.uid, {})
    @classmethod
    def tearDownClass(cls):
        # rollback and close the cursor, and reset the environments
        cls.registry.clear_caches()
        cls.env.reset()
        cls.cr.rollback()
        cls.cr.close()
# Monotonically-increasing ids used to name SQL savepoints, shared by all
# SavepointCase instances in this process.
savepoint_seq = itertools.count()
class SavepointCase(SingleTransactionCase):
    """ Similar to :class:`SingleTransactionCase` in that all test methods
    are run in a single transaction *but* each test case is run inside a
    rollbacked savepoint (sub-transaction).
    Useful for test cases containing fast tests but with significant database
    setup common to all cases (complex in-db test data): :meth:`~.setUpClass`
    can be used to generate db test data once, then all test cases use the
    same data without influencing one another but without having to recreate
    the test data either.
    """
    def setUp(self):
        # Open a uniquely-named savepoint; tearDown rolls back to it.
        self._savepoint_id = next(savepoint_seq)
        self.cr.execute('SAVEPOINT test_%d' % self._savepoint_id)
    def tearDown(self):
        self.cr.execute('ROLLBACK TO SAVEPOINT test_%d' % self._savepoint_id)
        # The rollback invalidates in-memory caches as well.
        self.env.clear()
        self.registry.clear_caches()
class RedirectHandler(urllib2.HTTPRedirectHandler):
    """
    HTTPRedirectHandler is predicated upon HTTPErrorProcessor being used and
    works by intercepting 3xy "errors".
    Inherit from it to handle 3xy non-error responses instead, as we're not
    using the error processor
    """
    def http_response(self, request, response):
        code, msg, hdrs = response.code, response.msg, response.info()
        # Funnel redirect statuses through the opener's error machinery so
        # the parent class's redirect handling still runs even without an
        # HTTPErrorProcessor installed.
        if 300 <= code < 400:
            return self.parent.error(
                'http', request, response, code, msg, hdrs)
        return response
    # HTTPS responses receive exactly the same treatment.
    https_response = http_response
class HttpCase(TransactionCase):
    """ Transactional HTTP TestCase with url_open and phantomjs helpers.
    """
    def __init__(self, methodName='runTest'):
        super(HttpCase, self).__init__(methodName)
        # v8 api with correct xmlrpc exception handling.
        self.xmlrpc_url = url_8 = 'http://%s:%d/xmlrpc/2/' % (HOST, PORT)
        self.xmlrpc_common = xmlrpclib.ServerProxy(url_8 + 'common')
        self.xmlrpc_db = xmlrpclib.ServerProxy(url_8 + 'db')
        self.xmlrpc_object = xmlrpclib.ServerProxy(url_8 + 'object')
    def setUp(self):
        super(HttpCase, self).setUp()
        self.registry.enter_test_mode()
        # setup a magic session_id that will be rollbacked
        self.session = openerp.http.root.session_store.new()
        self.session_id = self.session.sid
        self.session.db = get_db_name()
        openerp.http.root.session_store.save(self.session)
        # setup an url opener helper
        self.opener = urllib2.OpenerDirector()
        self.opener.add_handler(urllib2.UnknownHandler())
        self.opener.add_handler(urllib2.HTTPHandler())
        self.opener.add_handler(urllib2.HTTPSHandler())
        self.opener.add_handler(urllib2.HTTPCookieProcessor())
        self.opener.add_handler(RedirectHandler())
        # Pin the magic session cookie on every request made by this opener.
        self.opener.addheaders.append(('Cookie', 'session_id=%s' % self.session_id))
    def tearDown(self):
        self.registry.leave_test_mode()
        super(HttpCase, self).tearDown()
    def url_open(self, url, data=None, timeout=10):
        # Relative paths are resolved against the local test server.
        if url.startswith('/'):
            url = "http://%s:%s%s" % (HOST, PORT, url)
        return self.opener.open(url, data, timeout)
    def authenticate(self, user, password):
        # stay non-authenticated
        if user is None:
            return
        db = get_db_name()
        Users = self.registry['res.users']
        uid = Users.authenticate(db, user, password, None)
        # self.session.authenticate(db, user, password, uid=uid)
        # OpenERPSession.authenticate accesses the current request, which we
        # don't have, so reimplement it manually...
        session = self.session
        session.db = db
        session.uid = uid
        session.login = user
        session.password = password
        session.context = Users.context_get(self.cr, uid) or {}
        session.context['uid'] = uid
        session._fix_lang(session.context)
        openerp.http.root.session_store.save(session)
    def phantom_poll(self, phantom, timeout):
        """ Phantomjs Test protocol.
        Use console.log in phantomjs to output test results:
        - for a success: console.log("ok")
        - for an error: console.log("error")
        Other lines are relayed to the test log.
        """
        t0 = datetime.now()
        td = timedelta(seconds=timeout)
        buf = bytearray()
        while True:
            # timeout
            self.assertLess(datetime.now() - t0, td,
                "PhantomJS tests should take less than %s seconds" % timeout)
            # read a byte
            try:
                ready, _, _ = select.select([phantom.stdout], [], [], 0.5)
            except select.error, e:
                # In Python 2, select.error has no relation to IOError or
                # OSError, and no errno/strerror/filename, only a pair of
                # unnamed arguments (matching errno and strerror)
                err, _ = e.args
                if err == errno.EINTR:
                    continue
                raise
            if ready:
                s = phantom.stdout.read(1)
                if not s:
                    break
                buf.append(s)
            # process lines
            # A complete line is handled as soon as it arrives, except that a
            # <phantomLog>...</phantomLog> envelope is buffered until closed.
            if '\n' in buf and (not buf.startswith('<phantomLog>') or '</phantomLog>' in buf):
                if buf.startswith('<phantomLog>'):
                    line = buf[12:buf.index('</phantomLog>')]
                    buf = bytearray()
                else:
                    line, buf = buf.split('\n', 1)
                line = str(line)
                lline = line.lower()
                if lline.startswith(("error", "server application error")):
                    try:
                        # when errors occur the execution stack may be sent as a JSON
                        prefix = lline.index('error') + 6
                        _logger.error("phantomjs: %s", pformat(json.loads(line[prefix:])))
                    except ValueError:
                        line_ = line.split('\n\n')
                        _logger.error("phantomjs: %s", line_[0])
                        # The second part of the log is for debugging
                        if len(line_) > 1:
                            _logger.info("phantomjs: \n%s", line.split('\n\n', 1)[1])
                        pass
                    break
                elif lline.startswith("warning"):
                    _logger.warn("phantomjs: %s", line)
                else:
                    _logger.info("phantomjs: %s", line)
                if line == "ok":
                    break
    def phantom_run(self, cmd, timeout):
        _logger.info('phantom_run executing %s', ' '.join(cmd))
        # Clear phantomjs's localstorage for this host:port so state does not
        # leak between runs.
        ls_glob = os.path.expanduser('~/.qws/share/data/Ofi Labs/PhantomJS/http_%s_%s.*' % (HOST, PORT))
        for i in glob.glob(ls_glob):
            _logger.info('phantomjs unlink localstorage %s', i)
            os.unlink(i)
        try:
            phantom = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=None)
        except OSError:
            raise unittest.SkipTest("PhantomJS not found")
        try:
            self.phantom_poll(phantom, timeout)
        finally:
            # kill phantomjs if phantom.exit() wasn't called in the test
            if phantom.poll() is None:
                phantom.terminate()
                phantom.wait()
            self._wait_remaining_requests()
        # we ignore phantomjs return code as we kill it as soon as we have ok
        _logger.info("phantom_run execution finished")
    def _wait_remaining_requests(self):
        # Busy-wait until worker threads spawned by the HTTP layer finish.
        t0 = int(time.time())
        for thread in threading.enumerate():
            if thread.name.startswith('openerp.service.http.request.'):
                while thread.isAlive():
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)
                    t1 = int(time.time())
                    if t0 != t1:
                        # Log at most once per second while waiting.
                        _logger.info('remaining requests')
                        openerp.tools.misc.dumpstacks()
                        t0 = t1
    def phantom_js(self, url_path, code, ready="window", login=None, timeout=60, **kw):
        """ Test js code running in the browser
        - optionally log as 'login'
        - load page given by url_path
        - wait for ready object to be available
        - eval(code) inside the page
        To signal success test do:
        console.log('ok')
        To signal failure do:
        console.log('error')
        If neither are done before timeout test fails.
        """
        options = {
            'port': PORT,
            'db': get_db_name(),
            'url_path': url_path,
            'code': code,
            'ready': ready,
            'timeout' : timeout,
            'session_id': self.session_id,
        }
        options.update(kw)
        # NOTE(review): the password is assumed to equal the login here —
        # confirm this convention against the test fixtures.
        self.authenticate(login, login)
        phantomtest = os.path.join(os.path.dirname(__file__), 'phantomtest.js')
        cmd = ['phantomjs', phantomtest, json.dumps(options)]
        self.phantom_run(cmd, timeout)
def can_import(module):
    """Return ``True`` when *module* is importable, ``False`` otherwise.

    Intended for ``unittest.skipUnless`` on tests conditional on *optional*
    dependencies, which may or may not be present but must still be tested
    when possible.
    """
    try:
        importlib.import_module(module)
        return True
    except ImportError:
        return False
|
vileopratama/vitech
|
src/openerp/tests/common.py
|
Python
|
mit
| 15,778
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Legacy BibExport."""
import warnings
from invenio.utils.deprecation import RemovedInInvenio23Warning
warnings.warn("Legacy BibExport will be removed in 2.3. Please check "
"'invenio.modules.exporter' module.",
RemovedInInvenio23Warning)
|
ludmilamarian/invenio
|
invenio/legacy/bibexport/__init__.py
|
Python
|
gpl-2.0
| 1,044
|
''' Tests for feeds. '''
import unittest
import datetime
from reader.shared import db
from reader.models.feed import Feed
from testutils import ModelTest
class FeedsTests(unittest.TestCase):
    """Unit tests for the Feed model that need no database."""
    def test_create_new_feed(self):
        """A feed stores the URL it was constructed with."""
        url = 'http://xkcd.com/rss.xml'
        self.assertEqual(url, Feed(url).url)
    def test_feed_without_url_fails(self):
        """Constructing a feed without a URL raises ValueError."""
        with self.assertRaises(ValueError):
            feed = Feed(None)
class IntegrationTests(ModelTest):
    """Integration tests exercising Feed queries against the test database."""

    def test_get_all_urls(self):
        """Feed.get_all_urls returns every stored feed URL."""
        urls = ['http://xkcd.com/rss.xml', 'http://xkcd.com/atom.xml']
        db.session.add_all([Feed(urls[0]), Feed(urls[1])])
        db.session.commit()
        all_urls = Feed.get_all_urls()
        self.assertEqual(urls, all_urls)

    def test_get_feed_that_need_updating(self):
        """Only feeds updated more recently than their last parse need re-parsing."""
        urls = ['http://xkcd.com/rss.xml', 'http://xkcd.com/atom.xml']
        f = [Feed(urls[0]), Feed(urls[1])]
        # FIX: leading-zero integer literals (02, 01) are octal in Python 2
        # and a SyntaxError in Python 3; plain decimals keep the same values.
        f[0].updated = datetime.datetime(year=2014, month=2, day=1)
        f[0].last_parsed = datetime.datetime(year=2014, month=1, day=1)
        f[1].updated = datetime.datetime(year=2013, month=1, day=1)
        f[1].last_parsed = datetime.datetime(year=2014, month=1, day=1)
        db.session.add_all(f)
        db.session.commit()
        needs_updated = Feed.urls_to_update()
        self.assertTrue(urls[0] in needs_updated)
        self.assertTrue(urls[1] not in needs_updated)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
ajferrick/simple_reader
|
server/tests/test_feeds.py
|
Python
|
gpl-2.0
| 1,452
|
import os
from django.test import TestCase
from django.test.utils import override_settings
import math
from dataset_manager.enums import FeatureType, ComputingStateType, FeatureFunctionType
from dataset_manager.models import Dataset
from django.conf import settings
# Filesystem location of the fixture dataset used by these tests.
test_path = os.path.join(settings.DATASET_DEFAULT_PATH, 'test_dataset')
class AudioProcessorTestCase(TestCase):
    """Tests of audio-feature extraction over the fixture video dataset."""
    def setUp(self):
        # Build (or reuse) the fixture dataset and fully prepare one video.
        self.dataset, self.created = Dataset.objects.get_or_create(name="test_dataset", description="Dataset for tests")
        self.dataset.scan_video_folder()
        self.video = self.dataset.videos.get(full_name='1_shot.avi')
        self.video.prepare(overwrite=True)
    def test_compute_audio_features(self):
        """Verify the computation of audio features"""
        OVERWRITE = True
        feature_types = FeatureType.audio_features
        self.video.extract_features(feature_types=feature_types, overwrite=OVERWRITE)
        self.assertEqual(self.video.audio_part.features.count(), len(feature_types), "Video {} should have {} audio features entry. {} instead".format(self.video.full_name, len(feature_types), self.video.audio_part.features.count()))
        self.assertEqual(self.video.feature_extraction_state, ComputingStateType.SUCCESS, "Feature extraction state of video {} should be 'success'. {} instead".format(self.video.full_name, ComputingStateType.label(self.video.feature_extraction_state)))
        for feature_type in feature_types:
            functional_values = self.video.audio_part.features.get(type=feature_type).values
            # NOTE(review): dict.iteritems makes this module Python 2 only.
            for functional, values in functional_values.iteritems():
                # Per-frame values have one entry per frame; aggregated
                # functionals have one entry per second of video.
                if int(functional) == FeatureFunctionType.VALUE:
                    self.assertEqual(len(values), int(self.video.nb_frames), "Video features ({}) size of video {} should be {}. {} instead".format(functional, self.video.full_name, self.video.nb_frames, len(values)))
                else:
                    self.assertEqual(len(values), math.floor(self.video.nb_frames / self.video.video_part.fps), "Video features ({}) size of video {} should be {}. {} instead".format(functional, self.video.full_name, math.floor(self.video.nb_frames / self.video.video_part.fps), len(values)))
# Redirect web video output into the fixture tree for the duration of the tests.
AudioProcessorTestCase = override_settings(WEBCLIENT_VIDEOS_PATH=os.path.join(test_path, "web"))(AudioProcessorTestCase)
|
dumoulinj/ers
|
ers_backend/audio_processor/tests.py
|
Python
|
mit
| 2,387
|
from parsimonious.exceptions import VisitationError
from parsimonious.exceptions import ParseError as ParsimoniousParseError
from .types import Source
# All parsing failures derive from ParseError (a ValueError) so callers can
# catch the whole family uniformly.
class ParseError(ValueError): pass
class OutOfContextNodeError(ParseError): pass
class ComparisonParseError(ParseError): pass
class OperationParseError(ParseError): pass
# NOTE(review): presumably raised when required "baggage" data is absent —
# confirm against callers.
class MissingBaggageError(Exception): pass
# Raised when the requested rulebook cannot be located.
class RulebookNotFound(Exception): pass
def raise_parse_error(position, error_type=ParseError):
    """Raise *error_type* describing *position*.

    When *position* is a :class:`Source`, its repr alone forms the message;
    otherwise the message notes that the source position could not be
    determined.  The position object is always attached as the second
    exception argument.
    """
    if isinstance(position, Source):
        message = "%r" % position
    else:
        message = "Could not determine source position:\n%r" % position
    raise error_type(message, position)
|
eykd/ravel
|
ravel/exceptions.py
|
Python
|
mit
| 733
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class UsersApp(AppConfig):
name = 'munch.apps.users'
verbose_name = _('Users')
def ready(self):
import munch.apps.users.api.v1.urls # noqa
|
crunchmail/munch-core
|
src/munch/apps/users/apps.py
|
Python
|
agpl-3.0
| 253
|
import unittest
# Skip this whole module test if running under PyPy (incompatible with Cython)
try:
    import __pypy__
    # Empty test unit to show the reason of skipping
    class TestMissingDependency(unittest.TestCase):
        @unittest.skip('Missing dependency - Cython is incompatible with PyPy')
        def test_fail(self):  # FIX: was missing `self`; unittest passes it implicitly
            pass
# Else we're not under PyPy, we can run the test
except ImportError:
    __pypy__ = None
from ..cpolynomial import Polynomial
from ..cff import GF2int, init_lut
def map_GF2int(L):
    """Coerce every element of *L* into a GF2int, returning a list."""
    return [GF2int(value) for value in L]
class cTestGFPoly(unittest.TestCase):
    """Tests that the Polynomial class works when given GF2int objects
    instead of regular integers
    """
    def setUp(self):
        # (Re) initialize the GF tables to avoid conflicts with previously ran tests
        init_lut(generator=3, prim=0x11b, c_exp=8)
    def test_add(self):
        one = Polynomial(map_GF2int([8,3,5,1]))
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        r = one + two
        self.assertEqual(list(r.coefficients), [5,3,9,2,3,9])
    def test_sub(self):
        # In GF(2^n) subtraction is XOR, so it matches addition exactly.
        one = Polynomial(map_GF2int([8,3,5,1]))
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        r = one - two
        self.assertEqual(list(r.coefficients), [5,3,9,2,3,9])
    def test_mul(self):
        one = Polynomial(map_GF2int([8,3,5,1]))
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        r = one * two
        self.assertEqual(list(r.coefficients), [40,23,28,1,53,78,7,46,8])
    def test_mul_at(self):
        # mul_at(two, k) must equal coefficient k of the full product.
        one = Polynomial(map_GF2int([2,4,7,3]))
        two = Polynomial(map_GF2int([5,2,4,2]))
        k = 3
        r1 = one * two
        r2 = one.mul_at(two, k)
        self.assertEqual(r1.get_coefficient(k), r2)
    def test_scale(self):
        one = Polynomial(map_GF2int([2,14,7,3]))
        scalar = 12
        r = one.scale(12)
        self.assertEqual(list(r.coefficients), [24, 72, 36, 20])
    def test_div(self):
        one = Polynomial(map_GF2int([8,3,5,1]))
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        q, r = divmod(two,one)
        self.assertEqual(list(q.coefficients), [101, 152, 11])
        self.assertEqual(list(r.coefficients), [183, 185, 3])
        # Make sure they multiply back out okay
        self.assertEqual(q*one + r, two)
    def test_div_fast(self):
        # _fastdivmod must agree with the reference divmod above.
        one = Polynomial(map_GF2int([8,3,5,1]))
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        q, r = two._fastdivmod(one)
        self.assertEqual(list(q.coefficients), [101, 152, 11])
        self.assertEqual(list(r.coefficients), [183, 185, 3])
        # Make sure they multiply back out okay
        self.assertEqual(q*one + r, two)
    def test_div_gffast(self):
        one = Polynomial(map_GF2int([1,3,5,1])) # must be monic! (because the function is optimized for monic divisor polynomial)
        two = Polynomial(map_GF2int([5,3,1,1,6,8]))
        q, r = two._gffastdivmod(one) # optimized for monic divisor polynomial
        q2, r2 = two._fastdivmod(one)
        self.assertEqual(q, q2)
        self.assertEqual(r, r2)
        self.assertEqual(list(q.coefficients), [5, 12, 4])
        self.assertEqual(list(r.coefficients), [52, 30, 12])
        # Make sure they multiply back out okay
        self.assertEqual(q*one + r, two)
    def test_div_scalar(self):
        """Tests division by a scalar"""
        numbers = map_GF2int([5,20,50,100,134,158,0,148,233,254,4,5,2])
        scalar = GF2int(17)
        poly = Polynomial(list(numbers))
        scalarpoly = Polynomial(x0=scalar)
        self.assertEqual(
            list((poly // scalarpoly).coefficients),
            [x / scalar for x in numbers]
        )
    def test_div_scalar2(self):
        """Test that dividing by a scalar is the same as multiplying by the
        scalar's inverse"""
        a = Polynomial(map_GF2int([5,3,1,1,6,8]))
        scalar = GF2int(50)
        self.assertEqual(
            a * Polynomial(x0=scalar),
            a // Polynomial(x0=scalar.inverse())
        )
    def test_evaluate(self):
        a = Polynomial(map_GF2int([5,3,1,1,6,8]))
        e = a.evaluate(3)
        self.assertEqual(e, 196)
    def test_evaluate_array(self):
        a = Polynomial(map_GF2int([5,3,1,1,6,8]))
        arr, sum = a.evaluate_array(3)
        self.assertEqual(sum, 196)
        self.assertEqual(list(arr), [255, 51, 15, 5, 10, 8])
    def test_derive(self):
        a = Polynomial(map_GF2int([5,3,1,1,6,8]))
        r = a.derive()
        self.assertEqual(list(r), [17, 12, 3, 2, 6])
class cTestPolynomial(unittest.TestCase):
    """Tests of the Cython Polynomial class over plain integers."""
    def test_add_1(self):
        one = Polynomial([2,4,7,3])
        two = Polynomial([5,2,4,2])
        r = one + two
        self.assertEqual(list(r.coefficients), [7, 6, 11, 5])
    def test_add_2(self):
        # Addition when the left operand has the higher degree.
        one = Polynomial([2,4,7,3,5,2])
        two = Polynomial([5,2,4,2])
        r = one + two
        self.assertEqual(list(r.coefficients), [2,4,12,5,9,4])
    def test_add_3(self):
        # Addition when the right operand has the higher degree.
        one = Polynomial([7,3,5,2])
        two = Polynomial([6,8,5,2,4,2])
        r = one + two
        self.assertEqual(list(r.coefficients), [6,8,12,5,9,4])
    def test_mul_1(self):
        one = Polynomial([2,4,7,3])
        two = Polynomial([5,2,4,2])
        r = one * two
        self.assertEqual(list(r.coefficients),
                [10,24,51,49,42,26,6])
    def test_mul_at_1(self):
        # mul_at(two, k) must equal coefficient k of the full product.
        one = Polynomial([2,4,7,3])
        two = Polynomial([5,2,4,2])
        k = 3
        r1 = one * two
        r2 = one.mul_at(two, k)
        self.assertEqual(r1.get_coefficient(k), r2)
    def test_scale_1(self):
        one = Polynomial([2,4,7,3])
        scalar = 12
        r = one.scale(12)
        self.assertEqual(list(r.coefficients), [24, 48, 84, 36])
    def test_div_1(self):
        one = Polynomial([1,4,0,3])
        two = Polynomial([1,0,1])
        q, r = divmod(one, two)
        self.assertEqual(q, one // two)
        self.assertEqual(r, one % two)
        self.assertEqual(list(q.coefficients), [1,4])
        self.assertEqual(list(r.coefficients), [-1,-1])
    def test_div_2(self):
        one = Polynomial([1,0,0,2,2,0,1,2,1])
        two = Polynomial([1,0,-1])
        q, r = divmod(one, two)
        self.assertEqual(q, one // two)
        self.assertEqual(r, one % two)
        self.assertEqual(list(q.coefficients), [1,0,1,2,3,2,4])
        self.assertEqual(list(r.coefficients), [4,5])
    def test_div_3(self):
        # 0 quotient
        one = Polynomial([1,0,-1])
        two = Polynomial([1,1,0,0,-1])
        q, r = divmod(one, two)
        self.assertEqual(q, one // two)
        self.assertEqual(r, one % two)
        self.assertEqual(list(q.coefficients), [0])
        self.assertEqual(list(r.coefficients), [1,0,-1])
    def test_div_4(self):
        # no remander
        one = Polynomial([1,0,0,2,2,0,1,-2,-4])
        two = Polynomial([1,0,-1])
        q, r = divmod(one, two)
        self.assertEqual(q, one // two)
        self.assertEqual(r, one % two)
        self.assertEqual(list(q.coefficients), [1,0,1,2,3,2,4])
        self.assertEqual(list(r.coefficients), [0])
    def test_div_fast_1(self):
        # no remander
        one = Polynomial([1,0,0,2,2,0,1,-2,-4])
        two = Polynomial([1,0,-1])
        q, r = one._fastdivmod(two)
        self.assertEqual(q, one._fastfloordiv(two))
        self.assertEqual(r, one._fastmod(two))
        self.assertEqual(list(q.coefficients), [1,0,1,2,3,2,4])
        self.assertEqual(list(r.coefficients), [0])
    def test_getcoeff(self):
        p = Polynomial([9,3,3,2,2,3,1,-2,-4])
        self.assertEqual(p.get_coefficient(0), -4)
        self.assertEqual(p.get_coefficient(2), 1)
        self.assertEqual(p.get_coefficient(8), 9)
        self.assertEqual(p.get_coefficient(9), 0) # try to get a higher coefficient than the length of the polynomial, it should return 0 (non significant high coefficients are removed)
    def test_evaluate(self):
        a = Polynomial([5,3,1,1,6,8])
        e = a.evaluate(3)
        self.assertEqual(e, 1520)
    def test_evaluate_array(self):
        a = Polynomial([5,3,1,1,6,8])
        arr, sum = a.evaluate_array(3)
        self.assertEqual(sum, 1520)
        self.assertEqual(list(arr), [1215, 243, 27, 9, 18, 8])
    def test_derive(self):
        a = Polynomial([5,3,1,1,6,8])
        r = a.derive()
        self.assertEqual(list(r), [25, 12, 3, 2, 6])
if __name__ == "__main__":
unittest.main()
|
lrq3000/unireedsolomon
|
unireedsolomon/tests/test_cpolynomial.py
|
Python
|
mit
| 9,289
|
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
# Fetch data from various services
from report_server.common import config
import base64
import cStringIO
import logging
import httplib
import json
import pycurl
import urllib
# Module-level logger for the report-server API client.
_LOG = logging.getLogger(__name__)
class ApiClient:
    """Thin HTTPS client for the rhic_serve REST API.

    Every method reads connection settings (host, port, credentials) from
    ``config.get_rhic_serve_config_info()`` and delegates the actual HTTP
    work to the module-level :func:`request` helper, which returns the
    JSON-decoded body on success.
    """
    # Module-level side effect kept from the original: make sure the
    # configuration is initialized as soon as the class body executes.
    config.init()

    @staticmethod
    def get_all_rhics():
        """Return the decoded list of all RHIC records."""
        c = config.get_rhic_serve_config_info()
        # FIX: request() returns only the decoded body, not a
        # (status, body) pair -- the old `status, data = request(...)`
        # unpacking raised at runtime. Follow the same convention as the
        # sibling methods.
        data = request(
            c['host'], c['port'], '/api/rhic/', c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_rhic(rhic_id):
        """Return the record of the single RHIC identified by *rhic_id*."""
        c = config.get_rhic_serve_config_info()
        # FIX: the path was hard-coded to a sample id
        # ('503e31fdd9c1416fd0000003'), silently ignoring the rhic_id
        # argument.
        api = '/api/rhic/%s/' % rhic_id
        data = request(
            c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_account():
        """Return the decoded account records."""
        c = config.get_rhic_serve_config_info()
        api = '/api/account/'
        data = request(
            c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_contract(api):
        """Fetch an arbitrary API path (used for contract lookups)."""
        c = config.get_rhic_serve_config_info()
        data = request(
            c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_rhic_details(RHIC):
        """Resolve a RHIC uuid to its database id, then fetch its record.

        Returns None implicitly when the uuid is unknown.
        """
        id = getRHICdata(RHIC)
        c = config.get_rhic_serve_config_info()
        if id:
            api = '/api/rhic/' + id + '/'
            data = request(
                c['host'], c['port'], api, c['user'], c['passwd'], False)
            return data

    @staticmethod
    def getRHIC_in_account():
        """Return the uuids of all RHICs belonging to the caller's account."""
        c = config.get_rhic_serve_config_info()
        api = '/api/account/'
        data = request(
            c['host'], c['port'], api, c['user'], c['passwd'], False)
        account_doc = data[0]
        print(account_doc)
        # NOTE(review): the double indexing (data[0][0]) implies the first
        # element is itself a list -- confirm against the /api/account/
        # payload shape.
        account_id = account_doc[0]['account_id']
        api = '/api/rhic/'
        data = request(
            c['host'], c['port'], api, c['user'], c['passwd'], False)
        all_rhics = data
        my_rhics = []
        for rhic in all_rhics:
            if rhic['account_id'] == account_id:
                my_rhics.append(rhic['uuid'])
        return my_rhics
def getRHICdata(RHIC):
    """Return the database id of the RHIC whose uuid equals *RHIC*.

    Returns None (falls through) when no record matches.
    """
    conf = config.get_rhic_serve_config_info()
    all_rhics = request(
        conf['host'], conf['port'], '/api/rhic/',
        conf['user'], conf['passwd'], False)
    matches = (entry['id'] for entry in all_rhics if entry['uuid'] == RHIC)
    return next(matches, None)
def requestPyCurl(host, port, url, username, password, debug=False):
    """Fetch https://<host><url> with HTTP basic auth via pycurl.

    Returns the cStringIO buffer holding the raw response body.
    NOTE(review): the ``port`` and ``debug`` parameters are accepted but
    never used.
    """
    buf = cStringIO.StringIO()
    URL = 'https://' + host + url
    USER = username
    PASS = password
    conn = pycurl.Curl()
    conn.setopt(pycurl.USERPWD, "%s:%s" % (USER, PASS))
    conn.setopt(pycurl.URL, URL)
    conn.setopt(pycurl.WRITEFUNCTION, buf.write)
    conn.perform()
    return buf
def request(host, port, url, username, password, debug=False):
    """GET *url* from host:port over HTTPS with HTTP basic auth.

    Returns the JSON-decoded body on HTTP 200, the raw body otherwise.
    (Python 2 module: uses httplib/urllib and print statements.)
    """
    connection = httplib.HTTPSConnection(host, port)
    if debug:
        connection.set_debuglevel(100)
    method = 'GET'
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    # Build the Basic auth header by hand; encodestring appends a trailing
    # newline, sliced off here.
    raw = ':'.join((username, password))
    encoded = base64.encodestring(raw)[:-1]
    headers['Authorization'] = 'Basic ' + encoded
    query_params = {
    }
    data = urllib.urlencode(query_params, True)
    url = url + "?" + data
    _LOG.info("Sending HTTP request to: %s:%s%s with headers:%s" % (
        host, port, url, headers))
    connection.request(method, url, body=None, headers=headers)
    response = connection.getresponse()
    response_body = response.read()
    if response.status != 200:
        _LOG.info("Response status '%s', '%s', '%s'" % (
            response.status, response.reason, response_body))
    if response.status == 200:
        response_body_raw = response_body
        response_body = json.loads(response_body_raw)
    if debug:
        print "Response: %s %s" % (response.status, response.reason)
        print "JSON: %s" % (json.dumps(response_body))
    return response_body
|
splice/report_server
|
src/report_server/common/client.py
|
Python
|
gpl-2.0
| 5,321
|
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: clean up custom fields of domains that are no longer active.

    Reloads the Domain doctypes first so their current schema is in
    place, then walks every Domain record; each one absent from the
    active-domain set gets its data setup re-run and its custom field
    removed.
    """
    for doctype in ("domain", "has_domain"):
        frappe.reload_doc("core", "doctype", doctype)
    active = frappe.get_active_domains()
    for record in frappe.get_all("Domain"):
        if record.name in active:
            continue
        domain_doc = frappe.get_doc("Domain", record.name)
        domain_doc.setup_data()
        domain_doc.remove_custom_field()
|
vjFaLk/frappe
|
frappe/patches/v10_0/remove_custom_field_for_disabled_domain.py
|
Python
|
mit
| 442
|
from django.utils.encoding import smart_str
from haystack import models
from lazymodel import LazyModel
class SearchResult(models.SearchResult):
    """Extended SearchResult class for general purposes."""
    def __getattr__(self, attr):
        """
        The __getattr__ method of Haystack's SearchResult is too lenient.
        This class will raise exceptions if an attribute is missing.
        """
        # copy/pickle machinery probes for __getnewargs__; answer honestly so
        # the default copy path is taken instead of getting a bogus value.
        if attr == '__getnewargs__':
            raise AttributeError
        try:
            return self.__dict__[attr]
        except KeyError:
            raise AttributeError
    def __str__(self):
        # NOTE(review): ``unicode`` makes this module Python 2 only.
        return smart_str(unicode(self))
    @property
    def _meta(self):
        # Expose the model's _meta so this result quacks like a model instance.
        return self.model._meta
    @property
    def id(self):
        """Return the database ID instead of the search ID."""
        return self.pk
    @property
    def object(self):
        # Lazily wrap the backing model row; nothing is fetched until used.
        if self._object is None:
            self._object = LazyModel(self.model, self.pk)
        return self._object
    def get_identifier(self):
        # The search-engine identifier lives in __dict__['id']; the ``id``
        # property above intentionally shadows it with the database pk.
        return self.__dict__['id']
    def get_label(self):
        return self.model.get_label()
class LazySearchResult(SearchResult):
    """Get missing attributes from the lazy/cached object."""
    def __unicode__(self):
        return unicode(self.object)
    def __getattr__(self, attr):
        # Delegate anything not stored on the result to the lazy model object.
        return getattr(self.object, attr)
|
apnarm/django-apn-search
|
apn_search/results.py
|
Python
|
mit
| 1,372
|
"""Zookeeper Partitioner Implementation
:Maintainer: None
:Status: Unknown
:class:`SetPartitioner` implements a partitioning scheme using
Zookeeper for dividing up resources amongst members of a party.
This is useful when there is a set of resources that should only be
accessed by a single process at a time that multiple processes
across a cluster might want to divide up.
Example Use-Case
----------------
- Multiple workers across a cluster need to divide up a list of queues
so that no two workers own the same queue.
"""
from functools import partial
import logging
import os
import socket
from kazoo.exceptions import KazooException, LockTimeout
from kazoo.protocol.states import KazooState
from kazoo.recipe.watchers import PatientChildrenWatch
# Module-level logger for the partitioner recipe.
log = logging.getLogger(__name__)
class PartitionState(object):
    """Symbolic names for the high-level states of a partition set.

    ALLOCATING -- the set needs to be partitioned; an existing partition
        set may first need releasing before a new one can be acquired.
    ACQUIRED -- the set has been partitioned and acquired.
    RELEASE -- a repartition is pending; current partitions must be
        released before a new allocation can be made.
    FAILURE -- partitioning failed: the maximum time to partition the set
        was exceeded or the Zookeeper session was lost.  The partitioner
        is unusable after this state and must be recreated.
    """
    ALLOCATING = "ALLOCATING"
    ACQUIRED = "ACQUIRED"
    RELEASE = "RELEASE"
    FAILURE = "FAILURE"
class SetPartitioner(object):
    """Partitions a set amongst members of a party
    This class will partition a set amongst members of a party such
    that each member will be given zero or more items of the set and
    each set item will be given to a single member. When new members
    enter or leave the party, the set will be re-partitioned amongst
    the members.
    When the :class:`SetPartitioner` enters the
    :attr:`~PartitionState.FAILURE` state, it is unrecoverable
    and a new :class:`SetPartitioner` should be created.
    Example:
    .. code-block:: python
        from kazoo.client import KazooClient
        client = KazooClient()
        client.start()
        qp = client.SetPartitioner(
            path='/work_queues', set=('queue-1', 'queue-2', 'queue-3'))
        while 1:
            if qp.failed:
                raise Exception("Lost or unable to acquire partition")
            elif qp.release:
                qp.release_set()
            elif qp.acquired:
                for partition in qp:
                    # Do something with each partition
            elif qp.allocating:
                qp.wait_for_acquire()
    **State Transitions**
    When created, the :class:`SetPartitioner` enters the
    :attr:`PartitionState.ALLOCATING` state.
    :attr:`~PartitionState.ALLOCATING` ->
    :attr:`~PartitionState.ACQUIRED`
        Set was partitioned successfully, the partition list assigned
        is accessible via list/iter methods or calling list() on the
        :class:`SetPartitioner` instance.
    :attr:`~PartitionState.ALLOCATING` ->
    :attr:`~PartitionState.FAILURE`
        Allocating the set failed either due to a Zookeeper session
        expiration, or failure to acquire the items of the set within
        the timeout period.
    :attr:`~PartitionState.ACQUIRED` ->
    :attr:`~PartitionState.RELEASE`
        The members of the party have changed, and the set needs to be
        repartitioned. :meth:`SetPartitioner.release` should be called
        as soon as possible.
    :attr:`~PartitionState.ACQUIRED` ->
    :attr:`~PartitionState.FAILURE`
        The current partition was lost due to a Zookeeper session
        expiration.
    :attr:`~PartitionState.RELEASE` ->
    :attr:`~PartitionState.ALLOCATING`
        The current partition was released and is being re-allocated.
    """
    def __init__(self, client, path, set, partition_func=None,
                 identifier=None, time_boundary=30, max_reaction_time=1,
                 state_change_event=None):
        """Create a :class:`~SetPartitioner` instance
        :param client: A :class:`~kazoo.client.KazooClient` instance.
        :param path: The partition path to use.
        :param set: The set of items to partition.
        :param partition_func: A function to use to decide how to
                               partition the set.
        :param identifier: An identifier to use for this member of the
                           party when participating. Defaults to the
                           hostname + process id.
        :param time_boundary: How long the party members must be stable
                              before allocation can complete.
        :param max_reaction_time: Maximum reaction time for party members
                                  change.
        :param state_change_event: An optional Event object that will be set
                                   on every state change.
        """
        # Used to differentiate two states with the same names in time
        self.state_id = 0
        self.state = PartitionState.ALLOCATING
        self.state_change_event = state_change_event or \
            client.handler.event_object()
        self._client = client
        self._path = path
        self._set = set
        self._partition_set = []
        self._partition_func = partition_func or self._partitioner
        self._identifier = identifier or '%s-%s' % (
            socket.getfqdn(), os.getpid())
        self._locks = []
        self._lock_path = '/'.join([path, 'locks'])
        self._party_path = '/'.join([path, 'party'])
        self._time_boundary = time_boundary
        self._max_reaction_time = max_reaction_time
        self._acquire_event = client.handler.event_object()
        # Create basic path nodes
        client.ensure_path(path)
        client.ensure_path(self._lock_path)
        client.ensure_path(self._party_path)
        # Join the party
        self._party = client.ShallowParty(self._party_path,
                                          identifier=self._identifier)
        self._party.join()
        # Re-entrant lock guarding all reads/writes of self.state/state_id.
        self._state_change = client.handler.rlock_object()
        client.add_listener(self._establish_sessionwatch)
        # Now watch the party and set the callback on the async result
        # so we know when we're ready
        self._child_watching(self._allocate_transition, client_handler=True)
    def __iter__(self):
        """Return the partitions in this partition set"""
        for partition in self._partition_set:
            yield partition
    @property
    def failed(self):
        """Corresponds to the :attr:`PartitionState.FAILURE` state"""
        return self.state == PartitionState.FAILURE
    @property
    def release(self):
        """Corresponds to the :attr:`PartitionState.RELEASE` state"""
        return self.state == PartitionState.RELEASE
    @property
    def allocating(self):
        """Corresponds to the :attr:`PartitionState.ALLOCATING`
        state"""
        return self.state == PartitionState.ALLOCATING
    @property
    def acquired(self):
        """Corresponds to the :attr:`PartitionState.ACQUIRED` state"""
        return self.state == PartitionState.ACQUIRED
    def wait_for_acquire(self, timeout=30):
        """Wait for the set to be partitioned and acquired
        :param timeout: How long to wait before returning.
        :type timeout: int
        """
        self._acquire_event.wait(timeout)
    def release_set(self):
        """Call to release the set
        This method begins the step of allocating once the set has
        been released.
        """
        self._release_locks()
        if self._locks:  # pragma: nocover
            # This shouldn't happen, it means we couldn't release our
            # locks, abort
            self._fail_out()
            return
        else:
            with self._state_change:
                if self.failed:
                    return
                self._set_state(PartitionState.ALLOCATING)
            self._child_watching(self._allocate_transition, client_handler=True)
    def finish(self):
        """Call to release the set and leave the party"""
        self._release_locks()
        self._fail_out()
    def _fail_out(self):
        # Terminal transition: mark FAILURE and best-effort leave the party.
        with self._state_change:
            self._set_state(PartitionState.FAILURE)
        if self._party.participating:
            try:
                self._party.leave()
            except KazooException:  # pragma: nocover
                pass
    def _allocate_transition(self, result):
        """Called when in allocating mode, and the children settled"""
        # Did we get an exception waiting for children to settle?
        if result.exception:  # pragma: nocover
            self._fail_out()
            return
        # children is the stabilized party roster; async_result fires on
        # the next roster change (see updated() below).
        children, async_result = result.get()
        children_changed = self._client.handler.event_object()
        def updated(result):
            with self._state_change:
                children_changed.set()
                if self.acquired:
                    self._set_state(PartitionState.RELEASE)
        with self._state_change:
            # We can lose connection during processing the event
            if not self.allocating:
                return
            # Remember the state ID to check later for race conditions
            state_id = self.state_id
            # updated() will be called when children change
            async_result.rawlink(updated)
        # Check whether the state has changed during the lock acquisition
        # and abort the process if so.
        def abort_if_needed():
            if self.state_id == state_id:
                if children_changed.is_set():
                    # The party has changed. Repartitioning...
                    self._abort_lock_acquisition()
                    return True
                else:
                    return False
            else:
                if self.allocating or self.acquired:
                    # The connection was lost and user initiated a new
                    # allocation process. Abort it to eliminate race
                    # conditions with locks.
                    with self._state_change:
                        self._set_state(PartitionState.RELEASE)
                return True
        # Split up the set
        partition_set = self._partition_func(
            self._identifier, list(self._party), self._set)
        # Proceed to acquire locks for the working set as needed
        for member in partition_set:
            lock = self._client.Lock(self._lock_path + '/' + str(member))
            while True:
                try:
                    # We mustn't lock without timeout because in that case we
                    # can get a deadlock if the party state will change during
                    # lock acquisition.
                    lock.acquire(timeout=self._max_reaction_time)
                except LockTimeout:
                    if abort_if_needed():
                        return
                except KazooException:
                    return self.finish()
                else:
                    break
            self._locks.append(lock)
            if abort_if_needed():
                return
        # All locks acquired. Time for state transition.
        with self._state_change:
            if self.state_id == state_id and not children_changed.is_set():
                self._partition_set = partition_set
                self._set_state(PartitionState.ACQUIRED)
                self._acquire_event.set()
                return
        if not abort_if_needed():
            # This mustn't happen. Means a logical error.
            self._fail_out()
    def _release_locks(self):
        """Attempt to completely remove all the locks"""
        self._acquire_event.clear()
        # Iterate over a copy: we mutate self._locks while looping.
        for lock in self._locks[:]:
            try:
                lock.release()
            except KazooException:  # pragma: nocover
                # We proceed to remove as many as possible, and leave
                # the ones we couldn't remove
                pass
            else:
                self._locks.remove(lock)
    def _abort_lock_acquisition(self):
        """Called during lock acquisition if a party change occurs"""
        self._release_locks()
        if self._locks:
            # This shouldn't happen, it means we couldn't release our
            # locks, abort
            self._fail_out()
            return
        self._child_watching(self._allocate_transition, client_handler=True)
    def _child_watching(self, func=None, client_handler=False):
        """Called when children are being watched to stabilize
        This actually returns immediately, child watcher spins up a
        new thread/greenlet and waits for it to stabilize before
        any callbacks might run.
        :param client_handler: If True, deliver the result using the
                               client's event handler.
        """
        watcher = PatientChildrenWatch(self._client, self._party_path,
                                       self._time_boundary)
        asy = watcher.start()
        if func is not None:
            # We spin up the function in a separate thread/greenlet
            # to ensure that the rawlink's it might use won't be
            # blocked
            if client_handler:
                func = partial(self._client.handler.spawn, func)
            asy.rawlink(func)
        return asy
    def _establish_sessionwatch(self, state):
        """Register ourself to listen for session events, we shut down
        if we become lost"""
        with self._state_change:
            if self.failed:
                pass
            elif state == KazooState.LOST:
                self._client.handler.spawn(self._fail_out)
            elif not self.release:
                self._set_state(PartitionState.RELEASE)
        # NOTE(review): returning True on session LOST appears intended to
        # deregister this listener — confirm against kazoo's add_listener
        # contract.
        return state == KazooState.LOST
    def _partitioner(self, identifier, members, partitions):
        # Ensure consistent order of partitions/members
        all_partitions = sorted(partitions)
        workers = sorted(members)
        i = workers.index(identifier)
        # Now return the partition list starting at our location and
        # skipping the other workers
        # (round-robin: worker i takes every len(workers)-th item).
        return all_partitions[i::len(workers)]
    def _set_state(self, state):
        self.state = state
        # Bump state_id so in-flight allocations can detect staleness.
        self.state_id += 1
        self.state_change_event.set()
|
python-zk/kazoo
|
kazoo/recipe/partitioner.py
|
Python
|
apache-2.0
| 14,668
|
# Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""The way the branch puller talks to the database."""
__metaclass__ = type
# Export nothing. This code should be obtained via utilities.
__all__ = []
from datetime import timedelta
from zope.interface import implements
from lp.code.enums import BranchType
from lp.code.interfaces.branchpuller import IBranchPuller
from lp.code.model.branch import Branch
from lp.services.database.constants import UTC_NOW
from lp.services.database.interfaces import IStore
class BranchPuller:
    """See `IBranchPuller`."""
    implements(IBranchPuller)
    # Mirroring-policy constants exposed via the IBranchPuller interface
    # (not referenced within this method).
    MAXIMUM_MIRROR_FAILURES = 5
    MIRROR_TIME_INCREMENT = timedelta(hours=6)
    def acquireBranchToPull(self, *branch_types):
        """See `IBranchPuller`.

        Find the branch of one of the requested types (defaulting to
        mirrored and imported branches) whose ``next_mirror_time`` is
        the earliest one already in the past, call ``startMirroring()``
        on it and return it. Returns None when no branch is due.
        """
        if not branch_types:
            branch_types = (BranchType.MIRRORED, BranchType.IMPORTED)
        branch = IStore(Branch).find(
            Branch,
            Branch.next_mirror_time <= UTC_NOW,
            Branch.branch_type.is_in(branch_types)).order_by(
            Branch.next_mirror_time).first()
        if branch is not None:
            branch.startMirroring()
        return branch
|
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/code/model/branchpuller.py
|
Python
|
agpl-3.0
| 1,256
|
#!/usr/bin/env python3
#
# t.py: utility for contest problem development
# Copyright (C) 2009-2017 Oleg Davydov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from invoker.common import RunResult
class Verdict:
    """Outcome of running a solution/checker on one test.

    Truthiness reflects success and ``str()`` yields the short verdict
    code (e.g. ``OK``, ``CE``, ``WA/3``).
    """

    def __init__(self, message, value, comment='', *, peak_time=None, peak_memory=None):
        self.__message = message
        self.__value = value
        self.__comment = comment
        self.__peak_time = peak_time
        self.__peak_memory = peak_memory

    def __bool__(self):
        return self.__value

    def __str__(self):
        return self.__message

    @property
    def comment(self):
        return self.__comment

    @property
    def peak_time(self):
        return self.__peak_time

    @property
    def peak_memory(self):
        return self.__peak_memory

    @classmethod
    def ce(cls):
        # Compilation error — no per-test suffix.
        return cls("CE", False)

    @classmethod
    def fail_solution(cls, test, result, **kwargs):
        # Map the run outcome to its two-letter verdict code.
        code = {
            RunResult.RUNTIME: 'RE',
            RunResult.LIMIT_TIME: 'TL',
            RunResult.LIMIT_IDLE: 'IL',
            RunResult.LIMIT_MEMORY: 'ML',
        }[result.value]
        return cls("%s/%d" % (code, test), False, **kwargs)

    @classmethod
    def fail_checker(cls, test, result, comment, **kwargs):
        # Checker exit codes: 1 -> wrong answer, 2 -> presentation error,
        # anything else -> judge error.
        if result.value is RunResult.RUNTIME:
            if result.exitcode == 1:
                return cls("WA/%d" % test, False, comment, **kwargs)
            if result.exitcode == 2:
                return cls("PE/%d" % test, False, comment, **kwargs)
        return cls("JE/%d" % test, False, comment, **kwargs)

    @classmethod
    def ok(cls, **kwargs):
        return cls("OK", True, **kwargs)
|
burunduk3/t.sh
|
verdict.py
|
Python
|
gpl-2.0
| 2,357
|
from typing import List, Tuple, Dict, NamedTuple, Optional
import json
import logging
import numpy
from depccg.types import ScoringResult
from depccg.cat import Category
logger = logging.getLogger(__name__)
def is_json(file_path: str) -> bool:
    """Return True if the file at *file_path* contains valid JSON."""
    with open(file_path, 'r') as data_file:
        try:
            json.load(data_file)
        except json.JSONDecodeError:
            return False
    return True
def normalize(word: str) -> str:
    """Map PTB-style bracket tokens back to their literal characters."""
    brackets = {
        "-LRB-": "(",
        "-RRB-": ")",
        "-LCB-": "{",
        "-RCB-": "}",
        "-LSB-": "[",
        "-RSB-": "]",
    }
    # Any non-bracket token is returned unchanged.
    return brackets.get(word, word)
def denormalize(word: str) -> str:
    """Map literal bracket characters to PTB-style tokens.

    Bare brackets map to their PTB token; any other word gets its angle
    brackets escaped to ``-RAB-`` / ``-LAB-``.
    """
    literals = {
        "(": "-LRB-",
        ")": "-RRB-",
        "{": "-LCB-",
        "}": "-RCB-",
        "[": "-LSB-",
        "]": "-RSB-",
    }
    if word in literals:
        return literals[word]
    # Angle-bracket escaping applies only to non-bracket words.
    return word.replace(">", "-RAB-").replace("<", "-LAB-")
def read_pretrained_embeddings(filepath: str) -> numpy.ndarray:
    """Load whitespace-separated embedding vectors from *filepath*.

    The embedding dimension is taken from the first line; every line is
    expected to hold one vector. Returns a float32 array of shape
    (num_lines, dim).

    Fix over the original: the file handle (previously a variable that
    shadowed the ``io`` module name and was not closed on error paths)
    is now managed by a context manager.
    """
    with open(filepath) as embedding_file:
        dim = len(embedding_file.readline().split())
        embedding_file.seek(0)
        nvocab = sum(1 for _ in embedding_file)
        embedding_file.seek(0)
        res = numpy.empty((nvocab, dim), dtype=numpy.float32)
        for i, line in enumerate(embedding_file):
            line = line.strip()
            if len(line) == 0:
                # NOTE(review): a blank line leaves row i uninitialized
                # (numpy.empty) — behavior preserved from the original.
                continue
            res[i] = line.split()
    return res
def read_model_defs(filepath: str) -> Dict[str, int]:
    """Map the first whitespace-separated token of each line to its
    0-based line index.

    Fix over the original: the file handle was opened inline and never
    closed; a context manager now closes it deterministically.
    """
    with open(filepath, encoding='utf-8') as defs_file:
        return {
            line.strip().split(' ')[0]: i
            for i, line in enumerate(defs_file)
        }
def remove_comment(line: str) -> str:
    """Strip a trailing ``#`` comment and surrounding whitespace from *line*."""
    # partition() returns the text before the first '#' (or the whole
    # line when there is none).
    return line.partition('#')[0].strip()
class SpanInfo(NamedTuple):
    """A category constraint read from a partially annotated sentence.

    ``cat`` is the parsed category (None when the span used the ``X``
    wildcard), ``idx`` the start token index, and ``end_idx`` the token
    count of a multi-token span (None for a single-token constraint).
    """
    cat: Category
    idx: int
    end_idx: Optional[int] = None
def read_partial_tree(string: str) -> Tuple[List[str], List[SpanInfo]]:
    """Parse a partially annotated sentence into words and span constraints.

    Tokens of the form ``<CAT`` open a span (``<X`` means "any
    category"), ``>`` closes the most recently opened span,
    ``CAT|word`` constrains a single word, and anything else is a
    plain word. Raises AssertionError on malformed input.
    """
    stack = []
    spans = []
    words = []
    buf = list(reversed(string.split()))
    counter = 0
    while buf:
        item = buf.pop()
        if item.startswith('<'):
            cat = item[1:]
            # 'X' acts as a wildcard: no category constraint on the span.
            cat = None if cat == 'X' else Category.parse(cat)
            stack.append(cat)
            stack.append(counter)
        elif item == '>':
            start = stack.pop()
            cat = stack.pop()
            # counter - start is the number of word tokens in the span.
            spans.append(SpanInfo(cat, start, counter - start))
        else:
            items = item.split('|')
            if len(items) == 1:
                words.append(items[0])
            elif len(items) == 2:
                cat, word = items
                assert len(cat) > 0 and len(word) > 0, \
                    'failed to parse partially annotated sentence.'
                words.append(word)
                spans.append(SpanInfo(Category.parse(cat), counter))
            # Only word tokens advance the position counter.
            counter += 1
    assert len(stack) == 0, 'failed to parse partially annotated sentence.'
    return words, spans
def maybe_split_and_join(string):
    """Return ``(token_list, joined_string)`` for either a list or a str."""
    if isinstance(string, list):
        return string, ' '.join(string)
    assert isinstance(string, str)
    return string.split(' '), string
def read_weights(filename, file_type='json'):
    """Load per-sentence tag/dependency score matrices from a JSON-lines
    file.

    Each line is a JSON object carrying flattened ``heads`` /
    ``head_tags`` arrays plus their shapes; the category list is taken
    from the first line that declares it. Returns ``(scores,
    categories)`` where ``scores`` is a list of ``ScoringResult``.

    Fix over the original: ``open(filename)`` was iterated without ever
    being closed; a context manager now closes the handle.

    :param filename: path to the JSON-lines weight file.
    :param file_type: only ``'json'`` is supported.
    """
    assert file_type == 'json'
    categories = None
    scores = []
    with open(filename) as weight_file:
        for line in weight_file:
            json_dict = json.loads(line.strip())
            if categories is None:
                categories = [
                    Category.parse(cat)
                    for cat in json_dict['categories']
                ]
            dep_scores = numpy.array(json_dict['heads']) \
                .reshape(json_dict['heads_shape']) \
                .astype(numpy.float32)
            tag_scores = numpy.array(json_dict['head_tags']) \
                .reshape(json_dict['head_tags_shape']) \
                .astype(numpy.float32)
            scores.append(
                ScoringResult(
                    tag_scores,
                    dep_scores
                )
            )
    return scores, categories
|
masashi-y/depccg
|
depccg/utils.py
|
Python
|
mit
| 4,155
|
# coding: iso-8859-1
"""
htpc-updater
Copyright (c) 2014 Nikola Klaric (nikola@generic.company)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = 'Nikola Klaric (nikola@generic.company)'
__copyright__ = 'Copyright (c) 2014 Nikola Klaric'
__version__ = '0.8.2'
import sys
import argparse
from shutil import copy
from ctypes import c_ulong
from updater.lib import *
HTPC_UPDATER_RELEASES = 'https://api.github.com/repos/nikola/htpc-updater/releases'
HTPC_UPDATER_PROJECT = 'https://github.com/nikola/htpc-updater'
HTPC_UPDATER_DL_PATH = HTPC_UPDATER_PROJECT + '/releases/download/{0}/htpc-updater-{0}.zip'
CONSOLE_HANDLER = windll.Kernel32.GetStdHandle(c_ulong(0xfffffff5))
CWD = os.path.dirname(sys.executable) if hasattr(sys, 'frozen') else os.path.dirname(os.path.realpath(__file__))
# Support unbuffered, colored console output.
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
windll.Kernel32.GetStdHandle.restype = c_ulong
def log(text=None, color=BLACK):
    # Switch the console text color first; when text is None only the
    # color is changed (it persists for subsequent writes).
    windll.Kernel32.SetConsoleTextAttribute(CONSOLE_HANDLER, color)
    if text is not None: sys.stdout.write(text)
# Route library output through our colored console writer (setLogger is
# presumably provided by updater.lib's star import — verify).
setLogger(log)
def _updateComponents(arguments):
    # For each selected component: look up the latest (pre-)release
    # version, compare with the locally installed one, and run the
    # component's installer when an update is needed.
    installPreReleaseList = arguments.get('installPreReleaseList') or ''
    silentInstallList = arguments.get('silentInstallList') or ''
    installComponentsList = arguments.get('installComponentsList')
    # (display name, CLI identifier, install pre-release?, silent?, driver)
    components = [
        ('MPC-HC', 'mpchc',
            'mpchc' in installPreReleaseList,
            'mpchc' in silentInstallList,
            Component(r'MPC-HC\MPC-HC',
                getLatestReleaseVersion = mpcHc_getLatestReleaseVersion,
                getLatestPreReleaseVersion = mpcHc_getLatestPreReleaseVersion,
                getInstalledVersion = mpcHc_getInstalledVersion,
                getPostInstallVersion = mpcHc_getPostInstallVersion,
                installLatestReleaseVersion = mpcHc_installLatestReleaseVersion,
                installLatestPreReleaseVersion = mpcHc_installLatestPreReleaseVersion,
            )
        ),
        ('LAV Filters', 'lavfilters',
            'lavfilters' in installPreReleaseList,
            'lavfilters' in silentInstallList,
            Component(LAVFILTERS_CLSID,
                getLatestReleaseVersion = lavFilters_getLatestReleaseVersion,
                getInstalledVersion = lavFilters_getInstalledVersion,
                installLatestReleaseVersion = lavFilters_installLatestReleaseVersion,
            )
        ),
        ('madVR', 'madvr',
            'madvr' in installPreReleaseList,
            'madvr' in silentInstallList,
            Component(MADVR_CLSID,
                getLatestReleaseVersion = madVr_getLatestReleaseVersion,
                getInstalledVersion = madVr_getInstalledVersion,
                installLatestReleaseVersion = madVr_installLatestReleaseVersion,
            )
        ),
    ]
    for name, identifier, preRelease, silent, instance in components:
        if not identifier in installComponentsList:
            continue
        log('\n')
        # 'Pre' selects the getLatestPreReleaseVersion/... method variants.
        prefix, infix = ('pre-', 'Pre') if preRelease else ('', '')
        try:
            latestVersion = getattr(instance, 'getLatest%sReleaseVersion' % infix)()
        except:
            # NOTE(review): bare except — any lookup failure is reported
            # as a network/metadata error.
            log('ERROR: Could not retrieve version info of the latest %s %srelease.\n' % (name, prefix), RED)
        else:
            log('Latest %srelease version of %s: %s\n' % (prefix, name, latestVersion))
            mustInstall = False
            installedVersion, detectedInstallationPath = instance.getInstalledVersion()
            if installedVersion is not None:
                log('Installed version: %s\n\t%s\n' % (installedVersion, detectedInstallationPath))
                if getVersionTuple(installedVersion) < getVersionTuple(latestVersion):
                    mustInstall = True
                else:
                    log('%s does not need to be updated.\n' % name, GREEN)
            else:
                log('%s does not seem to be installed on the local machine.\n' % name)
                mustInstall = True
            if mustInstall:
                getattr(instance, 'installLatest%sReleaseVersion' % infix)(latestVersion, detectedInstallationPath, silent)
                # Verify the installer actually produced the requested version.
                currentInstalledVersion, currentInstallationPath = instance.getPostInstallVersion(cwd=CWD)
                if currentInstallationPath is None or getVersionTuple(currentInstalledVersion) != getVersionTuple(latestVersion):
                    log('\nFailed to %s %s %s.\n'
                        % ('update to' if installedVersion is not None else 'install', name, latestVersion), RED)
                else:
                    log(' done.\n')
                    if detectedInstallationPath != currentInstallationPath:
                        log('%s %s is now installed in:\n\t%s\n'
                            % (name, latestVersion, currentInstallationPath))
                        if installedVersion is not None:
                            log('Your previous installation of %s %s remains in:\n\t%s\n'
                                % (name, installedVersion, detectedInstallationPath))
                    log('Successfully %s %s. No errors.\n'
                        % ('updated' if installedVersion is not None else 'installed', name), GREEN)
def _updateSelf():
    # Self-update: only applies to the frozen (PyInstaller) executable.
    # Downloads the newest release, writes it to a temp file and
    # re-executes it with --relaunch so the copy back can happen from
    # outside the original directory.
    if hasattr(sys, 'frozen'):
        htpcUpdaterExecutable = sys.executable
        htpcUpdaterDirectory = os.path.dirname(htpcUpdaterExecutable)
        log('\nChecking for new version of htpc-updater ...')
        try:
            # Connectivity probe before asking the GitHub API.
            requests.get(HTPC_UPDATER_PROJECT)
        except:
            log(' ERROR: Could not connect to GitHub.\n', RED)
        else:
            releaseVersion = getLatestGitHubReleaseVersion(HTPC_UPDATER_RELEASES)
            if getVersionTuple(releaseVersion) > getVersionTuple(__version__):
                log(' %s is available, starting upgrade process.\n' % releaseVersion)
                url = HTPC_UPDATER_DL_PATH.format(releaseVersion)
                log('Downloading %s ...' % url)
                htpcUpdaterZipFile = requests.get(url, headers=HEADERS_TRACKABLE).content
                log(' done.\n')
                htpcUpdaterNew = writeTempFile(ZipFile(StringIO(htpcUpdaterZipFile)).open('htpc-updater.exe').read())
                args = ['"%s"' % arg for arg in sys.argv]
                args.append('"--relaunch=%s"' % htpcUpdaterDirectory)
                # Clear the PATH so that MSVCRT libraries are not conflicting with libraries
                # from other programs that ship their own, avoiding error R6034.
                # This only affects the currently running htpc-updater.exe.
                environ = os.environ.copy()
                environ.pop('PATH', None)
                log('Restarting htpc-updater ...\n\n')
                os.chdir(os.path.dirname(htpcUpdaterNew))
                # Replace the current process with the downloaded build.
                os.execve(htpcUpdaterNew, args, environ)
            else:
                log(' %s is the latest version.\n' % __version__)
def _isUpdatingSelf(arguments):
return bool(arguments.get('relaunch'))
def _cleanupUpdate(arguments):
    # Running from the freshly downloaded temp executable (see
    # _updateSelf): copy ourselves back over the original install
    # directory given via --relaunch.
    copy(sys.executable, os.path.join(arguments.get('relaunch'), 'htpc-updater.exe'))
if __name__ == '__main__':
    log('htpc-updater %s (%s)\n' % (__version__, HTPC_UPDATER_PROJECT))
    parser = argparse.ArgumentParser(
        prog='htpc-updater',
        formatter_class=argparse.RawTextHelpFormatter,
        description='Install or update MPC-HC, LAV Filters and madVR automagically.',
        epilog="""Examples:
  htpc-updater --install-components=mpchc,madvr --silent-install=mpchc
    Install only MPC-HC and madVR, and do not show the installer GUI of MPC-HC.
  htpc-updater --install-pre-release=mpchc --auto-exit
    Install the latest MPC-HC nightly build and release versions of LAV Filters and madVR, and exit htpc-updater after completion."""
    )
    parser.add_argument('--install-components', dest='installComponentsList', action='store', default='mpchc,lavfilters,madvr',
        help='Install only comma-separated arguments.', metavar='= mpchc* | lavfilters* | madvr*')
    parser.add_argument('--install-pre-release', dest='installPreReleaseList', action='store',
        help='Install pre-release version of comma-separated arguments if available.', metavar='= mpchc')
    parser.add_argument('--silent-install', dest='silentInstallList', action='store',
        help='Install comma-separated arguments without showing installer GUI.', metavar='= mpchc* | lavfilters*')
    parser.add_argument('--auto-exit', dest='autoExit', action='store_true',
        help='Close htpc-updater without prompt for ENTER key.')
    parser.add_argument('--relaunch', action='store',
        help='Do not use this option.')
    options = vars(parser.parse_args())
    if getattr(sys, 'frozen', None):
        # Enable SSL support in requests library when running as EXE.
        os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(sys._MEIPASS, 'cacert.pem')
    # When relaunched by a previous instance, finish the self-update;
    # otherwise check whether a self-update should start.
    if _isUpdatingSelf(options):
        _cleanupUpdate(options)
    else:
        _updateSelf()
    try:
        _updateComponents(options)
    except:
        # Last-resort handler: show the traceback instead of dying silently.
        import traceback
        log('\n', RED)
        traceback.print_exc()
    # Reset the console color before exiting.
    log()
    if not options.get('autoExit'):
        log('\n')
        raw_input('Press ENTER to exit ...')
|
nikola/htpc-updater
|
src/htpc-updater.py
|
Python
|
mit
| 10,353
|
"""
The CouchDB template loader allows for Tornado templates to be stored in CouchDB
and retrieved on demand and supports all of the syntax of including and
extending templates that you'd expect in any other template loader.
"""
import json
import logging
from tornado import escape
from tornado import httpclient
from tornado import template
LOGGER = logging.getLogger(__name__)
class CouchDBLoader(template.BaseLoader):
    """Extends the tornado.template.Loader allowing for templates to be loaded
    out of CouchDB.
    Templates in CouchDB should have an _id matching the value of the name
    that is passed into load. _id's may have /'s in them. The template itself
    should be in the template node of the JSON document in CouchDB.
    """
    def __init__(self, base_url, **kwargs):
        """Creates a template loader.
        :param str base_url: The base URL for the CouchDB server
        """
        super(CouchDBLoader, self).__init__('/', **kwargs)
        self._base_url = base_url.rstrip('/')
        LOGGER.info('Initialized with base URL of %s', self._base_url)
        self._http_client = httpclient.HTTPClient()
    def load(self, name, parent_path=None):
        """Loads a template.
        :param str name: The template name
        :param str parent_path: The optional path for a parent document
        :rtype: tornado.template.Template
        """
        # Cache hit check: fetch from CouchDB only on first use of a name.
        if name not in self.templates:
            self.templates[name] = self._create_template(name)
        return self.templates[name]
    def _create_template(self, name):
        """Create an instance of a tornado.template.Template object for the
        given template name.
        :param str name: The name/path to the template
        :rtype: tornado.template.Template
        """
        url = '%s/%s' % (self._base_url, escape.url_escape(name))
        LOGGER.debug('Making HTTP GET request to %s', url)
        response = self._http_client.fetch(url)
        # BUG FIX: `ensure_ascii` is a json.dumps-only option; passing it to
        # json.loads raises TypeError, so every uncached load crashed.
        data = json.loads(response.body)
        return template.Template(data['template'], name=name, loader=self)
|
lucius-feng/tinman
|
tinman/couchdb.py
|
Python
|
bsd-3-clause
| 2,097
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import ConfigParser
import StringIO
import unittest
class ConfigItems(object):
    """Return configuration file values from sections "DB" and "Web" in a
    configuration file readable by ConfigParser.SafeConfigParser as defined in
    the documentation of the ConfigParser module.
    Args:
        config_file_descriptor: optional file descriptor.
        config_file: optional filename.
        One of those must be set. config_file_descriptor takes precedence if
        both are set.
    Returns:
        ConfigItems object.
    Raises:
        IOError if file not found.
        NoSectionError if config file is missing [DB] and/or [Web] sections.
    """
    def __init__(self, config_file_descriptor=None,
                 config_file='shortweb.config'):
        config = ConfigParser.SafeConfigParser()
        try:
            # EAFP: a missing/None descriptor makes readfp raise
            # AttributeError, in which case we fall back to config_file.
            config.readfp(config_file_descriptor)
        except AttributeError:
            with open(config_file) as f:
                config.readfp(f)
        # Snapshot both sections as plain dicts (raises NoSectionError
        # when either section is missing).
        self._dbargs = dict(config.items('DB'))
        self._webargs = dict(config.items('Web'))
    @property
    def dbargs(self):
        # Read-only view of the [DB] section key/value pairs.
        return self._dbargs
    @property
    def webargs(self):
        # Read-only view of the [Web] section key/value pairs.
        return self._webargs
class TestSequence(unittest.TestCase):
    """Unit tests for ConfigItems: round-trip integrity, required
    sections, empty values, missing files and %()s interpolation."""
    def setUp(self):
        # Canonical field values shared by the config snippets below.
        self.fields = {
            'host':'localhost',
            'user': 'short',
            'passwd': 'shortpassword',
            'db': 'short',
            'base_url': 'http://example.com/s/',
            'title': 'Test suite title',
            'h1': 'Test suite h1'}
    def test_configitems_data_integrity(self):
        """Read back values should be identical to input."""
        config_contents = (
            '[DB]\n'
            'host = {host}\n'
            'user = {user}\n'
            'passwd = {passwd}\n'
            'db = {db}\n'
            '\n'
            '[Web]\n'
            'base_url = {base_url}\n'
            'title = {title}\n'
            'h1 = {h1}').format(**self.fields)
        testfile = StringIO.StringIO(config_contents)
        c = ConfigItems(config_file_descriptor=testfile)
        self.assertEqual(c.dbargs['host'], self.fields['host'])
        self.assertEqual(c.dbargs['user'], self.fields['user'])
        self.assertEqual(c.dbargs['passwd'], self.fields['passwd'])
        self.assertEqual(c.dbargs['db'], self.fields['db'])
        self.assertEqual(c.webargs['base_url'], self.fields['base_url'])
        self.assertEqual(c.webargs['title'], self.fields['title'])
    def test_configitems_sections_exist(self):
        """Lack of [DB] and/or [Web] sections should raise
        ConfigParser.NoSectionError."""
        config_contents_no_db = (
            '[Web]\n'
            'base_url = {base_url}\n'
            'title = {title}\n'
            'h1 = {h1}').format(**self.fields)
        config_contents_no_web = (
            '[DB]\n'
            'host = {host}\n'
            'user = {user}\n'
            'passwd = {passwd}\n'
            'db = {db}\n').format(**self.fields)
        with self.assertRaises(ConfigParser.NoSectionError):
            testfile = StringIO.StringIO(config_contents_no_db)
            ConfigItems(config_file_descriptor=testfile)
        with self.assertRaises(ConfigParser.NoSectionError):
            testfile = StringIO.StringIO(config_contents_no_web)
            ConfigItems(config_file_descriptor=testfile)
    def test_configuration_no_empty_values(self):
        """Empty values should not be allowed and raise
        ConfigParser.ParserError."""
        config_contents_empty_value = (
            '[DB]\n'
            'host\n'
            '\n'
            '[Web]\n'
            'base_url = {base_url}\n'
            'title = {title}\n'
            'h1 = {h1}').format(**self.fields)
        with self.assertRaises(ConfigParser.ParsingError):
            testfile = StringIO.StringIO(config_contents_empty_value)
            ConfigItems(config_file_descriptor=testfile)
    def test_configitems_non_existent_file(self):
        """Non-existent filename should raise IOError."""
        with self.assertRaises(IOError):
            ConfigItems(config_file='/bananarama')
    def test_configitems_interpolation(self):
        """Ensure implementation of ConfigParser style %()s interpolation."""
        config_contents_interpolation = (
            '[DB]\n'
            'host = {host}\n'
            'user = {user}\n'
            'passwd = {passwd}\n'
            'db = {db}\n'
            '\n'
            '[Web]\n'
            'domain = {domain}\n'
            'base_url = %(domain)s/s/\n'
            'title = %(domain)s title\n'
            'h1 = %(title)s in body\n').format(domain='example.com',
                                               **self.fields)
        testfile = StringIO.StringIO(config_contents_interpolation)
        c = ConfigItems(config_file_descriptor=testfile)
        self.assertEqual(c.webargs['base_url'], 'example.com/s/')
        self.assertEqual(c.webargs['title'], 'example.com title')
        self.assertEqual(c.webargs['h1'], 'example.com title in body')
def main():
    """Run this module's unit tests."""
    unittest.main()
if __name__ == '__main__':
    main()
|
dandersson/shortweb
|
swlib/config.py
|
Python
|
gpl-2.0
| 5,380
|
# -*- coding: utf-8 -*-
import itertools
import functools
import os
import re
import logging
import pymongo
import datetime
from dateutil.parser import parse as parse_date
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.mongo import validators
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project.licenses import (
NodeLicense,
NodeLicenseRecord,
)
from website.project import signals as project_signals
from website.prereg import utils as prereg_utils
logger = logging.getLogger(__name__)
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
    """check if the node is anonymous to the user
    :param Node node: Node which the user wants to visit
    :param Auth auth: auth object carrying the user's view-only key
    :return bool anonymous: Whether the node is anonymous to the user or not
    """
    # Prefer the key on the Auth object; fall back to the ?view_only=
    # query parameter of the current request.
    view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
    if not view_only_link:
        return False
    # Public nodes are never anonymized.
    if node.is_public:
        return False
    # Anonymous only if the key matches an active anonymous private link.
    return any(
        link.anonymous
        for link in node.private_links_active
        if link.key == view_only_link
    )
@unique_on(['name', 'schema_version', '_id'])
class MetaSchema(StoredObject):
    """A named, versioned metadata/registration schema document.

    Uniqueness is enforced over (name, schema_version, _id) by ``unique_on``.
    The raw schema (including its optional ``config`` block) is stored in
    ``schema``; the properties below read flags out of that config.
    """
    _id = fields.StringField(default=lambda: str(ObjectId()))
    name = fields.StringField()
    schema = fields.DictionaryField()
    category = fields.StringField()
    # Version of the schema to use (e.g. if questions, responses change)
    schema_version = fields.IntegerField()
    @property
    def _config(self):
        # Optional schema-level configuration block; empty dict when absent.
        return self.schema.get('config', {})
    @property
    def requires_approval(self):
        # Flag read from the schema's config; defaults to False.
        return self._config.get('requiresApproval', False)
    @property
    def fulfills(self):
        return self._config.get('fulfills', [])
    @property
    def messages(self):
        return self._config.get('messages', {})
    @property
    def requires_consent(self):
        return self._config.get('requiresConsent', False)
def ensure_schema(schema, name, version=1):
    """Create or refresh the MetaSchema record for (name, version).

    :param dict schema: Raw schema document to store
    :param str name: Schema name
    :param int version: Schema version
    :return: The saved MetaSchema record
    """
    try:
        schema_obj = MetaSchema.find_one(
            Q('name', 'eq', name) &
            Q('schema_version', 'eq', version)
        )
    except NoResultsFound:
        # No record yet: create a fresh one.
        schema_obj = MetaSchema(
            name=name,
            schema_version=version,
            schema=schema,
        )
    else:
        # Record exists: overwrite its stored schema document.
        schema_obj.schema = schema
    schema_obj.save()
    return schema_obj
def ensure_schemas():
    """Import meta-data schemas from JSON to database if not already loaded
    """
    for schema_data in OSF_META_SCHEMAS:
        ensure_schema(
            schema_data,
            schema_data['name'],
            version=schema_data.get('version', 1),
        )
class MetaData(GuidStoredObject):
    """Free-form metadata dictionary attached to an arbitrary GUID target."""
    _id = fields.StringField(primary=True)
    # AbstractForeignField: any model instance may carry metadata.
    target = fields.AbstractForeignField(backref='metadata')
    data = fields.DictionaryField()
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
    """Validator for Comment#reports: every key must be an existing user id
    and every value a dict containing `category` and `text` keys."""
    for reporter_id, report in value.iteritems():
        if not User.load(reporter_id):
            raise ValidationValueError('Keys must be user IDs')
        if not isinstance(report, dict):
            raise ValidationTypeError('Values must be dictionaries')
        if 'category' not in report or 'text' not in report:
            raise ValidationValueError(
                'Values must include `category` and `text` keys'
            )
class Comment(GuidStoredObject):
    """A user comment on a node; ``target`` may be the node itself or
    another comment (threaded replies)."""

    _id = fields.StringField(primary=True)
    user = fields.ForeignField('user', required=True, backref='commented')
    node = fields.ForeignField('node', required=True, backref='comment_owner')
    target = fields.AbstractForeignField(required=True, backref='commented')

    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
    # True once the comment has been edited at least once.
    modified = fields.BooleanField(default=False)
    # Soft-delete flag; deleted comments are hidden, never removed.
    is_deleted = fields.BooleanField(default=False)
    content = fields.StringField(required=True,
                                 validate=[MaxLengthValidator(settings.COMMENT_MAXLENGTH), validators.string_required])

    # Dictionary field mapping user IDs to dictionaries of report details:
    # {
    #   'icpnw': {'category': 'hate', 'text': 'offensive'},
    #   'cdi38': {'category': 'spam', 'text': 'godwins law'},
    # }
    reports = fields.DictionaryField(validate=validate_comment_reports)

    # For Django compatibility
    @property
    def pk(self):
        return self._id

    @property
    def absolute_api_v2_url(self):
        """Canonical APIv2 URL for this comment."""
        return absolute_reverse('comments:comment-detail', kwargs={'comment_id': self._id})

    # used by django and DRF
    def get_absolute_url(self):
        return self.absolute_api_v2_url

    def get_content(self, auth):
        """ Returns the comment content if the user is allowed to see it. Deleted comments
        can only be viewed by the user who created the comment."""
        if not auth and not self.node.is_public:
            raise PermissionsError
        if self.is_deleted and ((not auth or auth.user.is_anonymous())
                                or (auth and not auth.user.is_anonymous() and self.user._id != auth.user._id)):
            return None
        return self.content

    @classmethod
    def find_unread(cls, user, node):
        """Count comments on ``node`` that contributor ``user`` has not yet
        viewed; returns 0 for non-contributors.

        :param User user: User whose unread count is wanted
        :param Node node: Node whose comments are counted
        :returns: int number of unread comments
        """
        default_timestamp = datetime.datetime(1970, 1, 1, 12, 0, 0)
        n_unread = 0
        if node.is_contributor(user):
            if user.comments_viewed_timestamp is None:
                # Lazily initialize the per-node "last viewed" map.
                user.comments_viewed_timestamp = {}
                user.save()
            view_timestamp = user.comments_viewed_timestamp.get(node._id, default_timestamp)
            # Unread: another user's live comment created or modified since
            # the viewer's last visit to this node's comment pane.
            n_unread = Comment.find(Q('node', 'eq', node) &
                                    Q('user', 'ne', user) &
                                    Q('is_deleted', 'ne', True) &
                                    (Q('date_created', 'gt', view_timestamp) |
                                     Q('date_modified', 'gt', view_timestamp))).count()
        return n_unread

    @classmethod
    def create(cls, auth, **kwargs):
        """Create, save, and log a new comment.

        :param Auth auth: Auth of the commenting user
        :raises PermissionsError: if ``auth`` may not comment on the node
        :returns: the saved Comment
        """
        comment = cls(**kwargs)
        if not comment.node.can_comment(auth):
            raise PermissionsError('{0!r} does not have permission to comment on this node'.format(auth.user))
        comment.save()
        comment.node.add_log(
            NodeLog.COMMENT_ADDED,
            {
                'project': comment.node.parent_id,
                'node': comment.node._id,
                'user': comment.user._id,
                'comment': comment._id,
            },
            auth=auth,
            save=False,
        )
        # One save covers the log addition above.
        comment.node.save()
        project_signals.comment_added.send(comment, auth=auth)
        return comment

    def edit(self, content, auth, save=False):
        """Replace the comment text; only the original author may edit.

        :raises PermissionsError: if ``auth`` is not the comment's author
            with comment access on the node
        """
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            raise PermissionsError('{0!r} does not have permission to edit this comment'.format(auth.user))
        self.content = content
        self.modified = True
        self.node.add_log(
            NodeLog.COMMENT_UPDATED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()

    def delete(self, auth, save=False):
        """Soft-delete the comment; only the original author may delete.

        :raises PermissionsError: if ``auth`` is not the comment's author
            with comment access on the node
        """
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            # Fix: message previously (incorrectly) referred to commenting.
            raise PermissionsError('{0!r} does not have permission to delete this comment'.format(auth.user))
        self.is_deleted = True
        self.node.add_log(
            NodeLog.COMMENT_REMOVED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()

    def undelete(self, auth, save=False):
        """Reverse a soft delete; only the original author may undelete.

        :raises PermissionsError: if ``auth`` is not the comment's author
            with comment access on the node
        """
        if not self.node.can_comment(auth) or self.user._id != auth.user._id:
            # Fix: message previously (incorrectly) referred to commenting.
            raise PermissionsError('{0!r} does not have permission to undelete this comment'.format(auth.user))
        self.is_deleted = False
        self.node.add_log(
            NodeLog.COMMENT_ADDED,
            {
                'project': self.node.parent_id,
                'node': self.node._id,
                'user': self.user._id,
                'comment': self._id,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()

    def report_abuse(self, user, save=False, **kwargs):
        """Report that a comment is abuse.

        :param User user: User submitting the report
        :param bool save: Save changes
        :param dict kwargs: Report details
        :raises: ValueError if the user submitting abuse is the same as the
            user who posted the comment
        """
        if user == self.user:
            # Fix: give the ValueError an explanatory message.
            raise ValueError('User cannot report his or her own comment')
        self.reports[user._id] = kwargs
        if save:
            self.save()

    def unreport_abuse(self, user, save=False):
        """Revoke report of abuse.

        :param User user: User who submitted the report
        :param bool save: Save changes
        :raises: ValueError if user has not reported comment as abuse
        """
        try:
            self.reports.pop(user._id)
        except KeyError:
            raise ValueError('User has not reported comment as abuse')
        if save:
            self.save()
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
    """One entry in a node's activity log (an action plus its parameters)."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
    # One of the action constants below; rendered via log_templates.mako.
    action = fields.StringField(index=True)
    params = fields.DictionaryField()
    should_hide = fields.BooleanField(default=False)
    __indices__ = [
        {
            'key_or_list': [
                ('__backrefs.logged.node.logs.$', 1)
            ],
        }
    ]
    was_connected_to = fields.ForeignField('node', list=True)
    user = fields.ForeignField('user', index=True)
    # Identifier for actors who are not registered users.
    foreign_user = fields.StringField()

    DATE_FORMAT = '%m/%d/%Y %H:%M UTC'

    # Log action constants -- NOTE: templates stored in log_templates.mako
    CREATED_FROM = 'created_from'
    PROJECT_CREATED = 'project_created'
    PROJECT_REGISTERED = 'project_registered'
    PROJECT_DELETED = 'project_deleted'
    NODE_CREATED = 'node_created'
    NODE_FORKED = 'node_forked'
    NODE_REMOVED = 'node_removed'
    POINTER_CREATED = 'pointer_created'
    POINTER_FORKED = 'pointer_forked'
    POINTER_REMOVED = 'pointer_removed'
    WIKI_UPDATED = 'wiki_updated'
    WIKI_DELETED = 'wiki_deleted'
    WIKI_RENAMED = 'wiki_renamed'
    MADE_WIKI_PUBLIC = 'made_wiki_public'
    MADE_WIKI_PRIVATE = 'made_wiki_private'
    CONTRIB_ADDED = 'contributor_added'
    CONTRIB_REMOVED = 'contributor_removed'
    CONTRIB_REORDERED = 'contributors_reordered'
    PERMISSIONS_UPDATED = 'permissions_updated'
    MADE_PRIVATE = 'made_private'
    MADE_PUBLIC = 'made_public'
    TAG_ADDED = 'tag_added'
    TAG_REMOVED = 'tag_removed'
    EDITED_TITLE = 'edit_title'
    EDITED_DESCRIPTION = 'edit_description'
    CHANGED_LICENSE = 'license_changed'
    UPDATED_FIELDS = 'updated_fields'
    FILE_MOVED = 'addon_file_moved'
    FILE_COPIED = 'addon_file_copied'
    FILE_RENAMED = 'addon_file_renamed'
    FOLDER_CREATED = 'folder_created'
    FILE_ADDED = 'file_added'
    FILE_UPDATED = 'file_updated'
    FILE_REMOVED = 'file_removed'
    FILE_RESTORED = 'file_restored'
    ADDON_ADDED = 'addon_added'
    ADDON_REMOVED = 'addon_removed'
    COMMENT_ADDED = 'comment_added'
    COMMENT_REMOVED = 'comment_removed'
    COMMENT_UPDATED = 'comment_updated'
    CITATION_ADDED = 'citation_added'
    CITATION_EDITED = 'citation_edited'
    CITATION_REMOVED = 'citation_removed'
    MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
    MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
    EXTERNAL_IDS_ADDED = 'external_ids_added'
    EMBARGO_APPROVED = 'embargo_approved'
    EMBARGO_CANCELLED = 'embargo_cancelled'
    EMBARGO_COMPLETED = 'embargo_completed'
    EMBARGO_INITIATED = 'embargo_initiated'
    RETRACTION_APPROVED = 'retraction_approved'
    RETRACTION_CANCELLED = 'retraction_cancelled'
    RETRACTION_INITIATED = 'retraction_initiated'
    REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
    REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
    REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
    actions = [CREATED_FROM, PROJECT_CREATED, PROJECT_REGISTERED, PROJECT_DELETED, NODE_CREATED, NODE_FORKED, NODE_REMOVED, POINTER_CREATED, POINTER_FORKED, POINTER_REMOVED, WIKI_UPDATED, WIKI_DELETED, WIKI_RENAMED, MADE_WIKI_PUBLIC, MADE_WIKI_PRIVATE, CONTRIB_ADDED, CONTRIB_REMOVED, CONTRIB_REORDERED, PERMISSIONS_UPDATED, MADE_PRIVATE, MADE_PUBLIC, TAG_ADDED, TAG_REMOVED, EDITED_TITLE, EDITED_DESCRIPTION, UPDATED_FIELDS, FILE_MOVED, FILE_COPIED, FOLDER_CREATED, FILE_ADDED, FILE_UPDATED, FILE_REMOVED, FILE_RESTORED, ADDON_ADDED, ADDON_REMOVED, COMMENT_ADDED, COMMENT_REMOVED, COMMENT_UPDATED, MADE_CONTRIBUTOR_VISIBLE, MADE_CONTRIBUTOR_INVISIBLE, EXTERNAL_IDS_ADDED, EMBARGO_APPROVED, EMBARGO_CANCELLED, EMBARGO_COMPLETED, EMBARGO_INITIATED, RETRACTION_APPROVED, RETRACTION_CANCELLED, RETRACTION_INITIATED, REGISTRATION_APPROVAL_CANCELLED, REGISTRATION_APPROVAL_INITIATED, REGISTRATION_APPROVAL_APPROVED, CITATION_ADDED, CITATION_EDITED, CITATION_REMOVED]

    def __repr__(self):
        return ('<NodeLog({self.action!r}, params={self.params!r}) '
                'with id {self._id!r}>').format(self=self)

    # For Django compatibility
    @property
    def pk(self):
        return self._id

    @property
    def node(self):
        """Return the :class:`Node` associated with this log."""
        return (
            Node.load(self.params.get('node')) or
            Node.load(self.params.get('project'))
        )

    @property
    def tz_date(self):
        '''Return the timezone-aware date.
        '''
        # Date should always be defined, but a few logs in production are
        # missing dates; return None and log error if date missing
        if self.date:
            return self.date.replace(tzinfo=pytz.UTC)
        logger.error('Date missing on NodeLog {}'.format(self._primary_key))

    @property
    def formatted_date(self):
        '''Return the timezone-aware, ISO-formatted string representation of
        this log's date; None when the date is missing.
        '''
        if self.tz_date:
            return self.tz_date.isoformat()

    def resolve_node(self, node):
        """A single `NodeLog` record may be attached to multiple `Node` records
        (parents, forks, registrations, etc.), so the node that the log refers
        to may not be the same as the node the user is viewing. Use
        `resolve_node` to determine the relevant node to use for permission
        checks.

        :param Node node: Node being viewed
        :returns: the relevant Node, or False when none applies
        """
        if self.node == node or self.node in node.nodes:
            return self.node
        if node.is_fork_of(self.node) or node.is_registration_of(self.node):
            return node
        for child in node.nodes:
            # Bugfix: the second test previously re-checked ``node`` (always
            # False here -- it was already handled above) instead of ``child``,
            # so registration children were never resolved.
            if child.is_fork_of(self.node) or child.is_registration_of(self.node):
                return child
        return False

    def can_view(self, node, auth):
        """Whether ``auth`` may view this log in the context of ``node``."""
        node_to_check = self.resolve_node(node)
        if node_to_check:
            return node_to_check.can_view(auth)
        return False

    def _render_log_contributor(self, contributor, anonymous=False):
        """Serialize one contributor reference from a log's params.

        :param contributor: user id, or (legacy) a dict for non-registered
            users
        :param bool anonymous: scrub identifying info when True
        :returns: dict for template rendering, or None if unresolvable
        """
        user = User.load(contributor)
        if not user:
            # Handle legacy non-registered users, which were
            # represented as a dict
            if isinstance(contributor, dict):
                if 'nr_name' in contributor:
                    return {
                        'fullname': contributor['nr_name'],
                        'registered': False,
                    }
            return None
        if self.node:
            fullname = user.display_full_name(node=self.node)
        else:
            fullname = user.fullname
        return {
            'id': privacy_info_handle(user._primary_key, anonymous),
            'fullname': privacy_info_handle(fullname, anonymous, name=True),
            'registered': user.is_registered,
        }
class Tag(StoredObject):
    """A tag label; the tag text itself is the primary key (max 128 chars)."""
    _id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
    def __repr__(self):
        return '<Tag() with id {self._id!r}>'.format(self=self)
    @property
    def url(self):
        """Relative search URL listing content carrying this tag."""
        return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
    """A link to a Node. The Pointer delegates all but a few methods to its
    contained Node. Forking and registration are overridden such that the
    link is cloned, but its contained Node is not.
    """
    #: Whether this is a pointer or not
    primary = False
    _id = fields.StringField()
    node = fields.ForeignField('node', backref='_pointed')
    _meta = {'optimistic': True}
    def _clone(self):
        # Copy the pointer record itself; the pointed-at node is shared,
        # not duplicated. Returns None when there is no target node.
        if self.node:
            clone = self.clone()
            clone.node = self.node
            clone.save()
            return clone
    def fork_node(self, *args, **kwargs):
        # Forking a pointer only clones the link, never the target node.
        return self._clone()
    def register_node(self, *args, **kwargs):
        # Same for registration: the link is cloned, the node is not.
        return self._clone()
    def use_as_template(self, *args, **kwargs):
        return self._clone()
    def resolve(self):
        """Return the Node this pointer refers to."""
        return self.node
    def __getattr__(self, item):
        """Delegate attribute access to the node being pointed to."""
        # Prevent backref lookups from being overriden by proxied node
        try:
            return super(Pointer, self).__getattr__(item)
        except AttributeError:
            pass
        if self.node:
            return getattr(self.node, item)
        raise AttributeError(
            'Pointer object has no attribute {0}'.format(
                item
            )
        )
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return its parent node.
    """
    # The `parent_node` property of the `Pointer` schema refers to the parents
    # of the pointed-at `Node`, not the parents of the `Pointer`; use the
    # back-reference syntax to find the parents of the `Pointer`.
    parents = pointer.node__parent
    assert len(parents) == 1, 'Pointer must have exactly one parent.'
    return parents[0]
def validate_category(value):
    """Validator for Node#category. Makes sure that the value is one of the
    categories defined in CATEGORY_MAP.

    :param str value: category identifier to validate
    :raises ValidationValueError: if the value is not a known category
    :returns: True when valid
    """
    # Test membership on the dict directly -- `.keys()` builds a list in
    # Python 2, turning an O(1) lookup into an O(n) scan.
    if value not in Node.CATEGORY_MAP:
        raise ValidationValueError('Invalid value for category.')
    return True
def validate_title(value):
    """Validator for Node#title. Makes sure that the value exists and is not
    above 200 characters.
    """
    # Reject missing or whitespace-only titles before sanitizing.
    if value is None or not value.strip():
        raise ValidationValueError('Title cannot be blank.')
    cleaned = sanitize.strip_html(value)
    # A title made only of HTML markup sanitizes to nothing -- reject it.
    if cleaned is None or not cleaned.strip():
        raise ValidationValueError('Invalid title.')
    if len(cleaned) > 200:
        raise ValidationValueError('Title cannot exceed 200 characters.')
    return True
def validate_user(value):
    """Validator for Node#expanded: the single key, when the dict is
    non-empty, must be the id of an existing user."""
    if value == {}:
        return True
    user_id = value.iterkeys().next()
    if User.find(Q('_id', 'eq', user_id)).count() != 1:
        raise ValidationValueError('User does not exist.')
    return True
class NodeUpdateError(Exception):
    """Raised by Node.update when a field change cannot be applied.

    :param str reason: human-readable explanation of the failure
    :param key: the field name whose update failed
    """
    def __init__(self, reason, key, *args, **kwargs):
        super(NodeUpdateError, self).__init__(*args, **kwargs)
        self.reason = reason
        self.key = key
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
    """A project, component, folder, or registration.

    Nodes form a tree via ``nodes``/``parent_node``; forks and registrations
    point back to their source through ``forked_from`` and
    ``registered_from``.
    """
    #: Whether this is a pointer or not
    primary = True
    __indices__ = [{
        'unique': False,
        'key_or_list': [
            ('tags.$', pymongo.ASCENDING),
            ('is_public', pymongo.ASCENDING),
            ('is_deleted', pymongo.ASCENDING),
        ]
    }]
    # Node fields that trigger an update to Solr on save
    SOLR_UPDATE_FIELDS = {
        'title',
        'category',
        'description',
        'visible_contributor_ids',
        'tags',
        'is_fork',
        'is_registration',
        'retraction',
        'embargo',
        'is_public',
        'is_deleted',
        'wiki_pages_current',
        'is_retracted',
        'node_license',
    }
    # Maps category identifier => Human-readable representation for use in
    # titles, menus, etc.
    # Use an OrderedDict so that menu items show in the correct order
    CATEGORY_MAP = OrderedDict([
        ('analysis', 'Analysis'),
        ('communication', 'Communication'),
        ('data', 'Data'),
        ('hypothesis', 'Hypothesis'),
        ('instrumentation', 'Instrumentation'),
        ('methods and measures', 'Methods and Measures'),
        ('procedure', 'Procedure'),
        ('project', 'Project'),
        ('software', 'Software'),
        ('other', 'Other'),
        ('', 'Uncategorized')
    ])
    # Fields that are writable by Node.update
    WRITABLE_WHITELIST = [
        'title',
        'description',
        'category',
        'is_public',
        'node_license',
    ]
    # Named constants
    PRIVATE = 'private'
    PUBLIC = 'public'
    _id = fields.StringField(primary=True)
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
    date_modified = fields.DateTimeField()
    # Privacy
    is_public = fields.BooleanField(default=False, index=True)
    # User mappings
    # permissions maps user id -> list of permission strings
    permissions = fields.DictionaryField()
    # Ordered ids of bibliographic ('visible') contributors
    visible_contributor_ids = fields.StringField(list=True)
    # Project Organization
    is_dashboard = fields.BooleanField(default=False, index=True)
    is_folder = fields.BooleanField(default=False, index=True)
    # Expanded: Dictionary field mapping user IDs to expand state of this node:
    # {
    #   'icpnw': True,
    #   'cdi38': False,
    # }
    expanded = fields.DictionaryField(default={}, validate=validate_user)
    is_deleted = fields.BooleanField(default=False, index=True)
    deleted_date = fields.DateTimeField(index=True)
    is_registration = fields.BooleanField(default=False, index=True)
    registered_date = fields.DateTimeField(index=True)
    registered_user = fields.ForeignField('user', backref='registered')
    # A list of all MetaSchemas for which this Node has registered_meta
    registered_schema = fields.ForeignField('metaschema', backref='registered', list=True, default=list)
    # A set of <metaschema._id>: <schema> pairs, where <schema> is a
    # flat set of <question_id>: <response> pairs -- these question ids
    # map to the ids in the registration's MetaSchema (see registered_schema).
    # {
    #   <question_id>: {
    #     'value': <value>,
    #     'comments': [
    #       <comment>
    #     ]
    #   }
    # }
    registered_meta = fields.DictionaryField()
    registration_approval = fields.ForeignField('registrationapproval')
    retraction = fields.ForeignField('retraction')
    embargo = fields.ForeignField('embargo')
    is_fork = fields.BooleanField(default=False, index=True)
    forked_date = fields.DateTimeField(index=True)
    title = fields.StringField(validate=validate_title)
    description = fields.StringField()
    category = fields.StringField(validate=validate_category, index=True)
    node_license = fields.ForeignField('nodelicenserecord')
    # One of 'public', 'private'
    # TODO: Add validator
    comment_level = fields.StringField(default='private')
    wiki_pages_current = fields.DictionaryField()
    wiki_pages_versions = fields.DictionaryField()
    # Dictionary field mapping node wiki page to sharejs private uuid.
    # {<page_name>: <sharejs_id>}
    wiki_private_uuids = fields.DictionaryField()
    file_guid_to_share_uuids = fields.DictionaryField()
    creator = fields.ForeignField('user', index=True)
    contributors = fields.ForeignField('user', list=True)
    users_watching_node = fields.ForeignField('user', list=True, backref='watched')
    logs = fields.ForeignField('nodelog', list=True, backref='logged')
    tags = fields.ForeignField('tag', list=True, backref='tagged')
    # Tags for internal use
    system_tags = fields.StringField(list=True)
    nodes = fields.AbstractForeignField(list=True, backref='parent')
    forked_from = fields.ForeignField('node', backref='forked', index=True)
    registered_from = fields.ForeignField('node', backref='registrations', index=True)
    root = fields.ForeignField('node', index=True)
    parent_node = fields.ForeignField('node', index=True)
    # The node (if any) used as a template for this node's creation
    template_node = fields.ForeignField('node', backref='template_node', index=True)
    piwik_site_id = fields.StringField()
    # Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
    # {<User.id>: [<Node._id>, <Node2._id>, ...] }
    child_node_subscriptions = fields.DictionaryField(default=dict)
    alternative_citations = fields.ForeignField('alternativecitation', list=True, backref='citations')
    _meta = {
        'optimistic': True,
    }
    def __init__(self, *args, **kwargs):
        # Pull tags out of kwargs so they can go through add_tag below.
        tags = kwargs.pop('tags', [])
        super(Node, self).__init__(*args, **kwargs)
        # Ensure when Node is created with tags through API, tags are added to Tag
        if tags:
            for tag in tags:
                self.add_tag(tag, Auth(self.creator), save=False, log=False)
        if kwargs.get('_is_loaded', False):
            # Loaded from the database: do not re-apply creator defaults.
            return
        if self.creator:
            self.contributors.append(self.creator)
            self.set_visible(self.creator, visible=True, log=False)
            # Add default creator permissions
            for permission in CREATOR_PERMISSIONS:
                self.add_permission(self.creator, permission, save=False)
    def __repr__(self):
        return ('<Node(title={self.title!r}, category={self.category!r}) '
                'with _id {self._id!r}>').format(self=self)
    # For Django compatibility
    @property
    def pk(self):
        return self._id
    @property
    def license(self):
        """Effective license: this node's own license record, or the closest
        ancestor's license when this node has none of its own."""
        node_license = self.node_license
        if not node_license and self.parent_node:
            return self.parent_node.license
        return node_license
    @property
    def category_display(self):
        """The human-readable representation of this node's category."""
        return self.CATEGORY_MAP[self.category]
    # We need the following 2 properties in order to serialize related links in NodeRegistrationSerializer
    @property
    def registered_user_id(self):
        """The ID of the user who registered this node if this is a registration, else None.
        """
        if self.registered_user:
            return self.registered_user._id
        return None
    @property
    def registered_from_id(self):
        """The ID of the node this registration was created from if this is a
        registration, else None.
        """
        if self.registered_from:
            return self.registered_from._id
        return None
    @property
    def sanction(self):
        """The active sanction (registration approval, embargo, or
        retraction) on this node or, failing that, on the nearest ancestor;
        None when no sanction applies."""
        sanction = self.registration_approval or self.embargo or self.retraction
        if sanction:
            return sanction
        elif self.parent_node:
            return self.parent_node.sanction
        else:
            return None
    @property
    def is_pending_registration(self):
        # Only registrations can be pending registration approval.
        if not self.is_registration:
            return False
        if self.registration_approval is None:
            # No approval record of our own: defer to the parent, if any.
            if self.parent_node:
                return self.parent_node.is_pending_registration
            return False
        return self.registration_approval.is_pending_approval
    @property
    def is_registration_approved(self):
        if self.registration_approval is None:
            if self.parent_node:
                return self.parent_node.is_registration_approved
            return False
        return self.registration_approval.is_approved
    @property
    def is_retracted(self):
        if self.retraction is None:
            if self.parent_node:
                return self.parent_node.is_retracted
            return False
        return self.retraction.is_approved
    @property
    def is_pending_retraction(self):
        if self.retraction is None:
            if self.parent_node:
                return self.parent_node.is_pending_retraction
            return False
        return self.retraction.is_pending_approval
    @property
    def embargo_end_date(self):
        # NOTE: returns False (not None) when no embargo applies anywhere in
        # the ancestor chain.
        if self.embargo is None:
            if self.parent_node:
                return self.parent_node.embargo_end_date
            return False
        return self.embargo.embargo_end_date
    @property
    def is_pending_embargo(self):
        if self.embargo is None:
            if self.parent_node:
                return self.parent_node.is_pending_embargo
            return False
        return self.embargo.is_pending_approval
    @property
    def is_pending_embargo_for_existing_registration(self):
        """ Returns True if Node has an Embargo pending approval for an
        existing registrations. This is used specifically to ensure
        registrations pre-dating the Embargo feature do not get deleted if
        their respective Embargo request is rejected.
        """
        if self.embargo is None:
            if self.parent_node:
                return self.parent_node.is_pending_embargo_for_existing_registration
            return False
        return self.embargo.pending_registration
    @property
    def private_links(self):
        # All view-only links ever created for this node (backref).
        return self.privatelink__shared
    @property
    def private_links_active(self):
        """View-only links that have not been deleted."""
        return [x for x in self.private_links if not x.is_deleted]
    @property
    def private_link_keys_active(self):
        """Keys of the non-deleted view-only links."""
        return [x.key for x in self.private_links if not x.is_deleted]
    @property
    def private_link_keys_deleted(self):
        """Keys of the deleted view-only links."""
        return [x.key for x in self.private_links if x.is_deleted]
    def path_above(self, auth):
        """Slash-joined titles of all ancestors (root first), masking any
        the user cannot view."""
        parents = self.parents
        return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])
    @property
    def ids_above(self):
        """Set of ids of all ancestor nodes."""
        parents = self.parents
        return {p._id for p in parents}
    @property
    def nodes_active(self):
        """Child nodes/pointers that are not deleted."""
        return [x for x in self.nodes if not x.is_deleted]
    @property
    def draft_registrations_active(self):
        """Yield draft registrations branched from this node that have not
        yet produced a surviving registered node."""
        drafts = DraftRegistration.find(
            Q('branched_from', 'eq', self)
        )
        for draft in drafts:
            if not draft.registered_node or draft.registered_node.is_deleted:
                yield draft
    @property
    def has_active_draft_registrations(self):
        # Cheap emptiness test on the generator property above.
        try:
            next(self.draft_registrations_active)
        except StopIteration:
            return False
        else:
            return True
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
    def active_contributors(self, include=lambda n: True):
        """Yield contributors whose accounts are active, optionally filtered
        by the ``include`` predicate."""
        for contrib in self.contributors:
            if contrib.is_active and include(contrib):
                yield contrib
    def is_admin_parent(self, user):
        """Whether ``user`` has admin permission on this node or any ancestor.

        Each level is checked with check_parent=False; the recursive walk
        itself provides the cascade.
        """
        if self.has_permission(user, 'admin', check_parent=False):
            return True
        if self.parent_node:
            return self.parent_node.is_admin_parent(user)
        return False
    def can_view(self, auth):
        """Whether the request in ``auth`` may view this node."""
        # Anonymous viewers can only see public nodes.
        if not auth and not self.is_public:
            return False
        return (
            self.is_public or
            (auth.user and self.has_permission(auth.user, 'read')) or
            auth.private_key in self.private_link_keys_active or
            self.is_admin_parent(auth.user)
        )
    def is_expanded(self, user=None):
        """Return whether ``user`` has expanded this folder in the dashboard
        view.

        :param User user: User object to check
        :returns: Boolean if the folder is expanded.
        """
        # NOTE(review): despite the ``user=None`` default, a user is required
        # here -- ``user._id`` would raise AttributeError for None. Confirm
        # callers always pass a user.
        if user._id in self.expanded:
            return self.expanded[user._id]
        else:
            return False
    def expand(self, user=None):
        """Record that ``user`` expanded this folder and save."""
        self.expanded[user._id] = True
        self.save()
    def collapse(self, user=None):
        """Record that ``user`` collapsed this folder and save."""
        self.expanded[user._id] = False
        self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
    def is_fork_of(self, other):
        """Whether this node descends from ``other`` via fork links."""
        return self.is_derived_from(other, 'forked_from')
    def is_registration_of(self, other):
        """Whether this node descends from ``other`` via registrations."""
        return self.is_derived_from(other, 'registered_from')
    @property
    def forks(self):
        """List of forks of this node"""
        # Excludes deleted forks and forks that became registrations.
        return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
                Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param User user: User to grant permission to
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
    def remove_permission(self, user, permission, save=False):
        """Revoke permission from a user.

        :param User user: User to revoke permission from
        :param str permission: Permission to revoke
        :param bool save: Save changes
        :raises: ValueError if user does not have permission
        """
        try:
            # KeyError: user has no entry; ValueError: permission not held.
            self.permissions[user._id].remove(permission)
        except (KeyError, ValueError):
            raise ValueError('User does not have permission {0}'.format(permission))
        if save:
            self.save()
    def clear_permission(self, user, save=False):
        """Clear all permissions for a user.

        :param User user: User to revoke permission from
        :param bool save: Save changes
        :raises: ValueError if user not in permissions
        """
        try:
            self.permissions.pop(user._id)
        except KeyError:
            raise ValueError(
                'User {0} not in permissions list for node {1}'.format(
                    user._id, self._id,
                )
            )
        if save:
            self.save()
    def set_permissions(self, user, permissions, save=False):
        """Replace the user's permission list wholesale (no validation)."""
        self.permissions[user._id] = permissions
        if save:
            self.save()
def has_permission(self, user, permission, check_parent=True):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
if permission in self.permissions.get(user._id, []):
return True
if permission == 'read' and check_parent:
return self.is_admin_parent(user)
return False
    def has_permission_on_children(self, user, permission):
        """Checks if the given user has a given permission on any child nodes
        that are not registrations or deleted
        """
        if self.has_permission(user, permission):
            return True
        for node in self.nodes:
            # Skip pointers (non-primary) and deleted children.
            if not node.primary or node.is_deleted:
                continue
            if node.has_permission_on_children(user, permission):
                return True
        return False
    def has_addon_on_children(self, addon):
        """Checks if a given node has a specific addon on child nodes
        that are not registrations or deleted
        """
        if self.has_addon(addon):
            return True
        for node in self.nodes:
            # Same traversal as has_permission_on_children above.
            if not node.primary or node.is_deleted:
                continue
            if node.has_addon_on_children(addon):
                return True
        return False
    def get_permissions(self, user):
        """Get list of permissions for user.

        :param User user: User to check
        :returns: List of permissions; empty list when the user has no entry
            in the permissions map
        """
        return self.permissions.get(user._id, [])
    def adjust_permissions(self):
        """Drop permission entries for users no longer in ``contributors``."""
        # .keys() copies the key list in Python 2, so popping while
        # iterating is safe here.
        for key in self.permissions.keys():
            # NOTE(review): compares an id string against the contributors
            # foreign-field list; assumes modular-odm list membership matches
            # by primary key -- confirm.
            if key not in self.contributors:
                self.permissions.pop(key)
    @property
    def visible_contributors(self):
        """User objects for the bibliographic contributors, in order."""
        return [
            User.load(_id)
            for _id in self.visible_contributor_ids
        ]
    @property
    def parents(self):
        """List of ancestor nodes, nearest parent first."""
        if self.parent_node:
            return [self.parent_node] + self.parent_node.parents
        return []
    @property
    def admin_contributor_ids(self, contributors=None):
        """Ids of users with admin permission on an ancestor who are not
        already contributors on this node."""
        # NOTE(review): the ``contributors`` parameter is dead code -- a
        # property cannot receive arguments; candidate for removal.
        contributor_ids = self.contributors._to_primary_keys()
        admin_ids = set()
        for parent in self.parents:
            admins = [
                user for user, perms in parent.permissions.iteritems()
                if 'admin' in perms
            ]
            admin_ids.update(set(admins).difference(contributor_ids))
        return admin_ids
    @property
    def admin_contributors(self):
        """User objects for admin_contributor_ids, sorted by family name."""
        return sorted(
            [User.load(_id) for _id in self.admin_contributor_ids],
            key=lambda user: user.family_name,
        )
    def get_visible(self, user):
        """Whether ``user`` is a bibliographic (visible) contributor.

        :raises ValueError: if the user is not a contributor at all
        """
        if not self.is_contributor(user):
            raise ValueError(u'User {0} not in contributors'.format(user))
        return user._id in self.visible_contributor_ids
    def update_visible_ids(self, save=False):
        """Update the order of `visible_contributor_ids`. Updating on making
        a contributor visible is more efficient than recomputing order on
        accessing `visible_contributors`.
        """
        # Re-derive ordering from `contributors`, keeping only still-visible ids.
        self.visible_contributor_ids = [
            contributor._id
            for contributor in self.contributors
            if contributor._id in self.visible_contributor_ids
        ]
        if save:
            self.save()
    def set_visible(self, user, visible, log=True, auth=None, save=False):
        """Toggle a contributor's bibliographic visibility, logging the change
        unless ``log`` is False. No-op when already in the requested state.

        :raises ValueError: if the user is not a contributor, or hiding the
            user would leave no visible contributor
        """
        if not self.is_contributor(user):
            raise ValueError(u'User {0} not in contributors'.format(user))
        if visible and user._id not in self.visible_contributor_ids:
            self.visible_contributor_ids.append(user._id)
            self.update_visible_ids(save=False)
        elif not visible and user._id in self.visible_contributor_ids:
            # At least one contributor must stay visible.
            if len(self.visible_contributor_ids) == 1:
                raise ValueError('Must have at least one visible contributor')
            self.visible_contributor_ids.remove(user._id)
        else:
            # Already in the requested state; nothing to change or log.
            return
        message = (
            NodeLog.MADE_CONTRIBUTOR_VISIBLE
            if visible
            else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
        )
        if log:
            self.add_log(
                message,
                params={
                    'parent': self.parent_id,
                    'node': self._id,
                    'contributors': [user._id],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
def can_comment(self, auth):
if self.comment_level == 'public':
return auth.logged_in and (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read'))
)
return self.is_contributor(auth.user)
    def set_node_license(self, license_id, year, copyright_holders, auth, save=True):
        """Set or replace the node's license and add a CHANGED_LICENSE log.

        :param license_id: id of a NodeLicense to apply
        :param year: copyright year recorded on the license record
        :param copyright_holders: list of holder names (None becomes [])
        :param Auth auth: must belong to an admin on this node
        :param bool save: save the node after updating
        :raises PermissionsError: if the user is not an admin
        :raises NodeStateError: if no NodeLicense matches *license_id*
        """
        if not self.has_permission(auth.user, ADMIN):
            raise PermissionsError("Only admins can change a project's license.")
        try:
            node_license = NodeLicense.find_one(
                Q('id', 'eq', license_id)
            )
        except NoResultsFound:
            raise NodeStateError("Trying to update a Node with an invalid license.")
        # Reuse the existing license record when present; otherwise create one.
        record = self.node_license
        if record is None:
            record = NodeLicenseRecord(
                node_license=node_license
            )
        record.node_license = node_license
        record.year = year
        record.copyright_holders = copyright_holders or []
        record.save()
        self.node_license = record
        self.add_log(
            action=NodeLog.CHANGED_LICENSE,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'new_license': node_license.name
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
    def update(self, fields, auth=None, save=True):
        """Update the node with the given fields.

        :param dict fields: Dictionary of field_name:value pairs.
        :param Auth auth: Auth object for the user making the update.
        :param bool save: Whether to save after updating the object.
        :raises NodeUpdateError: if the node is a registration, an attribute
            rejects its value, or an attribute does not exist.
        :return: saved field names from ``self.save()`` (``False`` when
            *fields* is empty; ``[]`` when ``save=False``).
        """
        if self.is_registration:
            raise NodeUpdateError(reason="Registered content cannot be updated")
        if not fields:  # Bail out early if there are no fields to update
            return False
        values = {}
        for key, value in fields.iteritems():
            # Silently skip fields that are not whitelisted for update.
            if key not in self.WRITABLE_WHITELIST:
                continue
            # Title and description have special methods for logging purposes
            if key == 'title':
                self.set_title(title=value, auth=auth, save=False)
            elif key == 'description':
                self.set_description(description=value, auth=auth, save=False)
            elif key == 'is_public':
                self.set_privacy(
                    Node.PUBLIC if value else Node.PRIVATE,
                    auth=auth,
                    log=True,
                    save=False
                )
            elif key == 'node_license':
                self.set_node_license(
                    value.get('id'),
                    value.get('year'),
                    value.get('copyright_holders'),
                    auth,
                    save=False
                )
            else:
                with warnings.catch_warnings():
                    try:
                        # This is in place because historically projects and components
                        # live on different ElasticSearch indexes, and at the time of Node.save
                        # there is no reliable way to check what the old Node.category
                        # value was. When the category changes it is possible to have duplicate/dead
                        # search entries, so always delete the ES doc on category change
                        # TODO: consolidate Node indexes into a single index, refactor search
                        if key == 'category':
                            self.delete_search_entry()
                        ###############
                        old_value = getattr(self, key)
                        if old_value != value:
                            # Record old/new pair for the UPDATED_FIELDS log.
                            values[key] = {
                                'old': old_value,
                                'new': value,
                            }
                            setattr(self, key, value)
                    except AttributeError:
                        raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
                    except warnings.Warning:
                        raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
        if save:
            updated = self.save()
        else:
            updated = []
        for key in values:
            # Re-read the new value: setters/save may have normalized it.
            values[key]['new'] = getattr(self, key)
        if values:
            self.add_log(
                NodeLog.UPDATED_FIELDS,
                params={
                    'node': self._id,
                    'updated_fields': {
                        key: {
                            'old': values[key]['old'],
                            'new': values[key]['new']
                        }
                        for key in values
                    }
                },
                auth=auth)
        return updated
    def save(self, *args, **kwargs):
        """Persist the node, handling first-save bookkeeping (default addons,
        PROJECT_CREATED log, parent linkage), search-index refreshes, and the
        Piwik update task.

        :param update_piwik: keyword-only; skip the Piwik task when False.
        :param suppress_log: keyword-only; skip first-save logging and
            default-addon setup when True.
        :return: set of saved field names (StoredObject contract).
        """
        update_piwik = kwargs.pop('update_piwik', True)
        self.adjust_permissions()
        # The node is being created if it was never loaded from the database.
        first_save = not self._is_loaded
        if first_save and self.is_dashboard:
            existing_dashboards = self.find_for_user(
                self.creator,
                Q('is_dashboard', 'eq', True)
            )
            if existing_dashboards.count() > 0:
                raise NodeStateError("Only one dashboard allowed per user.")
        is_original = not self.is_registration and not self.is_fork
        if 'suppress_log' in kwargs.keys():
            suppress_log = kwargs['suppress_log']
            del kwargs['suppress_log']
        else:
            suppress_log = False
        self.root = self._root._id
        self.parent_node = self._parent_node
        # If you're saving a property, do it above this super call
        saved_fields = super(Node, self).save(*args, **kwargs)
        if first_save and is_original and not suppress_log:
            # TODO: This logic also exists in self.use_as_template()
            for addon in settings.ADDONS_AVAILABLE:
                if 'node' in addon.added_default:
                    self.add_addon(addon.short_name, auth=None, log=False)
            # Define log fields for non-component project
            log_action = NodeLog.PROJECT_CREATED
            log_params = {
                'node': self._primary_key,
            }
            if getattr(self, 'parent', None):
                # Append log to parent
                self.parent.nodes.append(self)
                self.parent.save()
                log_params.update({'parent_node': self.parent._primary_key})
            # Add log with appropriate fields
            self.add_log(
                log_action,
                params=log_params,
                auth=Auth(user=self.creator),
                log_date=self.date_created,
                save=True,
            )
        # Only update Solr if at least one stored field has changed, and if
        # public or privacy setting has changed
        need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
        if not self.is_public:
            if first_save or 'is_public' not in saved_fields:
                need_update = False
        if self.is_folder or self.archiving:
            need_update = False
        if need_update:
            self.update_search()
        if 'node_license' in saved_fields:
            # Children without their own license inherit this one, so their
            # search documents must be refreshed too.
            children = [c for c in self.get_descendants_recursive(
                include=lambda n: n.node_license is None
            )]
            # this returns generator, that would get unspooled anyways
            if children:
                Node.bulk_update_search(children)
        # This method checks what has changed.
        if settings.PIWIK_HOST and update_piwik:
            piwik_tasks.update_node(self._id, saved_fields)
        # Return expected value for StoredObject::save
        return saved_fields
######################################
# Methods that return a new instance #
######################################
    def use_as_template(self, auth, changes=None, top_level=True):
        """Create a new project, using an existing project as a template.

        :param auth: The user to be assigned as creator
        :param changes: A dictionary of changes, keyed by node id, which
                        override the attributes of the template project or its
                        children.
        :param bool top_level: True only on the outermost call; controls the
                        "Templated from" title prefix.
        :return: The `Node` instance created.
        """
        changes = changes or dict()
        # build the dict of attributes to change for the new node
        try:
            attributes = changes[self._id]
            # TODO: explicitly define attributes which may be changed.
        except (AttributeError, KeyError):
            attributes = dict()
        new = self.clone()
        # clear permissions, which are not cleared by the clone method
        new.permissions = {}
        new.visible_contributor_ids = []
        # Clear quasi-foreign fields
        new.wiki_pages_current = {}
        new.wiki_pages_versions = {}
        new.wiki_private_uuids = {}
        new.file_guid_to_share_uuids = {}
        # set attributes which may be overridden by `changes`
        new.is_public = False
        new.description = None
        # apply `changes`
        for attr, val in attributes.iteritems():
            setattr(new, attr, val)
        # set attributes which may NOT be overridden by `changes`
        new.creator = auth.user
        new.template_node = self
        new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
        new.is_fork = False
        new.is_registration = False
        new.piwik_site_id = None
        new.node_license = self.license.copy() if self.license else None
        # If that title hasn't been changed, apply the default prefix (once)
        if (new.title == self.title
                and top_level
                and language.TEMPLATED_FROM_PREFIX not in new.title):
            new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
        # Slight hack - date_created is a read-only field.
        new._fields['date_created'].__set__(
            new,
            datetime.datetime.utcnow(),
            safe=True
        )
        new.save(suppress_log=True)
        # Log the creation
        new.add_log(
            NodeLog.CREATED_FROM,
            params={
                'node': new._primary_key,
                'template_node': {
                    'id': self._primary_key,
                    'url': self.url,
                    'title': self.title,
                },
            },
            auth=auth,
            log_date=new.date_created,
            save=False,
        )
        # add mandatory addons
        # TODO: This logic also exists in self.save()
        for addon in settings.ADDONS_AVAILABLE:
            if 'node' in addon.added_default:
                new.add_addon(addon.short_name, auth=None, log=False)
        # deal with the children of the node, if any
        new.nodes = [
            x.use_as_template(auth, changes, top_level=False)
            for x in self.nodes
            if x.can_view(auth)
        ]
        new.save()
        return new
############
# Pointers #
############
    def add_pointer(self, node, auth, save=True):
        """Add a pointer to a node.

        :param Node node: Node to add
        :param Auth auth: Consolidated authorization
        :param bool save: Save changes
        :return: Created pointer
        :raises ValueError: if the node is already linked, is a folder that is
            already pointed at, or is the dashboard
        :raises NodeStateError: if this node is a registration
        """
        # Fail if node already in nodes / pointers. Note: cast node and node
        # to primary keys to test for conflicts with both nodes and pointers
        # contained in `self.nodes`.
        if node._id in self.node_ids:
            raise ValueError(
                'Pointer to node {0} already in list'.format(node._id)
            )
        if self.is_registration:
            raise NodeStateError('Cannot add a pointer to a registration')
        # If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
        # Also, no pointers to the dashboard project, which could cause loops as well.
        already_pointed = node.pointed
        if node.is_folder and len(already_pointed) > 0:
            raise ValueError(
                'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
            )
        if node.is_dashboard:
            raise ValueError(
                'Pointer to dashboard ({0}) not allowed.'.format(node._id)
            )
        # Append pointer
        pointer = Pointer(node=node)
        pointer.save()
        self.nodes.append(pointer)
        # Add log
        self.add_log(
            action=NodeLog.POINTER_CREATED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'pointer': {
                    'id': pointer.node._id,
                    'url': pointer.node.url,
                    'title': pointer.node.title,
                    'category': pointer.node.category,
                },
            },
            auth=auth,
            save=False,
        )
        # Optionally save changes
        if save:
            self.save()
        return pointer
    def rm_pointer(self, pointer, auth):
        """Remove a pointer.

        :param Pointer pointer: Pointer to remove
        :param Auth auth: Consolidated authorization
        :raises ValueError: if the pointer is not attached to this node
        """
        if pointer not in self.nodes:
            raise ValueError('Node link does not belong to the requested node.')
        # Remove `Pointer` object; will also remove self from `nodes` list of
        # parent node
        Pointer.remove_one(pointer)
        # Add log
        self.add_log(
            action=NodeLog.POINTER_REMOVED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'pointer': {
                    'id': pointer.node._id,
                    'url': pointer.node.url,
                    'title': pointer.node.title,
                    'category': pointer.node.category,
                },
            },
            auth=auth,
            save=False,
        )
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
    def node_and_primary_descendants(self):
        """Return an iterator for a node and all of its primary (non-pointer)
        descendants, depth-first, starting with the node itself.
        """
        return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary))
@property
def depth(self):
return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
"""
Recursively find the first set of descedants under a given node that meet a given condition
returns a list of [(node, [children]), ...]
"""
ret = []
for node in self.nodes:
if condition(auth, node):
# base case
ret.append((node, []))
else:
ret.append((node, node.next_descendants(auth, condition)))
ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches
return ret
def get_descendants_recursive(self, include=lambda n: True):
for node in self.nodes:
if include(node):
yield node
if node.primary:
for descendant in node.get_descendants_recursive(include):
if include(descendant):
yield descendant
    def get_aggregate_logs_query(self, auth):
        """Build a query matching the non-hidden logs of this node and of
        every descendant the user in *auth* can view.

        :param Auth auth: used to filter descendants by view permission
        :return: a modular-odm query object
        """
        ids = [self._id] + [n._id
                            for n in self.get_descendants_recursive()
                            if n.can_view(auth)]
        query = Q('__backrefs.logged.node.logs', 'in', ids) & Q('should_hide', 'ne', True)
        return query
    def get_aggregate_logs_queryset(self, auth):
        """Return the logs matched by `get_aggregate_logs_query`, newest
        first (descending `_id` order)."""
        query = self.get_aggregate_logs_query(auth)
        return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
    def resolve(self):
        # A concrete node resolves to itself; presumably the counterpart on
        # Pointer resolves to its target -- confirm against Pointer.resolve.
        return self
    def fork_pointer(self, pointer, auth, save=True):
        """Replace a pointer with a fork. If the pointer points to a project,
        fork the project and replace the pointer with a new pointer pointing
        to the fork. If the pointer points to a component, fork the component
        and add it to the current node.

        :param Pointer pointer: pointer attached to this node
        :param Auth auth:
        :param bool save:
        :return: Forked node
        :raises ValueError: if the pointer is not attached here or the fork
            could not be created
        """
        # Fail if pointer not contained in `nodes`
        try:
            index = self.nodes.index(pointer)
        except ValueError:
            raise ValueError('Pointer {0} not in list'.format(pointer._id))
        # Get pointed node
        node = pointer.node
        # Fork into current node and replace pointer with forked component
        forked = node.fork_node(auth)
        if forked is None:
            raise ValueError('Could not fork node')
        self.nodes[index] = forked
        # Add log
        self.add_log(
            NodeLog.POINTER_FORKED,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'pointer': {
                    'id': pointer.node._id,
                    'url': pointer.node.url,
                    'title': pointer.node.title,
                    'category': pointer.node.category,
                },
            },
            auth=auth,
            save=False,
        )
        # Optionally save changes
        if save:
            self.save()
        # Garbage-collect pointer. Note: Must save current node before
        # removing pointer, else remove will fail when trying to remove
        # backref from self to pointer.
        Pointer.remove_one(pointer)
        # Return forked content
        return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
return list(reversed(self.logs)[:n])
    def set_title(self, title, auth, save=False):
        """Set the title of this Node and log it.

        :param str title: The new title.
        :param auth: All the auth information including user, API key.
        :param bool save: Save the node after updating.
        :return: ``False`` when the sanitized title equals the current one
            (no change, no log); otherwise ``None``.
        """
        # Called so validation does not have to wait until save.
        validate_title(title)
        original_title = self.title
        new_title = sanitize.strip_html(title)
        # Title hasn't changed after sanitization, bail out
        if original_title == new_title:
            return False
        self.title = new_title
        self.add_log(
            action=NodeLog.EDITED_TITLE,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'title_new': self.title,
                'title_original': original_title,
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
        return None
    def set_description(self, description, auth, save=False):
        """Set the description and log the event.

        :param str description: The new description
        :param auth: All the auth information including user, API key.
        :param bool save: Save self after updating.
        :return: ``False`` when the sanitized description equals the current
            one (no change, no log); otherwise ``None``.
        """
        original = self.description
        new_description = sanitize.strip_html(description)
        if original == new_description:
            return False
        self.description = new_description
        self.add_log(
            action=NodeLog.EDITED_DESCRIPTION,
            params={
                'parent_node': self.parent_id,
                'node': self._primary_key,
                'description_new': self.description,
                'description_original': original
            },
            auth=auth,
            save=False,
        )
        if save:
            self.save()
        return None
    def update_search(self):
        """Push this node's document to the search index; search outages are
        logged and swallowed so saves never fail on search errors.

        NOTE(review): the `async` keyword argument is Python-2 era; it is a
        reserved word from Python 3.7 on and would need renaming to port.
        """
        from website import search
        try:
            search.search.update_node(self, bulk=False, async=True)
        except search.exceptions.SearchUnavailableError as e:
            logger.exception(e)
            log_exception()
    @classmethod
    def bulk_update_search(cls, nodes):
        """Push search documents for many nodes in one bulk request; search
        outages are logged and swallowed.

        :param nodes: iterable of nodes to index
        """
        from website import search
        try:
            serialize = functools.partial(search.search.update_node, bulk=True, async=False)
            search.search.bulk_update_nodes(serialize, nodes)
        except search.exceptions.SearchUnavailableError as e:
            logger.exception(e)
            log_exception()
    def delete_search_entry(self):
        """Remove this node's document from the search index; search outages
        are logged and swallowed."""
        from website import search
        try:
            search.search.delete_node(self)
        except search.exceptions.SearchUnavailableError as e:
            logger.exception(e)
            log_exception()
    def delete_registration_tree(self, save=False):
        """Mark this registration and all primary descendants as deleted.

        :param bool save: save (and refresh search for) each node as it is
            marked deleted
        """
        self.is_deleted = True
        # Keep `registered_from` only when the embargo covers an existing
        # registration; otherwise sever the link to the source node.
        if not getattr(self.embargo, 'for_existing_registration', False):
            self.registered_from = None
        if save:
            self.save()
        self.update_search()
        for child in self.nodes_primary:
            child.delete_registration_tree(save=save)
    def remove_node(self, auth, date=None):
        """Marks a node as deleted.

        TODO: Call a hook on addons
        Adds a log to the parent node if applicable

        :param auth: an instance of :class:`Auth`.
        :param date: Date node was removed
        :type date: `datetime.datetime` or `None`
        :raises NodeStateError: for dashboards or nodes with live children
        :raises PermissionsError: if the user cannot edit this node
        :return: ``True`` on success
        """
        # TODO: rename "date" param - it's shadowing a global
        if self.is_dashboard:
            raise NodeStateError("Dashboards may not be deleted.")
        if not self.can_edit(auth):
            raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
        # if this is a folder, remove all the folders that this is pointing at.
        if self.is_folder:
            for pointed in self.nodes_pointer:
                if pointed.node.is_folder:
                    pointed.node.remove_node(auth=auth)
        # Refuse to delete while any primary child is still alive.
        if [x for x in self.nodes_primary if not x.is_deleted]:
            raise NodeStateError("Any child components must be deleted prior to deleting this project.")
        # After delete callback
        for addon in self.get_addons():
            message = addon.after_delete(self, auth.user)
            if message:
                status.push_status_message(message, kind='info', trust=False)
        log_date = date or datetime.datetime.utcnow()
        # Add log to parent
        if self.node__parent:
            self.node__parent[0].add_log(
                NodeLog.NODE_REMOVED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        else:
            self.add_log(
                NodeLog.PROJECT_DELETED,
                params={
                    'project': self._primary_key,
                },
                auth=auth,
                log_date=log_date,
                save=True,
            )
        self.is_deleted = True
        self.deleted_date = date
        self.save()
        auth_signals.node_deleted.send(self)
        return True
    def fork_node(self, auth, title='Fork of '):
        """Recursively fork a node.

        :param Auth auth: Consolidated authorization
        :param str title: Optional text to prepend to forked title
        :return: Forked node
        :raises PermissionsError: if the user lacks read access to a private
            node
        :raises NodeStateError: if the node is deleted
        """
        user = auth.user
        # Non-contributors can't fork private nodes
        if not (self.is_public or self.has_permission(user, 'read')):
            raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
        when = datetime.datetime.utcnow()
        # Work from a fresh copy of this node loaded from the database.
        original = self.load(self._primary_key)
        if original.is_deleted:
            raise NodeStateError('Cannot fork deleted node.')
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        forked = original.clone()
        forked.logs = self.logs
        forked.tags = self.tags
        # Recursively fork child nodes
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                forked_node = None
                try:  # Catch the potential PermissionsError above
                    forked_node = node_contained.fork_node(auth=auth, title='')
                except PermissionsError:
                    pass  # If this exception is thrown omit the node from the result set
                if forked_node is not None:
                    forked.nodes.append(forked_node)
        forked.title = title + forked.title
        forked.is_fork = True
        forked.is_registration = False
        forked.forked_date = when
        forked.forked_from = original
        forked.creator = user
        forked.piwik_site_id = None
        forked.node_license = original.license.copy() if original.license else None
        # Forks default to private status
        forked.is_public = False
        # Clear permissions before adding users
        forked.permissions = {}
        forked.visible_contributor_ids = []
        for citation in self.alternative_citations:
            forked.add_citation(
                auth=auth,
                citation=citation.clone(),
                log=False,
                save=False
            )
        # The forking user becomes the sole contributor with full permissions.
        forked.add_contributor(
            contributor=user,
            permissions=CREATOR_PERMISSIONS,
            log=False,
            save=False
        )
        forked.add_log(
            action=NodeLog.NODE_FORKED,
            params={
                'parent_node': original.parent_id,
                'node': original._primary_key,
                'registration': forked._primary_key,
            },
            auth=auth,
            log_date=when,
            save=False,
        )
        forked.save()
        # After fork callback
        for addon in original.get_addons():
            _, message = addon.after_fork(original, forked, user)
            if message:
                status.push_status_message(message, kind='info', trust=True)
        return forked
    def register_node(self, schema, auth, data, parent=None):
        """Make a frozen copy of a node.

        :param schema: Schema object
        :param auth: All the auth information including user, API key.
        :param data: Form data
        :param parent Node: parent registration of registration to be created
        :return: the new registration
        :raises PermissionsError: if the user can neither edit this node nor
            administer a parent
        :raises NodeStateError: for folders or deleted nodes
        """
        # TODO(lyndsysimon): "template" param is not necessary - use schema.name?
        # NOTE: Admins can register child nodes even if they don't have write access them
        if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
            raise PermissionsError(
                'User {} does not have permission '
                'to register this node'.format(auth.user._id)
            )
        if self.is_folder:
            raise NodeStateError("Folders may not be registered")
        when = datetime.datetime.utcnow()
        original = self.load(self._primary_key)
        # Note: Cloning a node copies its `wiki_pages_current` and
        # `wiki_pages_versions` fields, but does not clone the underlying
        # database objects to which these dictionaries refer. This means that
        # the cloned node must pass itself to its wiki objects to build the
        # correct URLs to that content.
        if original.is_deleted:
            raise NodeStateError('Cannot register deleted node.')
        registered = original.clone()
        registered.is_registration = True
        registered.registered_date = when
        registered.registered_user = auth.user
        registered.registered_schema.append(schema)
        registered.registered_from = original
        if not registered.registered_meta:
            registered.registered_meta = {}
        # Store the submitted form data keyed by the schema id.
        registered.registered_meta[schema._id] = data
        registered.contributors = self.contributors
        registered.forked_from = self.forked_from
        registered.creator = self.creator
        registered.logs = self.logs
        registered.tags = self.tags
        registered.piwik_site_id = None
        registered.alternative_citations = self.alternative_citations
        registered.node_license = original.license.copy() if original.license else None
        registered.save()
        # Registrations start private, including all descendants.
        registered.is_public = False
        for node in registered.get_descendants_recursive():
            node.is_public = False
            node.save()
        if parent:
            registered._parent_node = parent
        # After register callback
        for addon in original.get_addons():
            _, message = addon.after_register(original, registered, auth.user)
            if message:
                status.push_status_message(message, kind='info', trust=False)
        # Recursively register all live children under this registration.
        for node_contained in original.nodes:
            if not node_contained.is_deleted:
                child_registration = node_contained.register_node(
                    schema=schema,
                    auth=auth,
                    data=data,
                    parent=registered,
                )
                if child_registration and not child_registration.primary:
                    registered.nodes.append(child_registration)
        registered.save()
        if settings.ENABLE_ARCHIVER:
            project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
        return registered
    def remove_tag(self, tag, auth, save=True):
        """Remove *tag* from the node, logging TAG_REMOVED; no-op when the
        tag is not present.

        :param tag: tag identifier
        :param Auth auth: authorization recorded on the log
        :param bool save: save the node afterwards
        """
        if tag in self.tags:
            self.tags.remove(tag)
            self.add_log(
                action=NodeLog.TAG_REMOVED,
                params={
                    'parent_node': self.parent_id,
                    'node': self._primary_key,
                    'tag': tag,
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()
    def add_tag(self, tag, auth, save=True, log=True):
        """Attach *tag* to the node, creating the Tag document if it does not
        exist yet; no-op when the tag is already present.

        :param tag: tag identifier
        :param Auth auth: authorization recorded on the log
        :param bool save: save the node afterwards
        :param bool log: add a TAG_ADDED log entry
        """
        if tag not in self.tags:
            new_tag = Tag.load(tag)
            if not new_tag:
                # First use of this tag anywhere: create the Tag document.
                new_tag = Tag(_id=tag)
            new_tag.save()
            self.tags.append(new_tag)
            if log:
                self.add_log(
                    action=NodeLog.TAG_ADDED,
                    params={
                        'parent_node': self.parent_id,
                        'node': self._primary_key,
                        'tag': tag,
                    },
                    auth=auth,
                    save=False,
                )
            if save:
                self.save()
    def add_citation(self, auth, save=False, log=True, citation=None, **kwargs):
        """Attach an alternative citation to the node.

        :param Auth auth: authorization recorded on the log
        :param bool save: save the node afterwards
        :param bool log: add a CITATION_ADDED log entry
        :param citation: existing AlternativeCitation; when None, one is
            built from **kwargs
        :return: the attached citation
        """
        if not citation:
            citation = AlternativeCitation(**kwargs)
        citation.save()
        self.alternative_citations.append(citation)
        citation_dict = {'name': citation.name, 'text': citation.text}
        if log:
            self.add_log(
                action=NodeLog.CITATION_ADDED,
                params={
                    'node': self._primary_key,
                    'citation': citation_dict
                },
                auth=auth,
                save=False
            )
        if save:
            self.save()
        return citation
    def edit_citation(self, auth, instance, save=False, log=True, **kwargs):
        """Update an alternative citation's name and/or text.

        :param Auth auth: authorization recorded on the log
        :param instance: the AlternativeCitation to edit
        :param bool save: save the node afterwards
        :param bool log: add a CITATION_EDITED log entry
        :param kwargs: optional `name` and `text` replacements
        :return: the edited citation instance
        """
        # Log params carry both the old values and any changed new values.
        citation = {'name': instance.name, 'text': instance.text}
        new_name = kwargs.get('name', instance.name)
        new_text = kwargs.get('text', instance.text)
        if new_name != instance.name:
            instance.name = new_name
            citation['new_name'] = new_name
        if new_text != instance.text:
            instance.text = new_text
            citation['new_text'] = new_text
        instance.save()
        if log:
            self.add_log(
                action=NodeLog.CITATION_EDITED,
                params={
                    'node': self._primary_key,
                    'citation': citation
                },
                auth=auth,
                save=False
            )
        if save:
            self.save()
        return instance
    def remove_citation(self, auth, instance, save=False, log=True):
        """Detach an alternative citation from the node.

        :param Auth auth: authorization recorded on the log
        :param instance: the AlternativeCitation to remove
        :param bool save: save the node afterwards
        :param bool log: add a CITATION_REMOVED log entry
        """
        citation = {'name': instance.name, 'text': instance.text}
        self.alternative_citations.remove(instance)
        if log:
            self.add_log(
                action=NodeLog.CITATION_REMOVED,
                params={
                    'node': self._primary_key,
                    'citation': citation
                },
                auth=auth,
                save=False
            )
        if save:
            self.save()
    def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
        """Create a NodeLog for *action*, append it to this node's logs, and
        bump the node's `date_modified`.

        :param action: a NodeLog action constant
        :param dict params: log parameters; 'node' is defaulted from
            'project' when absent
        :param Auth auth: the acting user (may be None)
        :param foreign_user: name for actions performed outside the system
        :param log_date: override the log timestamp
        :param bool save: save the node after appending the log
        :return: the created NodeLog
        """
        user = auth.user if auth else None
        params['node'] = params.get('node') or params.get('project')
        log = NodeLog(
            action=action,
            user=user,
            foreign_user=foreign_user,
            params=params,
        )
        if log_date:
            log.date = log_date
        # The node's modified time mirrors the (naive) log timestamp.
        self.date_modified = log.date.replace(tzinfo=None)
        log.save()
        self.logs.append(log)
        if save:
            self.save()
        if user:
            increment_user_activity_counters(user._primary_key, action, log.date)
        return log
    @classmethod
    def find_for_user(cls, user, subquery=None):
        """Find nodes the given user contributes to, optionally narrowed by
        *subquery*.

        :param user: the contributor to search for
        :param subquery: additional query AND-ed onto the contributor filter
        :return: a queryset of matching nodes
        """
        combined_query = Q('contributors', 'contains', user._id)
        if subquery is not None:
            combined_query = combined_query & subquery
        return cls.find(combined_query)
@property
def url(self):
return '/{}/'.format(self._primary_key)
    def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
        # Thin wrapper over the module-level `web_url_for`, pre-filling this
        # node's primary key as the `pid` argument.
        return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
    def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
        # Thin wrapper over the module-level `api_url_for`, pre-filling this
        # node's primary key as the `pid` argument.
        return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
    @property
    def absolute_url(self):
        """Fully-qualified URL of the node (settings.DOMAIN + relative url),
        or ``None`` when the node has no URL (an error is logged)."""
        if not self.url:
            logger.error('Node {0} has a parent that is not a project'.format(self._id))
            return None
        return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
    @property
    def api_v2_url(self):
        # Relative v2 API URL resolved through the API's URL router.
        return reverse('nodes:node-detail', kwargs={'node_id': self._id})
    @property
    def absolute_api_v2_url(self):
        """Absolute v2 API URL, routed by node type: registrations and
        collections (folders) have their own detail endpoints."""
        if self.is_registration:
            return absolute_reverse('registrations:registration-detail', kwargs={'node_id': self._id})
        if self.is_folder:
            return absolute_reverse('collections:collection-detail', kwargs={'collection_id': self._id})
        return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
    # used by django and DRF
    def get_absolute_url(self):
        # Django/DRF convention method; delegates to the v2 API URL.
        return self.absolute_api_v2_url
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
    @property
    def csl(self):  # formats node information into CSL format for citation parsing
        """a dict in CSL-JSON schema

        For details on this schema, see:
            https://github.com/citation-style-language/schema#csl-json-schema
        """
        csl = {
            'id': self._id,
            'title': sanitize.unescape_entities(self.title),
            'author': [
                contributor.csl_name  # method in auth/model.py which parses the names of authors
                for contributor in self.visible_contributors
            ],
            'publisher': 'Open Science Framework',
            'type': 'webpage',
            'URL': self.display_absolute_url,
        }
        doi = self.get_identifier_value('doi')
        if doi:
            csl['DOI'] = doi
        if self.logs:
            # The most recent log's date stands in for the issue date.
            csl['issued'] = datetime_to_csl(self.logs[-1].date)
        return csl
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def _parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
    @_parent_node.setter
    def _parent_node(self, parent):
        # Attach self to the parent's child list and persist the parent; the
        # `parent` back-reference is maintained by the `nodes` field.
        parent.nodes.append(self)
        parent.save()
@property
def _root(self):
if self._parent_node:
return self._parent_node._root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
    @property
    def registrations(self):
        # Registrations of this node whose archiving has completed.
        return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def forked_from_id(self):
if self.forked_from:
return self.forked_from._id
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
    def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
        """Add an add-on to the node. Do nothing if the addon is already
        enabled.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool log: Add a log after adding the add-on
        :return: A boolean, whether the addon was added
        """
        ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
                                        *args, **kwargs)
        if ret and log:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()  # TODO: here, or outside the conditional? @mambocab
        return ret
    def delete_addon(self, addon_name, auth, _force=False):
        """Delete an add-on from the node.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool _force: For migration testing ONLY. Do not set to True
            in the application, or else projects will be allowed to delete
            mandatory add-ons!
        :return bool: Add-on was deleted
        """
        ret = super(Node, self).delete_addon(addon_name, auth, _force)
        if ret:
            config = settings.ADDONS_AVAILABLE_DICT[addon_name]
            self.add_log(
                action=NodeLog.ADDON_REMOVED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'addon': config.full_name,
                },
                auth=auth,
                save=False,
            )
            self.save()
            # TODO: save here or outside the conditional? @mambocab
        return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
    """Swap contributor `old` for `new` in place, carrying over
    permissions and visibility.

    :param User old: Contributor being replaced
    :param User new: Replacement user
    :return bool: True if `old` was found and replaced, else False
    """
    for i, contrib in enumerate(self.contributors):
        if contrib._primary_key == old._primary_key:
            self.contributors[i] = new
            # Remove unclaimed record for the project
            if self._primary_key in old.unclaimed_records:
                del old.unclaimed_records[self._primary_key]
                old.save()
            # Copy old user's permissions to the new user, then drop
            # the old user's permission entry.
            for permission in self.get_permissions(old):
                self.add_permission(new, permission)
            self.permissions.pop(old._id)
            # Preserve bibliographic visibility at the same position.
            if old._id in self.visible_contributor_ids:
                self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
            return True
    return False
def remove_contributor(self, contributor, auth, log=True):
    """Remove a contributor from this node.

    :param contributor: User object, the contributor to be removed
    :param auth: All the auth information including user, API key.
    :param bool log: Add a CONTRIB_REMOVED log entry
    :return bool: True on success; False when the removal would leave no
        visible contributor or no registered admin.

    NOTE(review): the early `return False` paths below run AFTER the
    in-memory mutations (contributor/permission removal); callers appear
    to rely on the node not being saved in that case — confirm.
    """
    # remove unclaimed record if necessary
    if self._primary_key in contributor.unclaimed_records:
        del contributor.unclaimed_records[self._primary_key]

    self.contributors.remove(contributor._id)
    self.clear_permission(contributor)
    if contributor._id in self.visible_contributor_ids:
        self.visible_contributor_ids.remove(contributor._id)

    if not self.visible_contributor_ids:
        return False

    # Node must have at least one registered admin user
    # TODO: Move to validator or helper
    admins = [
        user for user in self.contributors
        if self.has_permission(user, 'admin')
        and user.is_registered
    ]
    if not admins:
        return False

    # Clear permissions for removed user
    # (defensive: clear_permission above should already have done this)
    self.permissions.pop(contributor._id, None)

    # After remove callback
    for addon in self.get_addons():
        message = addon.after_remove_contributor(self, contributor, auth)
        if message:
            status.push_status_message(message, kind='info', trust=True)

    if log:
        self.add_log(
            action=NodeLog.CONTRIB_REMOVED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': [contributor._id],
            },
            auth=auth,
            save=False,
        )

    self.save()

    #send signal to remove this user from project subscriptions
    auth_signals.contributor_removed.send(contributor, node=self)

    return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
    """Remove several contributors, emitting one CONTRIB_REMOVED log entry.

    :param list contributors: User objects to remove
    :param Auth auth: Consolidated authorization object
    :param bool log: Add a single batch log entry
    :param bool save: Save the node afterwards
    :return bool: True only if every removal succeeded
    """
    outcomes = []
    removed_ids = []
    for user in contributors:
        succeeded = self.remove_contributor(
            contributor=user, auth=auth, log=False,
        )
        outcomes.append(succeeded)
        if succeeded:
            project_signals.contributor_removed.send(self, user=user)
            removed_ids.append(user._id)

    if log:
        self.add_log(
            action=NodeLog.CONTRIB_REMOVED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': removed_ids,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
    return False not in outcomes
def update_contributor(self, user, permission, visible, auth, save=False):
    """ TODO: this method should be updated as a replacement for the main loop of
    Node#manage_contributors. Right now there are redundancies, but to avoid major
    feature creep this will not be included as this time.

    Also checks to make sure unique admin is not removing own admin privilege.

    :param User user: Contributor to update
    :param str permission: 'read', 'write', or 'admin' (may be falsy to skip)
    :param bool visible: New bibliographic visibility; None to skip
    :param Auth auth: Must be a node admin
    :param bool save: Propagated to the individual mutators
    :raises PermissionsError: if `auth.user` is not an admin
    :raises NodeStateError: if removing the sole admin's own admin bit
    :raises ValueError: if `user` is not a contributor
    """
    if not self.has_permission(auth.user, ADMIN):
        raise PermissionsError("Only admins can modify contributor permissions")
    permissions = expand_permissions(permission) or DEFAULT_CONTRIBUTOR_PERMISSIONS
    admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
    if not len(admins) > 1:
        # has only one admin
        admin = admins[0]
        if admin == user and ADMIN not in permissions:
            raise NodeStateError('{} is the only admin.'.format(user.fullname))
    if user not in self.contributors:
        raise ValueError(
            'User {0} not in contributors'.format(user.fullname)
        )
    if permission:
        permissions = expand_permissions(permission)
        if set(permissions) != set(self.get_permissions(user)):
            self.set_permissions(user, permissions, save=save)
            permissions_changed = {
                user._id: permissions
            }
            self.add_log(
                action=NodeLog.PERMISSIONS_UPDATED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': permissions_changed,
                },
                auth=auth,
                save=save
            )
            with TokuTransaction():
                # Exactly ['read'] means the user lost write access.
                if ['read'] in permissions_changed.values():
                    project_signals.write_permissions_revoked.send(self)
    if visible is not None:
        self.set_visible(user, visible, auth=auth, save=save)
        self.update_visible_ids()
def manage_contributors(self, user_dicts, auth, save=False):
    """Reorder and remove contributors.

    :param list user_dicts: Ordered list of contributors represented as
        dictionaries of the form:
        {'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
    :param Auth auth: Consolidated authentication information
    :param bool save: Save changes
    :raises: ValueError if any users in `users` not in contributors or if
        no admin contributors remaining
    """
    # All mutations happen inside one transaction; the signal below is
    # deliberately sent in a second, separate transaction.
    with TokuTransaction():
        users = []
        user_ids = []
        permissions_changed = {}
        visibility_removed = []
        to_retain = []
        to_remove = []
        for user_dict in user_dicts:
            user = User.load(user_dict['id'])
            if user is None:
                raise ValueError('User not found')
            if user not in self.contributors:
                raise ValueError(
                    'User {0} not in contributors'.format(user.fullname)
                )
            permissions = expand_permissions(user_dict['permission'])
            if set(permissions) != set(self.get_permissions(user)):
                self.set_permissions(user, permissions, save=False)
                permissions_changed[user._id] = permissions
            # visible must be added before removed to ensure they are validated properly
            if user_dict['visible']:
                self.set_visible(user,
                                 visible=True,
                                 auth=auth)
            else:
                visibility_removed.append(user)
            users.append(user)
            user_ids.append(user_dict['id'])

        # Apply visibility removals only after all additions (see note above).
        for user in visibility_removed:
            self.set_visible(user,
                             visible=False,
                             auth=auth)

        # Partition current contributors into kept vs. removed.
        for user in self.contributors:
            if user._id in user_ids:
                to_retain.append(user)
            else:
                to_remove.append(user)

        # TODO: Move to validator or helper @jmcarp
        admins = [
            user for user in users
            if self.has_permission(user, 'admin')
            and user.is_registered
        ]
        # NOTE(review): `users` can never be None here (built above), so
        # only the `not admins` half of this check can fire.
        if users is None or not admins:
            raise ValueError(
                'Must have at least one registered admin contributor'
            )

        if to_retain != users:
            # Order changed; log the new ordering.
            self.add_log(
                action=NodeLog.CONTRIB_REORDERED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': [
                        user._id
                        for user in users
                    ],
                },
                auth=auth,
                save=False,
            )

        if to_remove:
            self.remove_contributors(to_remove, auth=auth, save=False)

        self.contributors = users

        if permissions_changed:
            self.add_log(
                action=NodeLog.PERMISSIONS_UPDATED,
                params={
                    'project': self.parent_id,
                    'node': self._id,
                    'contributors': permissions_changed,
                },
                auth=auth,
                save=False,
            )
        # Update list of visible IDs
        self.update_visible_ids()
        if save:
            self.save()

    with TokuTransaction():
        # Notify on removals or on any user reduced to exactly read-only.
        # NOTE(review): `or` binds looser than `and`, so this fires for ANY
        # removal regardless of the read-only condition — confirm intended.
        if to_remove or permissions_changed and ['read'] in permissions_changed.values():
            project_signals.write_permissions_revoked.send(self)
def add_contributor(self, contributor, permissions=None, visible=True,
                    auth=None, log=True, save=False):
    """Add a contributor to the project.

    :param User contributor: The contributor to be added
    :param list permissions: Permissions to grant to the contributor
    :param bool visible: Contributor is visible in project dashboard
    :param Auth auth: All the auth information including user, API key
    :param bool log: Add log to self
    :param bool save: Save after adding contributor
    :returns: Whether contributor was added
    """
    MAX_RECENT_LENGTH = 15

    # If user is merged into another account, use master account
    contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
    if contrib_to_add not in self.contributors:
        self.contributors.append(contrib_to_add)
        if visible:
            self.set_visible(contrib_to_add, visible=True, log=False)

        # Add default contributor permissions
        permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
        for permission in permissions:
            self.add_permission(contrib_to_add, permission, save=False)

        # Add contributor to recently added list for user
        if auth is not None:
            user = auth.user
            # Move to front of the MRU list, bounded at MAX_RECENT_LENGTH.
            if contrib_to_add in user.recently_added:
                user.recently_added.remove(contrib_to_add)
            user.recently_added.insert(0, contrib_to_add)
            while len(user.recently_added) > MAX_RECENT_LENGTH:
                user.recently_added.pop()

        if log:
            self.add_log(
                action=NodeLog.CONTRIB_ADDED,
                params={
                    'project': self.parent_id,
                    'node': self._primary_key,
                    'contributors': [contrib_to_add._primary_key],
                },
                auth=auth,
                save=False,
            )
        if save:
            self.save()

        project_signals.contributor_added.send(self, contributor=contributor, auth=auth)
        return True

    # Permissions must be overridden if changed when contributor is added to parent he/she is already on a child of.
    elif contrib_to_add in self.contributors and permissions is not None:
        self.set_permissions(contrib_to_add, permissions)
        if save:
            self.save()
        return False
    else:
        return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
    """Add multiple contributors in one pass with a single log entry.

    :param list contributors: A list of dictionaries of the form:
        {
            'user': <User object>,
            'permissions': <Permissions list, e.g. ['read', 'write']>,
            'visible': <Boolean indicating whether or not user is a bibliographic contributor>
        }
    :param auth: All the auth information including user, API key.
    :param log: Add log to self
    :param save: Save after adding contributor
    """
    for entry in contributors:
        self.add_contributor(
            contributor=entry['user'],
            permissions=entry['permissions'],
            visible=entry['visible'],
            auth=auth,
            log=False,
            save=False,
        )
    if log and contributors:
        added_ids = [entry['user']._id for entry in contributors]
        self.add_log(
            action=NodeLog.CONTRIB_ADDED,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
                'contributors': added_ids,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
def add_unregistered_contributor(self, fullname, email, auth,
                                 permissions=None, save=False):
    """Add a non-registered contributor to the project.

    :param str fullname: The full name of the person.
    :param str email: The email address of the person.
    :param Auth auth: Auth object for the user adding the contributor.
    :param list permissions: Permissions to grant to the contributor.
    :param bool save: NOTE(review): currently ignored — the node is always
        saved at the end. Presumably kept for signature symmetry; confirm.
    :returns: The added contributor
    :raises: The save-time validation error when a *registered* user with
        the given email already exists (docstring historically said
        DuplicateEmailError; the code re-raises ValidationValueError).
    """
    # Create a new user record
    contributor = User.create_unregistered(fullname=fullname, email=email)

    contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                     given_name=fullname, email=email)
    try:
        contributor.save()
    except ValidationValueError:  # User with same email already exists
        contributor = get_user(email=email)
        # Unregistered users may have multiple unclaimed records, so
        # only raise error if user is registered.
        if contributor.is_registered or self.is_contributor(contributor):
            raise
        contributor.add_unclaimed_record(node=self, referrer=auth.user,
                                         given_name=fullname, email=email)
        contributor.save()

    self.add_contributor(
        contributor, permissions=permissions, auth=auth,
        log=True, save=False,
    )
    self.save()
    return contributor
def set_privacy(self, permissions, auth=None, log=True, save=True, meeting_creation=False):
    """Set the permissions for this node. Also, based on meeting_creation,
    queues an email to user about abilities of public projects.

    :param permissions: A string, either 'public' or 'private'
    :param auth: All the auth information including user, API key.
    :param bool log: Whether to add a NodeLog for the privacy change.
    :param bool meeting_creation: Whether this was created due to a meetings email.
    :return bool: False if the node was already in the requested state,
        True otherwise.
    :raises PermissionsError: if `auth.user` is not an admin
    :raises NodeStateError: for invalid registration/embargo transitions
    """
    if auth and not self.has_permission(auth.user, ADMIN):
        raise PermissionsError('Must be an admin to change privacy settings.')
    if permissions == 'public' and not self.is_public:
        if self.is_registration:
            if self.is_pending_embargo:
                raise NodeStateError("A registration with an unapproved embargo cannot be made public.")
            elif self.is_pending_registration:
                raise NodeStateError("An unapproved registration cannot be made public.")
            if self.embargo_end_date and not self.is_pending_embargo:
                # Making an embargoed registration public ends the embargo early.
                self.embargo.state = Embargo.REJECTED
                self.embargo.save()
        self.is_public = True
    elif permissions == 'private' and self.is_public:
        if self.is_registration and not self.is_pending_embargo:
            raise NodeStateError("Public registrations must be retracted, not made private.")
        else:
            self.is_public = False
    else:
        # No state change requested; do nothing.
        return False

    # After set permissions callback
    for addon in self.get_addons():
        message = addon.after_set_privacy(self, permissions)
        if message:
            status.push_status_message(message, kind='info', trust=False)

    if log:
        action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
        self.add_log(
            action=action,
            params={
                'project': self.parent_id,
                'node': self._primary_key,
            },
            auth=auth,
            save=False,
        )
    if save:
        self.save()
    if auth and permissions == 'public':
        project_signals.privacy_set_public.send(auth.user, node=self, meeting_creation=meeting_creation)
    return True
def admin_public_wiki(self, user):
    """Return a truthy value iff this node is public, has the wiki
    add-on enabled, and `user` has admin permission on it.
    """
    has_wiki = self.has_addon('wiki')
    return has_wiki and self.has_permission(user, 'admin') and self.is_public
def include_wiki_settings(self, user):
    """Check if node meets requirements to make publicly editable."""
    result = self.admin_public_wiki(user)
    if not result:
        # Fall back to any descendant node on which the user qualifies.
        result = any(
            each.admin_public_wiki(user)
            for each in self.get_descendants_recursive()
        )
    return result
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
    """Look up a NodeWikiPage by name (and optional version) or by id.

    :param str name: Page name; when falsy, `id` is used as given.
    :param version: Int-like version number, 'previous', 'current', or None.
    :param str id: Explicit NodeWikiPage id, used when `name` is falsy.
    :return: NodeWikiPage, or None if the page/version cannot be resolved.
    """
    from website.addons.wiki.model import NodeWikiPage
    if name:
        key = to_mongo_key((name or '').strip())
        wants_numeric = version and (isinstance(version, int) or version.isdigit())
        try:
            if wants_numeric:
                # Versions are 1-indexed in the stored list.
                id = self.wiki_pages_versions[key][int(version) - 1]
            elif version == 'previous':
                # Second-to-last entry is the previous version.
                id = self.wiki_pages_versions[key][-2]
            elif version is None or version == 'current':
                id = self.wiki_pages_current[key]
            else:
                return None
        except (KeyError, IndexError):
            return None
    return NodeWikiPage.load(id)
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
    """Update the node's wiki page with new content.

    :param name: A string, the page's name, e.g. ``"home"``.
    :param content: A string, the posted content.
    :param auth: All the auth information including user, API key.
    """
    from website.addons.wiki.model import NodeWikiPage

    name = (name or '').strip()
    key = to_mongo_key(name)

    if key not in self.wiki_pages_current:
        if key in self.wiki_pages_versions:
            # Page existed once and was deleted; continue its version count.
            version = len(self.wiki_pages_versions[key]) + 1
        else:
            version = 1
    else:
        # Demote the existing current page before creating the new version.
        current = NodeWikiPage.load(self.wiki_pages_current[key])
        current.is_current = False
        version = current.version + 1
        current.save()

    new_page = NodeWikiPage(
        page_name=name,
        version=version,
        user=auth.user,
        is_current=True,
        node=self,
        content=content
    )
    new_page.save()

    # check if the wiki page already exists in versions (existed once and is now deleted)
    if key not in self.wiki_pages_versions:
        self.wiki_pages_versions[key] = []
    self.wiki_pages_versions[key].append(new_page._primary_key)
    self.wiki_pages_current[key] = new_page._primary_key

    self.add_log(
        action=NodeLog.WIKI_UPDATED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': new_page.page_name,
            'page_id': new_page._primary_key,
            'version': new_page.version,
        },
        auth=auth,
        log_date=new_page.date,
        save=False,
    )
    self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
    """Rename the node's wiki page with new name.

    :param name: A string, the page's name, e.g. ``"My Page"``.
    :param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
    :param auth: All the auth information including user, API key.
    :raises PageCannotRenameError: when renaming the home page
    :raises PageNotFoundError: when no page with `name` exists
    :raises PageConflictError: when a different page already uses `new_name`
    """
    # TODO: Fix circular imports
    from website.addons.wiki.exceptions import (
        PageCannotRenameError,
        PageConflictError,
        PageNotFoundError,
    )

    name = (name or '').strip()
    key = to_mongo_key(name)
    new_name = (new_name or '').strip()
    new_key = to_mongo_key(new_name)
    page = self.get_wiki_page(name)

    if key == 'home':
        raise PageCannotRenameError('Cannot rename wiki home page')
    if not page:
        raise PageNotFoundError('Wiki page not found')
    # A rename to the same key (e.g. case change) is allowed; 'home' never is.
    if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
        raise PageConflictError(
            'Page already exists with name {0}'.format(
                new_name,
            )
        )

    # rename the page first in case we hit a validation exception.
    old_name = page.page_name
    page.rename(new_name)

    # TODO: merge historical records like update (prevents log breaks)
    # transfer the old page versions/current keys to the new name.
    if key != new_key:
        self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
        del self.wiki_pages_versions[key]
        self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
        del self.wiki_pages_current[key]
        if key in self.wiki_private_uuids:
            self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
            del self.wiki_private_uuids[key]

    self.add_log(
        action=NodeLog.WIKI_RENAMED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': page.page_name,
            'page_id': page._primary_key,
            'old_page': old_name,
            'version': page.version,
        },
        auth=auth,
        save=False,
    )
    self.save()
def delete_node_wiki(self, name, auth):
    """Delete the current version of a wiki page (history is kept in
    ``wiki_pages_versions``).

    :param str name: Wiki page name
    :param Auth auth: All the auth information including user, API key.
    """
    name = (name or '').strip()
    key = to_mongo_key(name)
    # NOTE(review): `key` is passed where other wiki helpers pass `name`;
    # this only works if to_mongo_key is idempotent — confirm.
    page = self.get_wiki_page(key)

    # NOTE(review): raises KeyError when the page does not exist, and
    # `page` may be None (AttributeError below) — presumably callers
    # guard against missing pages; verify.
    del self.wiki_pages_current[key]

    self.add_log(
        action=NodeLog.WIKI_DELETED,
        params={
            'project': self.parent_id,
            'node': self._primary_key,
            'page': page.page_name,
            'page_id': page._primary_key,
        },
        auth=auth,
        save=False,
    )
    self.save()
def get_stats(self, detailed=False):
    """Return basic hit counters for this node.

    :param bool detailed: Detailed stats are not yet implemented.
    :raises NotImplementedError: if `detailed` is requested.
    """
    if detailed:
        raise NotImplementedError(
            'Detailed stats exist, but are not yet implemented.'
        )
    return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self, auth=None):
    """Dictionary representation of node that is nested within a NodeLog's
    representation.
    """
    # TODO: incomplete implementation
    data = {
        'id': str(self._primary_key),
        'category': self.category_display,
        'node_type': self.project_or_component,
        'url': self.url,
    }
    # TODO: Titles shouldn't contain escaped HTML in the first place
    data['title'] = sanitize.unescape_entities(self.title)
    data.update({
        'path': self.path_above(auth),
        'api_url': self.api_url,
        'is_public': self.is_public,
        'is_registration': self.is_registration,
        'registered_from_id': self.registered_from_id,
    })
    return data
def _initiate_retraction(self, user, justification=None):
    """Initiates the retraction process for a registration

    :param user: User who initiated the retraction
    :param justification: Justification, if given, for retraction
    :return Retraction: the newly created, unapproved Retraction
    """
    retraction = Retraction(
        initiated_by=user,
        justification=justification or None,  # make empty strings None
        state=Retraction.UNAPPROVED
    )
    retraction.save()  # Save retraction so it has a primary key
    self.retraction = retraction
    self.save()  # Set foreign field reference Node.retraction
    # Every active admin contributor must authorize the retraction.
    admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
    for admin in admins:
        retraction.add_authorizer(admin)
    retraction.save()  # Save retraction approval state
    return retraction
def retract_registration(self, user, justification=None, save=True):
    """Retract public registration. Instantiate new Retraction object
    and associate it with the respective registration.

    :param User user: Initiator of the retraction
    :param str justification: Optional reason for the retraction
    :param bool save: Save the node after attaching the retraction
    :raises NodeStateError: if the node is not a retractable registration
        or is not the root of its registration tree
    """
    if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)):
        raise NodeStateError('Only public or embargoed registrations may be retracted.')

    if self.root is not self:
        raise NodeStateError('Retraction of non-parent registrations is not permitted.')

    retraction = self._initiate_retraction(user, justification)
    # Log on the originating project, not the registration itself.
    self.registered_from.add_log(
        action=NodeLog.RETRACTION_INITIATED,
        params={
            'node': self._id,
            'retraction_id': retraction._id,
        },
        auth=Auth(user),
    )
    # _initiate_retraction already set self.retraction; this re-assignment
    # is redundant but harmless.
    self.retraction = retraction
    if save:
        self.save()
def _is_embargo_date_valid(self, end_date):
    """Return True iff `end_date` falls within the allowed embargo
    window (inclusive bounds) measured from the current UTC time.
    """
    delta = end_date - datetime.datetime.utcnow()
    return settings.EMBARGO_END_DATE_MIN <= delta <= settings.EMBARGO_END_DATE_MAX
def _initiate_embargo(self, user, end_date, for_existing_registration=False, notify_initiator_on_complete=False):
    """Initiates the embargo process for a registration

    :param user: User who initiated the embargo
    :param end_date: Date when the registration should be made public
    :param bool for_existing_registration: Embargo applied to an already
        existing registration (skips some downstream checks)
    :param bool notify_initiator_on_complete: Email the initiator when done
    :return Embargo: the newly created embargo
    """
    embargo = Embargo(
        initiated_by=user,
        # Normalize to midnight on the given end date.
        end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
        for_existing_registration=for_existing_registration,
        notify_initiator_on_complete=notify_initiator_on_complete
    )
    embargo.save()  # Save embargo so it has a primary key
    self.embargo = embargo
    self.save()  # Set foreign field reference Node.embargo
    # Every active admin contributor must authorize the embargo.
    admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
    for admin in admins:
        embargo.add_authorizer(admin)
    embargo.save()  # Save embargo's approval_state
    return embargo
def embargo_registration(self, user, end_date, for_existing_registration=False, notify_initiator_on_complete=False):
    """Enter registration into an embargo period at end of which, it will
    be made public

    :param user: User initiating the embargo
    :param end_date: Date when the registration should be made public
    :raises: NodeStateError if Node is not a registration
    :raises: PermissionsError if user is not an admin for the Node
    :raises: ValidationValueError if end_date is not within time constraints
    """
    if not self.is_registration:
        raise NodeStateError('Only registrations may be embargoed')
    if not self.has_permission(user, 'admin'):
        raise PermissionsError('Only admins may embargo a registration')
    if not self._is_embargo_date_valid(end_date):
        raise ValidationValueError('Embargo end date must be more than one day in the future')

    embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration, notify_initiator_on_complete=notify_initiator_on_complete)

    # Log on the originating project, not the registration itself.
    self.registered_from.add_log(
        action=NodeLog.EMBARGO_INITIATED,
        params={
            'node': self._id,
            'embargo_id': embargo._id,
        },
        auth=Auth(user),
        save=True,
    )
    # Embargoed registrations must be private until the embargo ends.
    if self.is_public:
        self.set_privacy('private', Auth(user))
def _initiate_approval(self, user, notify_initiator_on_complete=False):
    """Create a RegistrationApproval for this registration and require
    authorization from every active admin contributor.

    :param user: User who initiated the approval
    :param bool notify_initiator_on_complete: Email the initiator when done
    :return RegistrationApproval: the newly created approval
    """
    # Approval auto-expires after the configured window.
    end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME
    approval = RegistrationApproval(
        initiated_by=user,
        end_date=end_date,
        notify_initiator_on_complete=notify_initiator_on_complete
    )
    approval.save()  # Save approval so it has a primary key
    self.registration_approval = approval
    self.save()  # Set foreign field reference Node.registration_approval
    admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
    for admin in admins:
        approval.add_authorizer(admin)
    approval.save()  # Save approval's approval_state
    return approval
def require_approval(self, user, notify_initiator_on_complete=False):
    """Attach a RegistrationApproval to this registration and log it on
    the originating project.

    :param user: Admin initiating the approval process
    :raises NodeStateError: if the node is not a registration
    :raises PermissionsError: if `user` is not an admin
    """
    if not self.is_registration:
        raise NodeStateError('Only registrations can require registration approval')
    if not self.has_permission(user, 'admin'):
        raise PermissionsError('Only admins can initiate a registration approval')

    approval = self._initiate_approval(user, notify_initiator_on_complete)

    log_params = {
        'node': self._id,
        'registration_approval_id': approval._id,
    }
    self.registered_from.add_log(
        action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
        params=log_params,
        auth=Auth(user),
        save=True,
    )
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
    """Ensure that user IDs in `contributors` and `permissions` match.

    :param schema: Schema of the document being saved (modular-odm hook arg)
    :param instance: The Node about to be saved
    :raises ValidationValueError: if the two ID sets differ in either direction
    """
    node = instance
    contributor_ids = set([user._id for user in node.contributors])
    permission_ids = set(node.permissions.keys())
    mismatched_contributors = contributor_ids.difference(permission_ids)
    if mismatched_contributors:
        raise ValidationValueError(
            'Contributors {0} missing from `permissions` on node {1}'.format(
                ', '.join(mismatched_contributors),
                node._id,
            )
        )
    mismatched_permissions = permission_ids.difference(contributor_ids)
    if mismatched_permissions:
        # BUG FIX: this message previously interpolated
        # `mismatched_contributors`, which is always empty on this path,
        # hiding the actual offending permission keys.
        raise ValidationValueError(
            'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
                node._id,
            )
        )
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
    """Ensure that user IDs in `contributors` and `visible_contributor_ids`
    match.
    """
    node = instance
    for visible_id in node.visible_contributor_ids:
        if visible_id in node.contributors:
            continue
        raise ValidationValueError(
            ('User {0} is in `visible_contributor_ids` but not in '
             '`contributors` on node {1}').format(
                visible_id,
                node._id,
            )
        )
class WatchConfig(StoredObject):
    """Per-user configuration for watching a node: whether updates are
    delivered via the digest email and/or immediately.
    """
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    # The node being watched
    node = fields.ForeignField('Node', backref='watched')
    # Include updates in the daily digest email
    digest = fields.BooleanField(default=False)
    # Send updates immediately
    immediate = fields.BooleanField(default=False)

    def __repr__(self):
        return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
    """A shareable view-only link granting (optionally anonymous) read
    access to one or more nodes.
    """
    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    # The URL token visitors present to gain access
    key = fields.StringField(required=True, unique=True)
    name = fields.StringField()
    is_deleted = fields.BooleanField(default=False)
    # When True, contributor names are hidden from link visitors
    anonymous = fields.BooleanField(default=False)

    nodes = fields.ForeignField('node', list=True, backref='shared')
    creator = fields.ForeignField('user', backref='created')

    @property
    def node_ids(self):
        # IDs of every node this link grants access to.
        node_ids = [node._id for node in self.nodes]
        return node_ids

    def node_scale(self, node):
        """Return a pixel indentation offset reflecting `node`'s depth
        among the nodes shared by this link (used by the settings UI).
        """
        # node may be None if previous node's parent is deleted
        if node is None or node.parent_id not in self.node_ids:
            return -40
        else:
            offset = 20 if node.parent_node is not None else 0
            return offset + self.node_scale(node.parent_node)

    def to_json(self):
        """Serialize the link for display; deleted nodes are omitted."""
        return {
            "id": self._id,
            "date_created": iso8601format(self.date_created),
            "key": self.key,
            "name": sanitize.unescape_entities(self.name),
            "creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
            "nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
                      for x in self.nodes if not x.is_deleted],
            "anonymous": self.anonymous
        }
class Sanction(StoredObject):
    """Sanction class is a generic way to track approval states"""
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))

    # Neither approved nor cancelled
    UNAPPROVED = 'unapproved'
    # Has approval
    APPROVED = 'approved'
    # Rejected by at least one person
    REJECTED = 'rejected'

    state = fields.StringField(
        default=UNAPPROVED,
        validate=validators.choice_in((
            UNAPPROVED,
            APPROVED,
            REJECTED
        ))
    )

    DISPLAY_NAME = 'Sanction'
    # SHORT_NAME must correspond with the associated foreign field to query against,
    # e.g. Node.find_one(Q(sanction.SHORT_NAME, 'eq', sanction))
    SHORT_NAME = 'sanction'

    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    # NOTE: constant name is misspelled ("MESSAEGE") but preserved because
    # subclasses reference it by this exact name.
    REJECTION_NOT_AUTHORIZED_MESSAEGE = 'This user is not authorized to reject this {DISPLAY_NAME}'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'

    # Controls whether or not the Sanction needs unanimous approval or just a single approval
    ANY = 'any'
    UNANIMOUS = 'unanimous'
    mode = UNANIMOUS

    initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
    # Expiration date-- Sanctions in the UNAPPROVED state that are older than their end_date
    # are automatically made ACTIVE by a daily cron job
    # Use end_date=None for a non-expiring Sanction
    end_date = fields.DateTimeField(default=None)

    # Sanction subclasses must have an initiated_by field
    # initiated_by = fields.ForeignField('user', backref='initiated')

    # Expanded: Dictionary field mapping admin IDs their approval status and relevant tokens:
    # {
    #   'b3k97': {
    #     'has_approved': False,
    #     'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
    #     'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
    # }
    approval_state = fields.DictionaryField()

    def __repr__(self):
        return '<Sanction(end_date={self.end_date!r}) with _id {self._id!r}>'.format(self=self)

    @property
    def is_pending_approval(self):
        """True while the sanction has neither been approved nor rejected."""
        return self.state == Sanction.UNAPPROVED

    @property
    def is_approved(self):
        return self.state == Sanction.APPROVED

    @property
    def is_rejected(self):
        return self.state == Sanction.REJECTED

    def approve(self, user):
        raise NotImplementedError("Sanction subclasses must implement an approve method.")

    def reject(self, user):
        # BUG FIX: this error message previously said "an approve method"
        # (copy-paste from approve()), misreporting which method is missing.
        raise NotImplementedError("Sanction subclasses must implement a reject method.")

    def _on_reject(self, user):
        """Callback for rejection of a Sanction

        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')

    def _on_complete(self, user):
        """Callback for when a Sanction has approval and enters the ACTIVE state

        :param User user:
        """
        raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')

    def forcibly_reject(self):
        """Move to REJECTED without any token/authorization checks."""
        self.state = Sanction.REJECTED
class TokenApprovableSanction(Sanction):
    """Sanction whose approval/rejection is driven by per-authorizer tokens."""
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    def _validate_authorizer(self, user):
        """Subclasses may choose to provide extra restrictions on who can be an authorizer

        :return Boolean: True if user is allowed to be an authorizer else False
        """
        return True

    def add_authorizer(self, user, approved=False, save=False):
        """Register `user` as an authorizer with fresh approve/reject tokens.

        :param User user: Candidate authorizer
        :param bool approved: Pre-mark the user as having approved
        :param bool save: Save the sanction after adding
        :return bool: True if the user was newly added, else False
        """
        valid = self._validate_authorizer(user)
        if valid and user._id not in self.approval_state:
            self.approval_state[user._id] = {
                'has_approved': approved,
                'approval_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'approve_{}'.format(self.SHORT_NAME)
                    }
                ),
                'rejection_token': tokens.encode(
                    {
                        'user_id': user._id,
                        'sanction_id': self._id,
                        'action': 'reject_{}'.format(self.SHORT_NAME)
                    }
                ),
            }
            if save:
                self.save()
            return True
        return False

    def remove_authorizer(self, user, save=False):
        """Remove a user as an authorizer

        :param User user:
        :return Boolean: True if user is removed else False
        """
        if user._id not in self.approval_state:
            return False

        del self.approval_state[user._id]
        if save:
            self.save()
        return True

    def _on_approve(self, user, token):
        """Callback for when a single user approves a Sanction. Calls #_on_complete under two conditions:
        - mode is ANY and the Sanction has not already been cancelled
        - mode is UNANIMOUS and all users have given approval

        :param User user:
        :param str token: user's approval token

        NOTE(review): the code below does not actually check for prior
        cancellation in ANY mode, despite the docstring — confirm intended.
        """
        if self.mode == self.ANY or all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
            self.state = Sanction.APPROVED
            self._on_complete(user)

    def token_for_user(self, user, method):
        """Return the stored token of the given kind for `user`.

        :param str method: 'approval' | 'rejection'
        :raises PermissionsError: if `user` is not an authorizer
        """
        try:
            user_state = self.approval_state[user._id]
        except KeyError:
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        return user_state['{0}_token'.format(method)]

    def approve(self, user):
        """Add user to approval list if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['approval_token'] != token:
                raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.approval_state[user._id]['has_approved'] = True
        self._on_approve(user, token)

    def reject(self, user, token):
        """Cancels sanction if user is admin and token verifies."""
        try:
            if self.approval_state[user._id]['rejection_token'] != token:
                raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAEGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
        self.state = Sanction.REJECTED
        self._on_reject(user)

    def _notify_authorizer(self, user):
        # Hook for subclasses; base implementation is a no-op.
        pass

    def _notify_non_authorizer(self, user):
        # Hook for subclasses; base implementation is a no-op.
        pass

    def ask(self, group):
        """Notify every user in `group`, dispatching on whether each one
        is an authorizer of this sanction.
        """
        for contrib in group:
            if contrib._id in self.approval_state:
                self._notify_authorizer(contrib)
            else:
                self._notify_non_authorizer(contrib)
class EmailApprovableSanction(TokenApprovableSanction):
    """TokenApprovableSanction that notifies authorizers/non-authorizers
    by email, with per-user approve/reject URLs stashed for use outside a
    request context.
    """
    # Tell modularodm not to attach backends
    _meta = {
        'abstract': True,
    }

    # Subclasses must supply mail templates for both audiences.
    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None

    VIEW_URL_TEMPLATE = ''
    APPROVE_URL_TEMPLATE = ''
    REJECT_URL_TEMPLATE = ''

    # A flag to conditionally run a callback on complete
    notify_initiator_on_complete = fields.BooleanField(default=False)
    # Store a persistant copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever models approval_state is modified
    # and the model is saved
    # {
    #   'abcde': {
    #     'approve': [APPROVAL_URL],
    #     'reject': [REJECT_URL],
    #   }
    # }
    stashed_urls = fields.DictionaryField(default=dict)

    @staticmethod
    def _format_or_empty(template, context):
        # Empty string when there is no context to interpolate.
        if context:
            return template.format(**context)
        return ''

    def _view_url(self, user_id):
        return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))

    def _view_url_context(self, user_id):
        # Subclass hook: return a dict for VIEW_URL_TEMPLATE, or None.
        return None

    def _approval_url(self, user_id):
        return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))

    def _approval_url_context(self, user_id):
        # Subclass hook: return a dict for APPROVE_URL_TEMPLATE, or None.
        return None

    def _rejection_url(self, user_id):
        return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))

    def _rejection_url_context(self, user_id):
        # Subclass hook: return a dict for REJECT_URL_TEMPLATE, or None.
        return None

    def _send_approval_request_email(self, user, template, context):
        mails.send_mail(
            user.username,
            template,
            user=user,
            **context
        )

    def _email_template_context(self, user, is_authorizer=False):
        # Subclass hook: context passed into the notification templates.
        return {}

    def _notify_authorizer(self, authorizer):
        context = self._email_template_context(authorizer, is_authorizer=True)
        if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def _notify_non_authorizer(self, user):
        context = self._email_template_context(user)
        if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
            self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
        else:
            raise NotImplementedError

    def add_authorizer(self, user, **kwargs):
        """Add an authorizer and stash persistent view/approve/reject URLs.

        :return bool: result of the base implementation (True if the user
            was newly added). BUG FIX: this override previously discarded
            the base class's boolean return value, breaking its contract.
        """
        added = super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
        self.stashed_urls[user._id] = {
            'view': self._view_url(user._id),
            'approve': self._approval_url(user._id),
            'reject': self._rejection_url(user._id)
        }
        self.save()
        return added

    def _notify_initiator(self):
        raise NotImplementedError

    def _on_complete(self, *args):
        if self.notify_initiator_on_complete:
            self._notify_initiator()
class PreregCallbackMixin(object):
    """Mixin adding Prereg-Challenge-specific email behavior to sanctions."""

    def _notify_initiator(self):
        """Email the draft's initiator when a Prereg Challenge registration completes."""
        registration = self._get_registration()
        prereg_schema = prereg_utils.get_prereg_schema()
        draft = DraftRegistration.find_one(
            Q('registered_node', 'eq', registration)
        )
        # Only Prereg Challenge registrations trigger the acceptance email.
        if prereg_schema not in registration.registered_schema:
            return
        mails.send_mail(
            draft.initiator.username,
            mails.PREREG_CHALLENGE_ACCEPTED,
            user=draft.initiator,
            registration_url=registration.absolute_url,
            mimetype='html'
        )

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Add the Prereg custom message to email contexts when applicable."""
        registration = self._get_registration()
        if prereg_utils.get_prereg_schema() in registration.registered_schema:
            return {
                'custom_message': ' as part of the Preregistration Challenge (https://cos.io/prereg)'
            }
        return {}
class Embargo(PreregCallbackMixin, EmailApprovableSanction):
    """Embargo object for registrations waiting to go public."""

    COMPLETED = 'completed'
    DISPLAY_NAME = 'Embargo'
    SHORT_NAME = 'embargo'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='embargoed')
    # True when the embargo was placed on an already-existing registration;
    # False when the registration was created specifically for this embargo.
    for_existing_registration = fields.BooleanField(default=False)

    @property
    def is_completed(self):
        """Whether the embargo has run its course and been lifted."""
        return self.state == self.COMPLETED

    @property
    def embargo_end_date(self):
        """End date while the embargo is approved/active; False otherwise."""
        if self.state == self.APPROVED:
            return self.end_date
        return False

    # NOTE(hrybacki): Old, private registrations are grandfathered and do not
    # require to be made public or embargoed. This field differentiates them
    # from new registrations entering into an embargo field which should not
    # show up in any search related fields.
    @property
    def pending_registration(self):
        return not self.for_existing_registration and self.is_pending_approval

    def __repr__(self):
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('embargo', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Embargo(parent_registration={0}, initiated_by={1}, '
                'end_date={2}) with _id {3}>').format(
            parent_registration,
            self.initiated_by,
            self.end_date,
            self._id
        )

    def _get_registration(self):
        """Return the registration Node this embargo is attached to."""
        return Node.find_one(Q('embargo', 'eq', self))

    def _view_url_context(self, user_id):
        registration = self._get_registration()
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        # Returns None (no link) when the user has no pending approval token.
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = self._get_registration()
            return {
                'node_id': registration._id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        # Returns None (no link) when the user has no pending rejection token.
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = self._get_registration()
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        """Build the mail-template context; authorizers also get decision links."""
        context = super(Embargo, self)._email_template_context(user, is_authorizer, urls)
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending window expressed in hours for the email copy.
            approval_time_span = settings.EMBARGO_PENDING_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'approval_link': approval_link,
                'project_name': registration.title,
                'disapproval_link': disapproval_link,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
                'approval_time_span': approval_time_span,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'embargo_end_date': self.end_date,
            })
        return context

    def _validate_authorizer(self, user):
        # Only admins on the registration may authorize an embargo.
        registration = self._get_registration()
        return registration.has_permission(user, ADMIN)

    def _on_reject(self, user):
        """Log the cancellation and tear down registrations created for this embargo."""
        parent_registration = self._get_registration()
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_CANCELLED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(user),
        )
        # If the registration was created at the time the embargo was
        # initiated, remove the backref to the parent project and delete it.
        # (Merged from two consecutive `if` blocks with the same condition.)
        if not self.for_existing_registration:
            parent_registration.delete_registration_tree(save=True)
            parent_registration.registered_from = None
            parent_registration.is_deleted = True
            parent_registration.save()

    def disapprove_embargo(self, user, token):
        """Cancels retraction if user is admin and token verifies."""
        self.reject(user, token)

    def _on_complete(self, user):
        """Log approval; state transition to ACTIVE happens via Sanction machinery."""
        super(Embargo, self)._on_complete(user)
        parent_registration = self._get_registration()
        parent_registration.registered_from.add_log(
            action=NodeLog.EMBARGO_APPROVED,
            params={
                'node': parent_registration._id,
                'embargo_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        self.save()

    def approve_embargo(self, user, token):
        """Add user to approval list if user is admin and token verifies."""
        self.approve(user, token)
class Retraction(EmailApprovableSanction):
    """Retraction object for public registrations."""

    DISPLAY_NAME = 'Retraction'
    SHORT_NAME = 'retraction'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='initiated')
    # Optional free-text reason supplied by the initiator.
    justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))

    def __repr__(self):
        parent_registration = None
        try:
            parent_registration = Node.find_one(Q('retraction', 'eq', self))
        except NoResultsFound:
            pass
        return ('<Retraction(parent_registration={0}, initiated_by={1}) '
                'with _id {2}>').format(
            parent_registration,
            self.initiated_by,
            self._id
        )

    def _view_url_context(self, user_id):
        registration = Node.find_one(Q('retraction', 'eq', self))
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        # Returns None (no link) when the user has no pending approval token.
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'node_id': registration._id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        # Returns None (no link) when the user has no pending rejection token.
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        # Build the mail-template context; authorizers also get decision links.
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending window expressed in hours for the email copy.
            approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
            registration = Node.find_one(Q('retraction', 'eq', self))
            return {
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'project_name': registration.title,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
            }
        else:
            return {
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            }

    def _on_reject(self, user):
        # Log the cancellation on the project the registration came from.
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_CANCELLED,
            params={
                'node': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(user),
            save=True,
        )

    def _on_complete(self, user):
        # NOTE(review): unlike Embargo/RegistrationApproval, this does not call
        # super()._on_complete(), so notify_initiator_on_complete has no effect
        # here — confirm that is intended.
        parent_registration = Node.find_one(Q('retraction', 'eq', self))
        parent_registration.registered_from.add_log(
            action=NodeLog.RETRACTION_APPROVED,
            params={
                'node': parent_registration._id,
                'retraction_id': self._id,
            },
            auth=Auth(self.initiated_by),
        )
        # Remove any embargoes associated with the registration
        if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
            parent_registration.embargo.state = self.REJECTED
            parent_registration.registered_from.add_log(
                action=NodeLog.EMBARGO_CANCELLED,
                params={
                    'node': parent_registration._id,
                    'embargo_id': parent_registration.embargo._id,
                },
                auth=Auth(self.initiated_by),
            )
            parent_registration.embargo.save()
        # Ensure retracted registration is public
        auth = Auth(self.initiated_by)
        for node in parent_registration.node_and_primary_descendants():
            node.set_privacy('public', auth=auth, save=True)
            node.update_search()

    def approve_retraction(self, user, token):
        """Convenience wrapper around Sanction.approve()."""
        self.approve(user, token)

    def disapprove_retraction(self, user, token):
        """Convenience wrapper around Sanction.reject()."""
        self.reject(user, token)
class RegistrationApproval(PreregCallbackMixin, EmailApprovableSanction):
    """Sanction gating a new registration until its admins approve it."""

    DISPLAY_NAME = 'Approval'
    SHORT_NAME = 'registration_approval'

    AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
    NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN

    VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
    APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
    REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'

    initiated_by = fields.ForeignField('user', backref='registration_approved')

    def _get_registration(self):
        """Return the registration Node this approval is attached to."""
        return Node.find_one(Q('registration_approval', 'eq', self))

    def _view_url_context(self, user_id):
        registration = self._get_registration()
        return {
            'node_id': registration._id
        }

    def _approval_url_context(self, user_id):
        # Returns None (no link) when the user has no pending approval token.
        approval_token = self.approval_state.get(user_id, {}).get('approval_token')
        if approval_token:
            registration = self._get_registration()
            return {
                'node_id': registration._id,
                'token': approval_token,
            }

    def _rejection_url_context(self, user_id):
        # Returns None (no link) when the user has no pending rejection token.
        rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
        if rejection_token:
            registration = self._get_registration()
            return {
                'node_id': registration._id,
                'token': rejection_token,
            }

    def _email_template_context(self, user, is_authorizer=False, urls=None):
        # Build the mail-template context; authorizers also get decision links.
        context = super(RegistrationApproval, self)._email_template_context(user, is_authorizer, urls)
        urls = urls or self.stashed_urls.get(user._id, {})
        registration_link = urls.get('view', self._view_url(user._id))
        if is_authorizer:
            approval_link = urls.get('approve', '')
            disapproval_link = urls.get('reject', '')
            # Pending window expressed in hours for the email copy.
            approval_time_span = settings.REGISTRATION_APPROVAL_TIME.days * 24
            registration = self._get_registration()
            context.update({
                'is_initiator': self.initiated_by == user,
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
                'approval_link': approval_link,
                'disapproval_link': disapproval_link,
                'approval_time_span': approval_time_span,
                'project_name': registration.title,
            })
        else:
            context.update({
                'initiated_by': self.initiated_by.fullname,
                'registration_link': registration_link,
            })
        return context

    def _add_success_logs(self, node, user):
        # Record a PROJECT_REGISTERED log on the node the registration came from.
        src = node.registered_from
        src.add_log(
            action=NodeLog.PROJECT_REGISTERED,
            params={
                'parent_node': src.parent_id,
                'node': src._primary_key,
                'registration': node._primary_key,
            },
            auth=Auth(user),
            save=False
        )
        src.save()

    def _on_complete(self, user):
        """Make the registration (and primary descendants) public and log approval."""
        super(RegistrationApproval, self)._on_complete(user)
        self.state = Sanction.APPROVED
        register = self._get_registration()
        registered_from = register.registered_from
        auth = Auth(self.initiated_by)
        register.set_privacy('public', auth, log=False)
        for child in register.get_descendants_recursive(lambda n: n.primary):
            child.set_privacy('public', auth, log=False)
        # Accounts for system actions where no `User` performs the final approval
        auth = Auth(user) if user else None
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
            params={
                'node': registered_from._id,
                'registration_approval_id': self._id,
            },
            auth=auth,
        )
        for node in register.root.node_and_primary_descendants():
            self._add_success_logs(node, user)
            node.update_search()  # update search if public
        self.save()

    def _on_reject(self, user):
        """Delete the rejected registration tree and log the cancellation."""
        register = self._get_registration()
        registered_from = register.registered_from
        register.delete_registration_tree(save=True)
        registered_from.add_log(
            action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
            params={
                'node': register._id,
                'registration_approval_id': self._id,
            },
            auth=Auth(user),
        )
class AlternativeCitation(StoredObject):
    """A user-supplied alternative citation (display name plus citation text)."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
    name = fields.StringField(required=True, validate=MaxLengthValidator(256))
    text = fields.StringField(required=True, validate=MaxLengthValidator(2048))

    def to_json(self):
        """Serialize this citation to a plain, JSON-ready dict."""
        pairs = (
            ("id", self._id),
            ("name", self.name),
            ("text", self.text),
        )
        return dict(pairs)
class DraftRegistrationApproval(Sanction):
    """Admin (Prereg) review sanction for draft registrations.

    Unlike token-based sanctions, any user carrying the Prereg admin system
    tag may approve or reject, hence ``mode = Sanction.ANY``.
    """

    mode = Sanction.ANY

    # Since draft registrations that require approval are not immediately registered,
    # meta stores registration_choice and embargo_end_date (when applicable)
    meta = fields.DictionaryField(default=dict)

    def _send_rejection_email(self, user, draft):
        """Email *user* that *draft* was rejected (Prereg Challenge drafts only)."""
        schema = draft.registration_schema
        prereg_schema = prereg_utils.get_prereg_schema()
        if schema._id == prereg_schema._id:
            mails.send_mail(
                user.username,
                mails.PREREG_CHALLENGE_REJECTED,
                user=user,
                draft_url=draft.absolute_url
            )
        else:
            raise NotImplementedError(
                'TODO: add a generic email template for registration approvals'
            )

    def approve(self, user):
        """Approve the draft if *user* carries the Prereg admin tag.

        :raises PermissionsError: when the user lacks the admin tag.
        """
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            raise PermissionsError("This user does not have permission to approve this draft.")
        self.state = Sanction.APPROVED
        self._on_complete(user)

    def reject(self, user):
        """Reject the draft if *user* carries the Prereg admin tag.

        :raises PermissionsError: when the user lacks the admin tag.
        """
        if settings.PREREG_ADMIN_TAG not in user.system_tags:
            # BUG FIX: this message previously said "approve" in the reject path.
            raise PermissionsError("This user does not have permission to reject this draft.")
        self.state = Sanction.REJECTED
        self._on_reject(user)

    def _on_complete(self, user):
        """Register the approved draft, then start the chosen follow-up sanction.

        ``meta['registration_choice']`` selects between an immediate
        registration approval and an embargo ending at
        ``meta['embargo_end_date']``.
        """
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        auth = Auth(draft.initiator)
        registration = draft.register(
            auth=auth,
            save=True
        )
        registration_choice = self.meta['registration_choice']
        if registration_choice == 'immediate':
            sanction = functools.partial(registration.require_approval, draft.initiator)
        elif registration_choice == 'embargo':
            sanction = functools.partial(
                registration.embargo_registration,
                draft.initiator,
                parse_date(self.meta.get('embargo_end_date'), ignoretz=True)
            )
        else:
            raise ValueError("'registration_choice' must be either 'embargo' or 'immediate'")
        sanction(notify_initiator_on_complete=True)

    def _on_reject(self, user, *args, **kwargs):
        """Clear the stored registration options and notify the initiator."""
        # clear out previous registration options
        self.meta = {}
        self.save()
        draft = DraftRegistration.find_one(
            Q('approval', 'eq', self)
        )
        self._send_rejection_email(draft.initiator, draft)
class DraftRegistration(StoredObject):
    """A registration in progress: metadata collected against a schema before
    the actual registration Node is created."""

    _id = fields.StringField(primary=True, default=lambda: str(ObjectId()))

    URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/drafts/{draft_id}'

    datetime_initiated = fields.DateTimeField(auto_now_add=True)
    datetime_updated = fields.DateTimeField(auto_now=True)
    # Original Node a draft registration is associated with
    branched_from = fields.ForeignField('node', index=True)
    initiator = fields.ForeignField('user', index=True)

    # Dictionary field mapping question id to a question's comments and answer
    # {
    #   <qid>: {
    #     'comments': [{
    #       'user': {
    #         'id': <uid>,
    #         'name': <name>
    #       },
    #       value: <value>,
    #       lastModified: <datetime>
    #     }],
    #     'value': <value>
    #   }
    # }
    registration_metadata = fields.DictionaryField(default=dict)
    registration_schema = fields.ForeignField('metaschema')
    registered_node = fields.ForeignField('node', index=True)

    approval = fields.ForeignField('draftregistrationapproval', default=None)

    # Dictionary field mapping extra fields defined in the MetaSchema.schema to their
    # values. Defaults should be provided in the schema (e.g. 'paymentSent': false),
    # and these values are added to the DraftRegistration
    _metaschema_flags = fields.DictionaryField(default=None)

    # lazily set flags
    @property
    def flags(self):
        """Schema-defined flags, lazily populated from the schema defaults."""
        if not self._metaschema_flags:
            self._metaschema_flags = {}
        meta_schema = self.registration_schema
        if meta_schema:
            schema = meta_schema.schema
            flags = schema.get('flags', {})
            for flag, value in flags.iteritems():
                if flag not in self._metaschema_flags:
                    self._metaschema_flags[flag] = value
            self.save()
        return self._metaschema_flags

    @flags.setter
    def flags(self, flags):
        # BUG FIX: the field defaults to None, so setting flags before the
        # first read used to raise AttributeError on None.update().
        if self._metaschema_flags is None:
            self._metaschema_flags = {}
        self._metaschema_flags.update(flags)

    notes = fields.StringField()

    @property
    def url(self):
        """Relative URL of this draft under its source project."""
        return self.URL_TEMPLATE.format(
            node_id=self.branched_from,
            draft_id=self._id
        )

    @property
    def absolute_url(self):
        return urlparse.urljoin(settings.DOMAIN, self.url)

    @property
    def requires_approval(self):
        """Whether the schema demands admin review before registration."""
        return self.registration_schema.requires_approval

    @property
    def is_pending_review(self):
        return self.approval.is_pending_approval if (self.requires_approval and self.approval) else False

    @property
    def is_approved(self):
        # Drafts that need no approval are implicitly approved.
        if self.requires_approval:
            if not self.approval:
                return False
            else:
                return self.approval.is_approved
        else:
            return True

    @property
    def is_rejected(self):
        # Drafts that need no approval can never be rejected.
        if self.requires_approval:
            if not self.approval:
                return False
            else:
                return self.approval.is_rejected
        else:
            return False

    @classmethod
    def create_from_node(cls, node, user, schema, data=None):
        """Alternate constructor: start a draft of *node* for *user* with *schema*."""
        draft = cls(
            initiator=user,
            branched_from=node,
            registration_schema=schema,
            registration_metadata=data or {},
        )
        draft.save()
        return draft

    def update_metadata(self, metadata):
        """Merge *metadata* into the draft, merging comment histories by
        creation time; return the list of question ids whose value changed
        (or that are new)."""
        changes = []
        for question_id, value in metadata.iteritems():
            old_value = self.registration_metadata.get(question_id)
            if old_value:
                # Merge old and new comments keyed by creation timestamp so
                # edits to existing comments replace rather than duplicate.
                old_comments = {
                    comment['created']: comment
                    for comment in old_value.get('comments', [])
                }
                new_comments = {
                    comment['created']: comment
                    for comment in value.get('comments', [])
                }
                old_comments.update(new_comments)
                metadata[question_id]['comments'] = sorted(
                    old_comments.values(),
                    key=lambda c: c['created']
                )
                if old_value.get('value') != value.get('value'):
                    changes.append(question_id)
            else:
                changes.append(question_id)
        self.registration_metadata.update(metadata)
        return changes

    def submit_for_review(self, initiated_by, meta, save=False):
        """Attach a new DraftRegistrationApproval initiated by *initiated_by*."""
        approval = DraftRegistrationApproval(
            initiated_by=initiated_by,
            meta=meta
        )
        approval.save()
        self.approval = approval
        if save:
            self.save()

    def register(self, auth, save=False):
        """Create the actual registration Node from this draft and link it."""
        node = self.branched_from
        # Create the registration
        register = node.register_node(
            schema=self.registration_schema,
            auth=auth,
            data=self.registration_metadata
        )
        self.registered_node = register
        if save:
            self.save()
        return register

    def approve(self, user):
        """Delegate approval to the attached DraftRegistrationApproval."""
        self.approval.approve(user)
        self.approval.save()

    def reject(self, user):
        """Delegate rejection to the attached DraftRegistrationApproval."""
        self.approval.reject(user)
        self.approval.save()
|
Ghalko/osf.io
|
website/project/model.py
|
Python
|
apache-2.0
| 156,052
|
import datetime
import math
import random
class Ordine:
    """A market order: quantities, price, timestamp and processing state.

    BUG FIX: ``set_state`` was defined twice with identical behavior; the
    duplicate definition has been removed.
    """

    def __init__(self, id_es, num_ord, id_ttl, qta, prezzo):
        # Identifiers: executor id, order number, and security (titolo) id.
        self.id_es = id_es
        self.num_ord = num_ord
        self.id_ttl = id_ttl
        self.qta = qta          # requested quantity
        self.qta_eff = 0        # effective (filled) quantity, updated later
        self.prezzo = prezzo    # order price
        # Human-readable creation timestamp (d/m/Y H:M:S, not zero-padded —
        # kept as-is for output compatibility).
        now = datetime.datetime.now()
        self.date = str(now.day)+"/"+str(now.month)+"/"+str(now.year)+" "+str(now.hour)+":"+str(now.minute)+":"+str(now.second)
        self.state = 'Da processare'

    def set_state(self, value):
        """Set the processing state (e.g. 'Da processare')."""
        self.state = value

    def get_state(self):
        return self.state

    def get_id_ttl(self):
        return self.id_ttl

    def get_id_es(self):
        return self.id_es

    def get_num_ord(self):
        return self.num_ord

    def get_date(self):
        return self.date

    def get_qta(self):
        return self.qta

    def get_qta_eff(self):
        return self.qta_eff

    def set_qta(self, value):
        self.qta = value

    def set_qta_eff(self, value):
        self.qta_eff = value

    def get_prz(self):
        return self.prezzo

    # Render the order's data as a single display string.
    def to_string(self):
        return "Id ordine: "+self.id_es+"-"+str(self.num_ord)+" Id Titolo: "+self.id_ttl+" Qta: "+str(self.qta)+" Qta Eff: "+str(self.qta_eff)+" Prezzo: "+str(self.prezzo) + " Stato: "+self.state
|
DrFr4nk/Agent_Base_Market_Simulator
|
Ordine.py
|
Python
|
mit
| 1,379
|
from flask.ext.script import Command, Manager, Option
from flask import current_app
import os
from subprocess import Popen
class InvalidPathException(Exception):
    """Raised when SyncJS.run is invoked without a usable --path argument."""
    pass
class SyncJS(Command):
option_list = (
Option('--path', '-p', dest='path'),
)
def run_command(self, command):
cmd = Popen(command, shell=True, cwd=self.cwd)
cmd.wait()
def run(self, path=None):
if path is None:
raise InvalidPathException
path = os.path.expanduser(path)
self.cwd = os.path.abspath(path)
frontend_path = os.path.abspath(os.path.join(current_app.config['REPO_PATH'], current_app.config['FRONTEND_PATH']))
for the_file in os.listdir(current_app.config['FRONTEND_PATH']):
file_path = os.path.join(current_app.config['FRONTEND_PATH'], the_file)
try:
if os.path.isfile(file_path) and the_file != ".vc":
os.unlink(file_path)
except Exception, e:
print e
self.run_command("npm install")
self.run_command("grunt default")
self.run_command("rm -rf {0}/*".format(frontend_path))
self.run_command("cp -a dist/* {0}".format(os.path.abspath(frontend_path)))
|
realizeapp/realize-core
|
core/commands/frontend.py
|
Python
|
agpl-3.0
| 1,248
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class to serve as proxy for the target engine for testing.
Receives documents from the oplog worker threads and indexes them
into the backend.
Please look at the Solr and ElasticSearch doc manager classes for a sample
implementation with real systems.
"""
from mongo_connector.errors import OperationFailed
from mongo_connector.doc_managers import DocManagerBase
class DocManager(DocManagerBase):
    """BackendSimulator emulates both a target DocManager and a server.

    The DocManager class creates a connection to the backend engine and
    adds/removes documents, and in the case of rollback, searches for them.

    The reason for storing id/doc pairs as opposed to doc's is so that multiple
    updates to the same doc reflect the most up to date version as opposed to
    multiple, slightly different versions of a doc.
    """

    def __init__(self, url=None, unique_key='_id', **kwargs):
        """Creates a dictionary to hold document id keys mapped to the
        documents as values.
        """
        self.unique_key = unique_key
        self.doc_dict = {}
        self.url = url

    def stop(self):
        """Stops any running threads in the DocManager.

        No-op: the simulator has no background threads.
        """
        pass

    def update(self, doc, update_spec):
        """Apply updates given in update_spec to the document whose id
        matches that of doc, and return the updated document.
        """
        # NOTE(review): looks up by literal "_id" while upsert/remove use
        # self.unique_key — confirm callers always pass unique_key='_id'.
        document = self.doc_dict[doc["_id"]]
        updated = self.apply_update(document, update_spec)
        self.upsert(updated)
        return updated

    def upsert(self, doc):
        """Adds a document to the doc dict.
        """
        self.doc_dict[doc[self.unique_key]] = doc

    def remove(self, doc):
        """Removes the document from the doc dict.

        :raises OperationFailed: when the document is not present.
        """
        doc_id = doc[self.unique_key]
        try:
            del self.doc_dict[doc_id]
        except KeyError:
            raise OperationFailed("Document does not exist: %s" % str(doc))

    def search(self, start_ts, end_ts):
        """Searches through all documents and finds all documents within the
        range.

        Since we have very few documents in the doc dict when this is called,
        linear search is fine. This method is only used by rollbacks to query
        all the documents in the target engine within a certain timestamp
        window. The input will be two longs (converted from Bson timestamp)
        which specify the time range. The start_ts refers to the timestamp
        of the last oplog entry after a rollback. The end_ts is the timestamp
        of the last document committed to the backend.
        """
        # BUG FIX: the range test used `or`, which matches every document
        # whenever start_ts <= end_ts; a document is inside the window only
        # when BOTH bounds hold.
        ret_list = []
        for stored_doc in self.doc_dict.values():
            time_stamp = stored_doc['_ts']
            if start_ts <= time_stamp <= end_ts:
                ret_list.append(stored_doc)
        return ret_list

    def commit(self):
        """Simply passes since we're not using an engine that needs commiting.
        """
        pass

    def get_last_doc(self):
        """Searches through the doc dict to find the document with the latest
        timestamp.
        """
        last_doc = None
        last_ts = None
        for stored_doc in self.doc_dict.values():
            time_stamp = stored_doc['_ts']
            if last_ts is None or time_stamp >= last_ts:
                last_doc = stored_doc
                last_ts = time_stamp
        return last_doc

    def _search(self):
        """Returns all documents in the doc dict.

        This function is not a part of the DocManager API, and is only used
        to simulate searching all documents from a backend.
        """
        ret_list = []
        for doc in self.doc_dict.values():
            ret_list.append(doc)
        return ret_list

    def _delete(self):
        """Deletes all documents.

        This function is not a part of the DocManager API, and is only used
        to simulate deleting all documents from a backend.
        """
        self.doc_dict = {}
|
llvtt/mongo-connector
|
mongo_connector/doc_managers/doc_manager_simulator.py
|
Python
|
apache-2.0
| 4,571
|
"""Tests for utility functions used in remote consent forms.
Copyright (C) 2020 A. Samuel Pottinger ("Sam Pottinger", gleap.org)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import unittest
import unittest.mock
import prog_code.util.consent_util as consent_util
import prog_code.util.constants as constants
from ..struct import models
class ConsentUtilTests(unittest.TestCase):
    """Unit tests for consent_util.requires_consent_form across the three
    consent requirement modes (never / always / once)."""

    def __run_with_mocks(self, body, settings, filings):
        # Run *body* with db_util's consent lookups patched to return the
        # supplied settings object and list of filings.
        with unittest.mock.patch('prog_code.util.db_util.get_consent_settings') as get_consent_settings:
            with unittest.mock.patch('prog_code.util.db_util.get_consent_filings') as get_consent_filings:
                get_consent_settings.return_value = settings
                get_consent_filings.return_value = filings
                body()

    def __create_settings(self, requirement_type):
        # Minimal ConsentFormSettings for study 'test-study' with the given
        # requirement type (CONSENT_FORM_NONE / _ALWAYS / _ONCE).
        return models.ConsentFormSettings(
            'test-study',
            requirement_type,
            '',
            [],
            datetime.datetime.now()
        )

    def __create_filing(self, child_id, access_key):
        # Minimal ConsentFormFiling for the given child and access key.
        return models.ConsentFormFiling(
            'test-study',
            'test name',
            child_id,
            datetime.datetime.now(),
            [],
            'test@example.com',
            access_key
        )

    def test_requires_consent_form_never(self):
        # CONSENT_FORM_NONE: never requires a form.
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '1234'
            )
            self.assertFalse(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_NONE),
            []
        )

    def test_requires_consent_form_always_incomplete(self):
        # CONSENT_FORM_ALWAYS: requires a form when no filing matches the
        # current access key ('1234' vs filed '5678').
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '1234'
            )
            self.assertTrue(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_ALWAYS),
            [
                self.__create_filing('test-1', '5678')
            ]
        )

    def test_requires_consent_form_always_complete(self):
        # CONSENT_FORM_ALWAYS: no form required once a filing with the
        # current access key exists.
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '1234'
            )
            self.assertFalse(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_ALWAYS),
            [
                self.__create_filing('test-1', '5678'),
                self.__create_filing('test-1', '1234')
            ]
        )

    def test_requires_consent_form_once_no_match_prior(self):
        # CONSENT_FORM_ONCE: a filing for a different child does not count.
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '5678'
            )
            self.assertTrue(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_ONCE),
            [
                self.__create_filing('test-2', '1234')
            ]
        )

    def test_requires_consent_form_once_no_prior(self):
        # CONSENT_FORM_ONCE: required when there are no filings at all.
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '5678'
            )
            self.assertTrue(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_ONCE),
            []
        )

    def test_requires_consent_form_once_with_prior(self):
        # CONSENT_FORM_ONCE: any prior filing for this child satisfies the
        # requirement, regardless of access key.
        def body():
            result = consent_util.requires_consent_form(
                'test-1',
                'test-study',
                '5678'
            )
            self.assertFalse(result)
        self.__run_with_mocks(
            body,
            self.__create_settings(constants.CONSENT_FORM_ONCE),
            [
                self.__create_filing('test-1', '1234')
            ]
        )
|
Samnsparky/cdibase
|
prog_code/util/consent_util_test.py
|
Python
|
gpl-3.0
| 4,657
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
do the unit tests!
"""
import sys, os, unittest
import getopt, sys
import boto
from boto.tests.test_sqsconnection import SQSConnectionTest
from boto.tests.test_s3connection import S3ConnectionTest
from boto.tests.test_ec2connection import EC2ConnectionTest
from boto.tests.test_sdbconnection import SDBConnectionTest
def usage():
    """Print command-line usage for the boto test runner (Python 2 syntax)."""
    print 'test.py [-t testsuite] [-v verbosity]'
    print ' -t run specific testsuite (s3|sqs|ec2|sdb|all)'
    print ' -v verbosity (0|1|2)'
def main():
    """Parse command-line options and run the selected boto test suite(s).

    Exits with status 2 on an option-parsing error, and with status 0 after
    ``-h`` or when unexpected positional arguments / an unknown suite name
    are given.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ht:v:',
                                   ['help', 'testsuite', 'verbosity'])
    except getopt.GetoptError:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; only option-parsing errors belong here.
        usage()
        sys.exit(2)
    testsuite = 'all'
    verbosity = 1
    for o, a in opts:
        if o in ('-h', '--help'):
            usage()
            sys.exit()
        if o in ('-t', '--testsuite'):
            testsuite = a
        if o in ('-v', '--verbosity'):
            verbosity = int(a)
    if len(args) != 0:
        usage()
        sys.exit()
    # Map each suite name to the TestCase classes it runs; replaces the
    # repetitive if/elif chain.
    suite_map = {
        'all': [SQSConnectionTest, S3ConnectionTest,
                EC2ConnectionTest, SDBConnectionTest],
        's3': [S3ConnectionTest],
        'sqs': [SQSConnectionTest],
        'ec2': [EC2ConnectionTest],
        'sdb': [SDBConnectionTest],
    }
    if testsuite not in suite_map:
        usage()
        sys.exit()
    suite = unittest.TestSuite()
    for case in suite_map[testsuite]:
        suite.addTest(unittest.makeSuite(case))
    unittest.TextTestRunner(verbosity=verbosity).run(suite)
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
|
carlgao/lenga
|
images/lenny64-peon/usr/share/python-support/python-boto/boto/tests/test.py
|
Python
|
mit
| 2,935
|
# -*- coding: utf-8 -*-
import sys
# HACK (Python 2 only): re-import sys to restore setdefaultencoding(),
# which site.py deletes at startup, then force UTF-8 as the process-wide
# default codec so the Chinese literals below compare cleanly.  This is a
# widely discouraged pattern — it can mask real encoding bugs.
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
class qyxg_chinesefinancialnews():
    """Per-field validators for the qyxg_chinesefinancialnews table.

    Each ``check_<field>`` method returns None when the value is valid,
    or a short (Chinese) description of the violated rule otherwise.
    """

    # Fields that must be validated before a row is accepted.
    need_check_ziduan = [
        'bbd_table',
        'search_key'
    ]

    def __check_not_empty(self, ustr):
        # Shared rule used by every field: the value must be a non-empty
        # string.  (The original's ``ustr and len(ustr)`` reduces to plain
        # truthiness for strings and None; the duplicated bodies of the two
        # check methods are factored into this one helper.)
        if ustr:
            return None
        return u'不为空'

    def check_bbd_table(self, indexstr, ustr):
        """Validate ``bbd_table``: must be a non-empty string."""
        return self.__check_not_empty(ustr)

    def check_search_key(self, indexstr, ustr):
        """Validate ``search_key``: must be a non-empty string."""
        return self.__check_not_empty(ustr)
|
mefly2012/platform
|
src/clean_validate/qyxg_chinesefinancialnews.py
|
Python
|
apache-2.0
| 577
|
#!/usr/bin/env python2
# Shane Tully (shane@shanetully.com)
# shanetully.com
# GitHub repo: https://github.com/shanet/Google-Music-Playlist-Sync
# Makes use of the Unofficial Google Music API by Simon Weber
# https://github.com/simon-weber/Unofficial-Google-Music-API
# Copyright (C) 2013 Shane Tully
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import difflib
import json
import re
import sys
from os import path
from getpass import getpass
from gmusicapi import Mobileclient
from mutagen.easyid3 import EasyID3
from mutagen.easymp4 import EasyMP4
from mutagen.flac import FLAC
from mutagen.id3 import ID3NoHeaderError
from xml.etree.ElementTree import parse
CREDS_FILE = 'creds.json'  # Optional JSON file holding {"username": ..., "password": ...}.
# Module-level flags populated from the command line in parse_cmdline_args().
no_remove = False  # When True, never remove tracks from remote playlists.
dry_run = False  # When True, report what would change without modifying anything.
yes = False  # When True, answer "y" to every confirmation prompt.
def main():
    """Entry point: parse args, log in, and sync each named playlist.

    Exits the process with status 0 on completion.
    """
    [user, root_dir, playlists] = parse_cmdline_args()
    # Show some pretty ASCII art
    print ' ____________________________ '
    print '< Google Music Playlist Sync > '
    print ' ---------------------------- '
    print ' \ ^__^ '
    print ' \ (oo)\_______ '
    print ' (__)\ )\\/\\ '
    print ' ||----w | '
    print ' || || '
    print '\nThis script will sync a local XSPF or M3U format playlist, to a playlist on Google Music. Use the Google Music uploader to first upload the songs in the playlist.\n'
    # Log in to Google Music
    api = login_to_google_music(user)
    # Get all songs in the library; fetched once and reused for every playlist.
    print 'Retrieving all songs in library. This may take a minute...'
    remote_library = api.get_all_songs()
    for playlist in playlists:
        print 'Syncing playlist: %s' % (playlist)
        process_playlist(api, playlist, remote_library, root_dir)
    # Be a good citizen and log out
    api.logout()
    print 'Bye!'
    exit(0)
def parse_cmdline_args():
    """Parse the command line.

    Sets the module-level flags (no_remove, dry_run, yes) as a side effect
    and returns ``[user, root_dir, playlists]``.
    """
    global no_remove, dry_run, yes
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--user', dest='user', nargs='?', help='The Google username/email to log in with.')
    parser.add_argument('-r', '--root-dir', dest='root_dir', nargs='?', default='./', help='The root directory of a music directory. Useful for M3U playlists.')
    parser.add_argument('-n', '--no-remove', dest='no_remove', action='store_true', help='Only add to playlists; don\'t remove anything.')
    parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true', help='Only show what would be sync\'d; don\'t actually sync anything.')
    parser.add_argument('-y', '--yes', dest='yes', action='store_true', help='Say yes to all prompts.')
    parser.add_argument('playlists', nargs='+', help='The filenames of playlists.')
    args = parser.parse_args()
    # Normalize the root directory so later string concatenation just works.
    if not args.root_dir.endswith('/'):
        args.root_dir = args.root_dir + '/'
    # Mirror the boolean switches into the module-level flags.  Only set
    # them when the switch was actually given, matching the original.
    if args.no_remove:
        no_remove = args.no_remove
    if args.dry_run:
        dry_run = args.dry_run
    if args.yes:
        yes = args.yes
    return [args.user, args.root_dir, args.playlists]
def login_to_google_music(user):
    """Log in to Google Music and return an authenticated Mobileclient.

    Credentials come from CREDS_FILE when it exists (handy for 2-step
    authentication app passwords -- never store your regular password in
    plain text!); any missing piece is prompted for interactively.
    Exits the process with status 1 when login fails.
    """
    api = Mobileclient()
    # Try to read the username and password from a file.  The original left
    # the handle open; ``with`` guarantees it is closed.
    try:
        with open(CREDS_FILE) as creds_file:
            creds = json.load(creds_file)
    except IOError:
        creds = {}
    try:
        user = creds['username']
    except KeyError:
        if not user:
            user = raw_input('Google username/email: ')
    try:
        password = creds['password']
    except KeyError:
        password = getpass()
    print('\nLogging in...')
    if api.login(user, password, Mobileclient.FROM_MAC_ADDRESS):
        return api
    else:
        print('Login failed. Giving up.')
        exit(1)
def process_playlist(api, local_playlist_path, remote_library, root_dir):
    """Parse one local playlist file (XSPF or M3U) and sync it remotely."""
    # The file name (sans extension) doubles as the default playlist name.
    local_playlist_name, local_playlist_type = path.splitext(path.basename(local_playlist_path))
    if local_playlist_type == '.xspf':
        (name, local_tracks) = parse_xml(local_playlist_path)
        # Prefer an explicit <title> from the XSPF body over the file name.
        if name:
            local_playlist_name = name
    elif local_playlist_type == '.m3u':
        local_tracks = parse_m3u(local_playlist_path, root_dir)
    else:
        print('Error: Playlist \'%s\' must be XSPF or M3U format.' % (local_playlist_name))
        return
    # Refuse to sync an empty playlist.
    if not local_tracks:
        print('Error: Playlist \'%s\' is empty.' % (local_playlist_name))
        if local_playlist_type == '.m3u':
            print('Friendly reminder: m3u playlists use relative paths. Use the \'--root-dir\' option for syncing m3u playlists in a different directory than this script.')
        return
    if sync_playlist(api, remote_library, local_tracks, local_playlist_name):
        print('Playlist \'%s\' sync\'d.' % (local_playlist_name))
    else:
        print('Syncing playlist \'%s\' failed.' % (local_playlist_name))
    return
def parse_xml(local_playlist_path):
    """Parse an XSPF playlist file.

    Returns ``(playlist_name_or_None, tracks)`` where each track is a dict
    that may carry 'title', 'artist', 'album' and 'path' keys.  Exits the
    process when the file lacks a <trackList> element.
    """
    ns = '{http://xspf.org/ns/0/}'
    xml_root = parse(local_playlist_path).getroot()
    # Optional playlist-level <title>.
    playlist_name = None
    title_element = xml_root.find(ns + 'title')
    if title_element is not None:
        playlist_name = title_element.text.strip()
    tracks_elements = xml_root.find(ns + 'trackList')
    if tracks_elements is None:
        print('Error: Malformed or empty playlist.')
        exit(1)
    # Map the XSPF element tags onto the track-dict keys used internally.
    field_names = {
        ns + 'title': 'title',
        ns + 'creator': 'artist',
        ns + 'album': 'album',
        ns + 'location': 'path',
    }
    tracks = []
    for track in tracks_elements:
        new_track = {}
        for field in track:
            key = field_names.get(field.tag)
            if key is not None:
                new_track[key] = field.text.strip()
        tracks.append(new_track)
    return (playlist_name, tracks)
def parse_m3u(local_playlist_path, root_dir):
playlist = open(local_playlist_path, 'r')
# Convert the tracks in the playlist into a dict
tracks = []
for line in playlist:
# Remove the newline from the end of the string
line = line.rstrip()
format = get_song_format(line)
try:
if format == 'mp3':
song = EasyID3(root_dir + line)
elif format == 'mp4' or format == 'm4a':
song = EasyMP4(root_dir + line)
elif format == 'flac':
song = FLAC(root_dir + line)
else:
print '\'%s\' is not a supported format. Supported formats are MP3, MP4, M4A, or FLAC.' % (line)
continue
except ID3NoHeaderError:
print '\'%s\' does not contain an ID3 tag.' % (filename)
continue
# IO errors are most likely file not found errors or the wrong format file
except IOError as ioe:
print '\'' + line + '\': ' + ioe.strerror
continue
# Only take the first metadata info for each category
try:
track = {}
track['title'] = song['title'][0]
track['artist'] = song['artist'][0]
track['album'] = song['album'][0]
track['path'] = root_dir + line
tracks.append(track)
except KeyError:
print 'The following track has missing metadata: %s. Skipping.' % str(track)
return tracks
def get_song_format(filename):
    """Return the lowercased file extension of *filename*, without the dot."""
    base = path.basename(filename)
    extension = path.splitext(base)[1]
    return extension.lower()[1:]
def sync_playlist(api, remote_library, local_tracks, local_playlist_name):
    """Bring the remote playlist in line with the local track list.

    Returns True on success (including "nothing to do"), False when the
    user declines the changes or a dry run cannot proceed.
    """
    global dry_run
    remote_playlist = get_playlist(api, local_playlist_name)
    # Create the playlist remotely when it doesn't exist yet.
    if remote_playlist is None:
        print('Playlist not found on Google Music. Creating it.')
        if dry_run:
            print('Dry-run option given, but cannot continue without creating new playlist.')
            return False
        api.create_playlist(local_playlist_name)
        remote_playlist = get_playlist(api, local_playlist_name)
    # Playlist entries carry their own IDs; resolve them to library tracks.
    remote_tracks = get_track_ids_from_playlist_ids(remote_playlist['tracks'], remote_library)
    # Work out the deltas in both directions.
    (tracks_to_add_ids, tracks_to_add_names) = get_tracks_to_add(api, local_tracks, remote_tracks, remote_library)
    (tracks_to_remove_ids, tracks_to_remove_names) = get_tracks_to_remove(api, local_tracks, remote_tracks)
    if not tracks_to_add_ids and not tracks_to_remove_ids:
        print('\nPlaylist is already up-to-date.')
        return True
    # Ask before touching anything remotely.
    if not confirm_pending_modifications(local_playlist_name, tracks_to_add_names, tracks_to_remove_names):
        print('Sorry!')
        return False
    if dry_run:
        print('Dry-run enabled. Not doing anything.')
    else:
        api.add_songs_to_playlist(remote_playlist['id'], tracks_to_add_ids)
        api.remove_entries_from_playlist(tracks_to_remove_ids)
    return True
def get_playlist(api, local_playlist_name):
    """Return the user's remote playlist with the given name, or None.

    Only the first playlist whose name matches exactly is returned.
    (An unused ``remote_playlist = None`` local was removed.)
    """
    for playlist in api.get_all_user_playlist_contents():
        if playlist['name'] == local_playlist_name:
            return playlist
    return None
def get_track_ids_from_playlist_ids(playlist_tracks, remote_library):
    """Resolve playlist-entry IDs to the library tracks they refer to.

    Each matching library track dict is annotated with a 'playlistId' key
    (the ID of its entry *on the playlist*, needed later for removal) and
    collected in library order.
    """
    tracks = []
    for track in remote_library:
        for entry in playlist_tracks:
            if entry['trackId'] != track['id']:
                continue
            # Keep the unique playlist-entry ID; removal works on it.
            track['playlistId'] = entry['id']
            tracks.append(track)
    return tracks
def get_tracks_to_add(api, local_tracks, remote_tracks, remote_library):
    """Return ``([track_ids], ["Artist - Title" labels])`` for local tracks
    that are missing from the remote playlist.

    Local tracks that cannot be matched against the Google Music library at
    all are reported and skipped.
    """
    track_ids = []
    track_names = []
    for local_track in local_tracks:
        # Already on the remote playlist?  Nothing to do for this track.
        if find_track(local_track, remote_tracks) is not None:
            continue
        track_id = find_track_id(local_track, remote_library)
        if track_id is None:
            print('Warning: Track \'%s - %s\' in local playlist, but not found in Google Music library. Skipping this track.' % (local_track['artist'], local_track['title']))
            continue
        track_names.append('%s - %s' % (local_track['artist'], local_track['title']))
        track_ids.append(track_id)
    return (track_ids, track_names)
def get_tracks_to_remove(api, local_tracks, remote_tracks):
    """Return ``([playlist_entry_ids], [labels])`` for remote tracks that are
    absent from the local playlist.  Honors the module-wide --no-remove flag.
    """
    global no_remove
    if no_remove:
        return ([], [])
    entry_ids = []
    labels = []
    for remote_track in remote_tracks:
        # Still present locally?  Then it stays on the remote playlist.
        if find_track(remote_track, local_tracks) is not None:
            continue
        labels.append('%s - %s' % (remote_track['artist'], remote_track['title']))
        entry_ids.append(remote_track['playlistId'])
    return (entry_ids, labels)
def find_track(local_track, remote_library):
    """Fuzzy-match *local_track* against *remote_library*.

    Returns the best-matching remote track dict when the combined
    artist/title similarity reaches 0.85, else None.
    """
    # SequenceMatcher caches analysis of seq2, so the fixed local track goes
    # there and each remote candidate is swapped in via set_seq1.  'foobar'
    # is just a placeholder first sequence.
    artist_match = difflib.SequenceMatcher(None, 'foobar', clean_string(local_track['artist']))
    title_match = difflib.SequenceMatcher(None, 'foobar', clean_string(local_track['title']))
    best_match = 0
    for remote_track in remote_library:
        artist_match.set_seq1(clean_string(remote_track['artist']))
        title_match.set_seq1(clean_string(remote_track['title']))
        artist_score = artist_match.quick_ratio()
        title_score = title_match.quick_ratio()
        # Artist and title similarity are weighted equally.
        total_score = (artist_score + title_score) / 2
        if total_score == 1:
            # Perfect match: stop scanning early.
            return remote_track
        elif total_score >= best_match:
            # ``>=`` means later tracks win ties.
            best_match = total_score
            best_match_track = remote_track
    if best_match >= 0.85:
        return best_match_track
    else:
        return None
def clean_string(string):
    """Normalize a tag value for fuzzy comparison.

    Strips surrounding whitespace, lowercases, and drops any
    "(feat. X)" / "feat. X" suffix.
    """
    normalized = string.strip().lower()
    # Remove (feat. [some artist]) in either parenthesized or bare form.
    for pattern in ('^(.*?)\(feat\..*?\).*?$', '^(.*?)feat\..*?$'):
        match = re.search(pattern, normalized)
        if match:
            normalized = match.group(1)
    return normalized
def find_track_id(track, remote_library):
    """Return the library-wide ID of the best match for *track*, or None."""
    match = find_track(track, remote_library)
    if match:
        return match['id']
    return None
def confirm_pending_modifications(playlist_name, tracks_to_add, tracks_to_remove):
    """Show the pending add/remove lists and ask the user to confirm.

    Returns True when --yes was given or the user answers 'y'.
    """
    global yes
    print('\nPlaylist \'%s\' will be modified.' % (playlist_name))
    # Print the songs about to be added/removed, one per line.
    if tracks_to_add:
        print('Tracks to be added:')
        for track in tracks_to_add:
            print('\t' + track)
    if tracks_to_remove:
        print('Tracks to be removed:')
        for track in tracks_to_remove:
            print('\t' + track)
    return (yes or raw_input('Is this okay? (y,n) ') == 'y')
# Script entry point.
if __name__ == '__main__':
    main()
|
shanet/Google-Music-Playlist-Sync
|
google-music-playlist-sync.py
|
Python
|
gpl-3.0
| 14,734
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import extractor.csxextract.extractors.grobid as grobid
import extractor.csxextract.extractors.pdfbox as pdfbox
import extractor.csxextract.extractors.tei as tei
import extractor.csxextract.extractors.parscit as parscit
import extractor.csxextract.extractors.figures as figures
import extractor.csxextract.extractors.algorithms as algorithms
import extractor.csxextract.filters as filters
def get_extraction_runner():
    """Build the ExtractionRunner wired with the full CSX pipeline."""
    runner = ExtractionRunner()
    runner.enable_logging('~/logs/results', '~/logs/runnables')
    # Registration order matters: plain text is extracted first, then the
    # academic-paper filter gates the heavier extractors that follow.
    pipeline = (
        pdfbox.PDFBoxPlainTextExtractor,
        filters.AcademicPaperFilter,
        grobid.GrobidHeaderTEIExtractor,
        tei.TEItoHeaderExtractor,
        parscit.ParsCitCitationExtractor,
        figures.PDFFiguresExtractor,
        algorithms.AlgorithmsExtractor,
    )
    for runnable in pipeline:
        runner.add_runnable(runnable)
    return runner
# Batch-extraction entry point: walk a fixed crawl directory and run the
# pipeline over every PDF found there.
if __name__ == '__main__':
    runner = get_extraction_runner()
    # Input corpus and per-document output root (cluster-specific paths).
    path = '/data/huy138/citeseerx-crawl-labeled-sample-b/pdf/'
    outputDir = '/data/huy138/extraction_on_sample_b/'
    listing = os.listdir(path)
    folders = []
    files = []
    prefixes = []
    for file in listing:
        """folders = []
        files = []
        prefixes = []"""
        if file[-4:] == '.pdf':
            # Each PDF gets its own output folder named after the file stem.
            files.append(path + file)
            folders.append(outputDir + file[:-4])
            prefixes.append(file[:-4])
            #print dir
            print file
    runner.run_from_file_batch(files, folders, num_processes=8, file_prefixes=prefixes)
    print 'done'
    """argc = len(sys.argv)
    if argc == 2:
        file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
        runner.run_from_file(sys.argv[1], file_prefix=file_name)
    elif argc == 3:
        file_name = os.path.splitext(os.path.basename(sys.argv[1]))[0]
        runner.run_from_file(sys.argv[1], output_dir = sys.argv[2], file_prefix=file_name)
    else:
        print("USAGE: python {0} path_to_pdf [output_directory]".format(sys.argv[0]))"""
|
SeerLabs/PDFMEF
|
src/extractor/run_extraction.py
|
Python
|
apache-2.0
| 2,199
|
from datetime import timedelta
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.db.models import Sum, Avg
from timeslot.models import Program
class IcecastLog(models.Model):
    """One Icecast access-log entry (a single listener connection)."""
    # Connection window; both are nullable, so listened() can raise on rows
    # where they are unset -- presumably callers only use complete rows.
    # TODO(review): confirm.
    datetime_start = models.DateTimeField(blank=True, null=True)
    datetime_end = models.DateTimeField(blank=True, null=True)
    ip = models.CharField(max_length=20)
    country_code = models.CharField(max_length=4, blank=True, null=True)
    # Mount point of the stream, e.g. the part after the host in the URL.
    mount = models.CharField(max_length=90)
    status_code = models.IntegerField(blank=True, null=True)
    duration = models.IntegerField(blank=True, null=True)
    sent_bytes = models.IntegerField(blank=True, null=True)
    agent = models.CharField(max_length=400, blank=True, null=True)
    referer = models.CharField(max_length=400, blank=True, null=True)
    server = models.CharField(max_length=50, blank=True, null=True)
    auth_user = models.CharField(max_length=20, blank=True, null=True)
    auth_pass = models.CharField(max_length=20, blank=True, null=True)
    def __unicode__(self):
        return self.mount
    @property
    def programs(self):
        # All programs of the user whose configured stream URL contains this
        # mount.  User.objects.get raises when zero or multiple users match.
        return User.objects.get(config__streamurl__icontains=self.mount).program_set.all()
    def listened(self):
        """Return [[program_name, minutes_listened], ...] for programs whose
        slot overlaps this log entry."""
        # Program.days apparently uses 1..7 (Monday=1); weekday() is 0-based.
        # TODO(review): confirm against the timeslot app.
        weekday = self.datetime_start.weekday() + 1
        listened = self.programs.filter(days=weekday).filter(
            Q(start__lte=self.datetime_start.time(), end__gte=self.datetime_start.time()) |
            Q(start__lt=self.datetime_end.time(), end__gte=self.datetime_end.time())
        )
        programs = []
        for l in listened:
            # NOTE(review): ``/60`` is integer (floor) division under
            # Python 2, so sub-minute remainders are dropped.
            programs.append([l.name, l.time_listened(self.datetime_end.time()).seconds/60])
        return programs
class ProgramStat(models.Model):
    """Aggregated listening time for a program, tied to one Icecast log row."""
    log_entry = models.ForeignKey(IcecastLog)
    program_name = models.CharField(max_length=256)
    # Listening time, in whatever unit the caller stored (minutes per
    # IcecastLog.listened()).
    duration = models.IntegerField()

    def __unicode__(self):
        return u"%s: %s" % (self.program_name, self.duration)

    @staticmethod
    def _stats_queryset(start, end, program_name=None):
        # Shared filter for both aggregates below (the original duplicated
        # it): log entries fully inside [start, end], optionally narrowed
        # to a single program.
        qs = ProgramStat.objects.filter(log_entry__datetime_start__gte=start, log_entry__datetime_end__lte=end)
        if program_name:
            qs = qs.filter(program_name=program_name)
        return qs

    @staticmethod
    def duration_total(start, end, program_name=None):
        """Sum of durations in [start, end]; None when no rows match."""
        return ProgramStat._stats_queryset(start, end, program_name).aggregate(total=Sum('duration'))['total']

    @staticmethod
    def duration_average(start, end, program_name=None):
        """Average duration in [start, end]; None when no rows match."""
        return ProgramStat._stats_queryset(start, end, program_name).aggregate(average=Avg('duration'))['average']
|
Xicnet/radioflow-scheduler
|
project/icecast_stats/models.py
|
Python
|
agpl-3.0
| 2,778
|
# -*- coding: iso-8859-1 -*-
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
"""
# This module is in the public domain. No warranties.
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__date__ = '1 Jan 2001'
import sys
import os
import types
import string
import re
import dis
import imp
import tokenize
import linecache
from operator import attrgetter
from collections import namedtuple
# These constants are from Include/code.h.
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects provide __doc__ and (for non-builtins) __file__."""
    return isinstance(object, types.ModuleType)

def isclass(object):
    """Return true if the object is a class (new-style or classic).

    Class objects provide __doc__ and __module__."""
    return isinstance(object, (type, types.ClassType))

def ismethod(object):
    """Return true if the object is an instance method.

    Instance methods provide __doc__, __name__, im_class, im_func and
    im_self (None when unbound)."""
    return isinstance(object, types.MethodType)

def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.
    New in Python 2.2 and true of, e.g., int.__add__.  Such an object has a
    __get__ attribute but no __set__ attribute; beyond that the attribute
    set varies (__name__ is usually sensible, __doc__ often is).  Objects
    passing one of the richer tests above deliberately fail this one,
    because those tests promise more (e.g. an im_func attribute)."""
    # Mutual exclusion with the more specific predicates.
    if ismethod(object) or isclass(object) or isfunction(object):
        return False
    # A data descriptor (has __set__) is not a *method* descriptor.
    return hasattr(object, "__get__") and not hasattr(object, "__set__")

def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both __get__ and __set__.  Examples are
    properties (defined in Python) and getsets and members (defined in C).
    They typically also carry __name__ and __doc__, but that is not
    guaranteed."""
    return hasattr(object, "__set__") and hasattr(object, "__get__")

# The two descriptor-type predicates below depend on optional members of
# the ``types`` module; on implementations that lack them the predicate is
# a constant False.
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False

if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False

def isfunction(object):
    """Return true if the object is a user-defined function.

    Functions provide __doc__, __name__ and the func_code, func_defaults,
    func_doc, func_globals and func_name attributes."""
    return isinstance(object, types.FunctionType)

def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator functions expose the same attributes as plain functions;
    see help(isfunction)."""
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.func_code.co_flags & CO_GENERATOR)

def isgenerator(object):
    """Return true if the object is a generator.

    Generators provide __iter__, close, gi_code, gi_frame, gi_running,
    next, send and throw."""
    return isinstance(object, types.GeneratorType)

def istraceback(object):
    """Return true if the object is a traceback.

    Tracebacks provide tb_frame, tb_lasti, tb_lineno and tb_next."""
    return isinstance(object, types.TracebackType)

def isframe(object):
    """Return true if the object is a frame object.

    Frames provide f_back, f_builtins, f_code, f_exc_*, f_globals,
    f_lasti, f_lineno, f_locals, f_restricted and f_trace."""
    return isinstance(object, types.FrameType)

def iscode(object):
    """Return true if the object is a code object.

    Code objects provide co_argcount, co_code, co_consts, co_filename,
    co_firstlineno, co_flags, co_lnotab, co_name, co_names, co_nlocals,
    co_stacksize and co_varnames."""
    return isinstance(object, types.CodeType)

def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-ins provide __doc__, __name__ and __self__ (the bound instance,
    or None)."""
    return isinstance(object, types.BuiltinFunctionType)

def isroutine(object):
    """Return true if the object is any kind of function or method."""
    # Ordered like the original or-chain, and still short-circuits.
    if isbuiltin(object):
        return True
    if isfunction(object):
        return True
    if ismethod(object):
        return True
    return ismethoddescriptor(object)

def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.

    When *predicate* is given, only members for which it returns a true
    value are included."""
    pairs = []
    for name in dir(object):
        # Some attributes advertised by dir() can refuse access; skip them.
        try:
            member = getattr(object, name)
        except AttributeError:
            continue
        if not predicate or predicate(member):
            pairs.append((name, member))
    pairs.sort()
    return pairs
# Result record for classify_class_attrs(): (name, kind, defining_class, object).
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr. Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            # Not in any __dict__ along the MRO: fall back to getattr and
            # let __objclass__ (set by C-level descriptors) name the owner.
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)
        # Classify the object.  Order matters: staticmethod/classmethod are
        # only visible as such in the raw __dict__ entry.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            obj_via_getattr = getattr(cls, name)
            if (ismethod(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr
        result.append(Attribute(name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
# Simulate the "classic class" search order.
if cls in accum:
return
accum.append(cls)
for base in cls.__bases__:
_searchbases(base, accum)
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
if hasattr(cls, "__mro__"):
return cls.__mro__
else:
result = []
_searchbases(cls, result)
return tuple(result)
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    # string.expandtabs/string.lstrip are the Python 2 function forms of
    # the corresponding str methods.
    expanded = string.expandtabs(line)
    return len(expanded) - len(string.lstrip(expanded))
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace that can be
    uniformly removed from the second line onwards is removed.
    Returns None when there is no usable docstring."""
    doc = getattr(object, '__doc__', None)
    # Non-string __doc__ values (or a missing attribute) yield None.
    if not isinstance(doc, types.StringTypes):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Any whitespace that can be uniformly removed from the second line
    onwards is removed.  Returns None when the string cannot be decoded."""
    try:
        # string.split/string.expandtabs are the Python 2 function forms of
        # the corresponding str methods.
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            # Strip the common margin from every line after the first.
            for i in range(1, len(lines)): lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Raises TypeError for built-in modules/classes and for objects that are
    not a module, class, method, function, traceback, frame, or code object.
    """
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # A class has no file of its own; look at its defining module.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Successively unwrap: method -> function -> code; traceback/frame -> code.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns None (implicitly) when no registered import suffix matches.
    """
    filename = os.path.basename(path)
    # Sort candidate suffixes longest-first (negated length as primary key)
    # so e.g. 'module.pyc' matches '.pyc' before '.c' could.
    suffixes = map(lambda info:
                   (-len(info[0]), info[0], info[1], info[2]),
                   imp.get_suffixes())
    suffixes.sort() # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None."""
    # Delegates to getmoduleinfo(); the name is the first tuple field.
    info = getmoduleinfo(path)
    if info: return info[0]
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    # Map compiled files back to their .py counterpart.
    if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file. We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.
    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    # Callers that already know the filename pass it in to skip the lookup.
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Caches mapping (normalized) filenames -> module names, and module names ->
# the __file__ value seen when that module was last indexed.
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.items():
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list. An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # '<...>' pseudo-filenames (e.g. '<stdin>') may still be in linecache.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    # Passing module globals lets linecache consult PEP 302 loaders.
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Unwrap method -> function -> code; traceback/frame -> code.
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the 'def', lambda, or decorator line, since
        # co_firstlineno may point past leading decorators.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.
    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and string.strip(lines[start]) in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(string.expandtabs(lines[end]))
                end = end + 1
            return string.join(comments, '')
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [string.lstrip(string.expandtabs(lines[end]))]
            if end > 0:
                # Walk upwards collecting contiguous comment lines at the
                # same indentation as the definition.
                end = end - 1
                comment = string.lstrip(string.expandtabs(lines[end]))
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = string.lstrip(string.expandtabs(lines[end]))
            # Trim bare '#' separator lines from either edge of the block.
            while comments and string.strip(comments[0]) == '#':
                comments[:1] = []
            while comments and string.strip(comments[-1]) == '#':
                comments[-1:] = []
            return string.join(comments, '')
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0         # current INDENT/DEDENT nesting depth
        self.islambda = False   # True once a 'lambda' keyword is seen
        self.started = False    # True after the def/class/lambda keyword
        self.passline = False   # skipping tokens to the end of a line
        self.last = 1           # last logical line belonging to the block
    def tokeneater(self, type, token, srow_scol, erow_ecol, line):
        srow, scol = srow_scol
        erow, ecol = erow_ecol
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srow
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    blockfinder = BlockFinder()
    try:
        # Py2 tokenize API: feeds tokens to the callback until EndOfBlock
        # is raised (or the source is exhausted / malformed).
        tokenize.tokenize(iter(lines).next, blockfinder.tokeneater)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:blockfinder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found. An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    # Modules return the whole file; anything else just its own block.
    if ismodule(object): return lines, 0
    else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.
    The argument may be a module, class, method, function, traceback, frame,
    or code object. The source code is returned as a single string. An
    IOError is raised if the source code cannot be retrieved."""
    # Thin wrapper: join the lines returned by getsourcelines().
    lines, lnum = getsourcelines(object)
    return string.join(lines, '')
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree().

    Sorts *classes* in place by (module, name) and emits, for each class, a
    (class, bases) pair followed — when the class has registered children —
    by a nested list produced by recursing into them.
    """
    classes.sort(key=attrgetter('__module__', '__name__'))
    tree = []
    for klass in classes:
        tree.append((klass, klass.__bases__))
        if klass in children:
            tree.append(walktree(children[klass], children, klass))
    return tree
def getclasstree(classes, unique=0):
    """Arrange the given list of classes into a hierarchy of nested lists.
    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list. Each entry is a 2-tuple
    containing a class and a tuple of its base classes. If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list. Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}   # maps parent class -> list of given subclasses
    roots = []      # classes with no bases, plus out-of-list parents
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                if not parent in children:
                    children[parent] = []
                if c not in children[parent]:
                    children[parent].append(c)
                # With 'unique', stop at the first in-list parent so the
                # class is attached to exactly one spot in the tree.
                if unique and parent in classes: break
        elif c not in roots:
            roots.append(c)
    # Parents mentioned as bases but absent from 'classes' become roots too.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args varargs keywords')
def getargs(co):
    """Get information about the arguments accepted by a code object.
    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    if not iscode(co):
        if hasattr(len, 'func_code') and type(co) is type(len.func_code):
            # PyPy extension: built-in function objects have a func_code too.
            # There is no co_code on it, but co_argcount and co_varnames and
            # co_flags are present.
            pass
        else:
            raise TypeError('{!r} is not a code object'.format(co))
    code = getattr(co, 'co_code', '')
    nargs = co.co_argcount
    names = co.co_varnames
    args = list(names[:nargs])
    step = 0
    # The following acrobatics are for anonymous (tuple) arguments.
    # Py2-only 'def f((a, b))' params show up as '' or '.N' placeholders;
    # their real names are recovered by scanning the unpack bytecode.
    for i in range(nargs):
        if args[i][:1] in ('', '.'):
            stack, remain, count = [], [], []
            while step < len(code):
                op = ord(code[step])
                step = step + 1
                if op >= dis.HAVE_ARGUMENT:
                    opname = dis.opname[op]
                    value = ord(code[step]) + ord(code[step+1])*256
                    step = step + 2
                    if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
                        remain.append(value)
                        count.append(value)
                    elif opname == 'STORE_FAST':
                        stack.append(names[value])
                        # Special case for sublists of length 1: def foo((bar))
                        # doesn't generate the UNPACK_TUPLE bytecode, so if
                        # `remain` is empty here, we have such a sublist.
                        if not remain:
                            stack[0] = [stack[0]]
                            break
                        else:
                            remain[-1] = remain[-1] - 1
                            while remain[-1] == 0:
                                remain.pop()
                                size = count.pop()
                                stack[-size:] = [stack[-size:]]
                                if not remain: break
                                remain[-1] = remain[-1] - 1
                            if not remain: break
            args[i] = stack[0]
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return Arguments(args, varargs, varkw)
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.
    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    """
    # Bound/unbound methods delegate to their underlying function.
    if ismethod(func):
        func = func.im_func
    if not (isfunction(func) or
            isbuiltin(func) and hasattr(func, 'func_code')):
            # PyPy extension: this works for built-in functions too
        raise TypeError('{!r} is not a Python function'.format(func))
    args, varargs, varkw = getargs(func.func_code)
    return ArgSpec(args, varargs, varkw, func.func_defaults)
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.
    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    # Argument names come from the code object; values from frame locals.
    args, varargs, varkw = getargs(frame.f_code)
    return ArgInfo(args, varargs, varkw, frame.f_locals)
def joinseq(seq):
    """Join formatted argument strings into a parenthesized tuple display.

    A one-element sequence gets a trailing comma so it reads as a tuple,
    mirroring Python's own one-element tuple syntax.
    """
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    else:
        # str.join replaces the Python-2-only string.join() helper with
        # identical behavior, and also works on Python 3.
        return '(' + ', '.join(seq) + ')'
def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element."""
    # Nested lists/tuples (Py2 tuple parameters) are formatted recursively
    # through `join`; scalars go straight through `convert`.
    if type(object) in (list, tuple):
        return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object))
    else:
        return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.
    The first four arguments are (args, varargs, varkw, defaults). The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments."""
    specs = []
    if defaults:
        # Defaults align with the trailing len(defaults) positional args.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = strseq(arg, formatarg, join)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(varargs))
    if varkw is not None:
        specs.append(formatvarkw(varkw))
    return '(' + string.join(specs, ', ') + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.
    The first four arguments are (args, varargs, varkw, locals). The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments."""
    # Renders each name together with its current value from `locals`.
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        return formatarg(name) + formatvalue(locals[name])
    specs = []
    for i in range(len(args)):
        specs.append(strseq(args[i], convert, join))
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + string.join(specs, ', ') + ')'
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.
    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    Raises TypeError in the same situations calling the function would
    (too many/few arguments, duplicate values, unexpected keywords).
    """
    args, varargs, varkw, defaults = getargspec(func)
    f_name = func.__name__
    arg2value = {}
    # The following closures are basically because of tuple parameter unpacking.
    assigned_tuple_params = []
    def assign(arg, value):
        # Bind a (possibly nested tuple) parameter name to a value,
        # recursively unpacking sequences for Py2 tuple parameters.
        if isinstance(arg, str):
            arg2value[arg] = value
        else:
            assigned_tuple_params.append(arg)
            value = iter(value)
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise ValueError('need more than %d %s to unpack' %
                                     (i, 'values' if i > 1 else 'value'))
                assign(subarg,subvalue)
            try:
                next(value)
            except StopIteration:
                pass
            else:
                raise ValueError('too many values to unpack')
    def is_assigned(arg):
        if isinstance(arg,str):
            return arg in arg2value
        return arg in assigned_tuple_params
    if ismethod(func) and func.im_self is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.im_self,) + positional
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # zip() stops at the shorter sequence, so extra positionals fall through
    # to the varargs handling below.
    for arg, value in zip(args, positional):
        assign(arg, value)
    if varargs:
        if num_pos > num_args:
            assign(varargs, positional[-(num_pos-num_args):])
        else:
            assign(varargs, ())
    elif 0 < num_args < num_pos:
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        if varkw:
            if num_pos:
                # XXX: We should use num_pos, but Python also uses num_total:
                raise TypeError('%s() takes exactly 0 arguments '
                                '(%d given)' % (f_name, num_total))
        else:
            raise TypeError('%s() takes no arguments (%d given)' %
                            (f_name, num_total))
    for arg in args:
        if isinstance(arg, str) and arg in named:
            if is_assigned(arg):
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                assign(arg, named.pop(arg))
    if defaults:    # fill in any missing values with the defaults
        for arg, value in zip(args[-num_defaults:], defaults):
            if not is_assigned(arg):
                assign(arg, value)
    if varkw:
        assign(varkw, named)
    elif named:
        unexpected = next(iter(named))
        # Normalize unicode keyword names for the error message (Py2).
        if isinstance(unexpected, unicode):
            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))
    unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    return arg2value
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.
    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # Tracebacks remember the line where the exception occurred, which
        # may differ from the frame's current line.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line, clamped to the file.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            lines = index = None
        else:
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is now a descriptor that grovels co_lnotab
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    # Walk the f_back chain from the given frame up to the outermost caller.
    while frame:
        framelist.append((frame,) + getframeinfo(frame, context))
        frame = frame.f_back
    return framelist
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.
    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    # Walk the tb_next chain from the outermost traceback entry inward.
    while tb:
        framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
        tb = tb.tb_next
    return framelist
# sys._getframe is an implementation detail of CPython/PyPy; provide a stub
# returning None on interpreters that lack it.
if hasattr(sys, '_getframe'):
    currentframe = sys._getframe
else:
    currentframe = lambda _=None: None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    # _getframe(1) skips this function's own frame.
    return getouterframes(sys._getframe(1), context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    # sys.exc_info()[2] is the traceback of the exception being handled.
    return getinnerframes(sys.exc_info()[2], context)
|
BartoszCichecki/onlinepython
|
onlinepython/pypy-2.4.0-win32/lib-python/2.7/inspect.py
|
Python
|
gpl-2.0
| 42,959
|
#
# Copyright (C) 2016 Rodrigo Freitas
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# This package intentionally exports no public names yet.
__all__ = []
|
rsfreitas/shellbber
|
shellber/chat/__init__.py
|
Python
|
gpl-2.0
| 763
|
# Copyright 2014 A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from functools import wraps
from oslo_utils import excutils
from neutron_lbaas.db.loadbalancer import models
from neutron_lbaas.drivers import driver_mixins
class NotImplementedManager(object):
    """Exploding placeholder for required driver object managers.

    Any subclass of LoadBalancerBaseDriver that forgets to supply one of
    the required managers falls through to these stubs and raises.
    """

    def _explode(self):
        raise NotImplementedError()

    def create(self, context, obj):
        self._explode()

    def update(self, context, old_obj, obj):
        self._explode()

    def delete(self, context, obj):
        self._explode()
class LoadBalancerBaseDriver(object):
    """LBaaSv2 object model drivers should subclass LoadBalancerBaseDriver,
    and initialize the following manager classes to create, update, and delete
    the various load balancer objects.
    """
    # Every manager defaults to the exploding placeholder so that a driver
    # which fails to override one of them fails loudly at call time.
    load_balancer = NotImplementedManager()
    listener = NotImplementedManager()
    pool = NotImplementedManager()
    member = NotImplementedManager()
    health_monitor = NotImplementedManager()
    def __init__(self, plugin):
        # The LBaaS service plugin; managers reach the DB through it.
        self.plugin = plugin
class BaseLoadBalancerManager(driver_mixins.BaseRefreshMixin,
                              driver_mixins.BaseStatsMixin,
                              driver_mixins.BaseManagerMixin):
    """Base manager for LBaaSv2 load balancer objects."""
    model_class = models.LoadBalancer
    @property
    def allocates_vip(self):
        """Does this driver need to allocate its own virtual IPs"""
        return False
    def create_and_allocate_vip(self, context, obj):
        """Create the load balancer and allocate a VIP
        If this method is implemented AND allocates_vip returns True, then
        this method will be called instead of the create method. Any driver
        that implements this method is responsible for allocating a virtual IP
        and updating at least the vip_address attribute in the loadbalancer
        database table.
        """
        raise NotImplementedError
    @property
    def db_delete_method(self):
        # Bound DB-layer delete callable used after a successful delete.
        return self.driver.plugin.db.delete_loadbalancer
class BaseListenerManager(driver_mixins.BaseManagerMixin):
    """Base manager for LBaaSv2 listener objects."""
    model_class = models.Listener
    @property
    def db_delete_method(self):
        # Bound DB-layer delete callable used after a successful delete.
        return self.driver.plugin.db.delete_listener
class BasePoolManager(driver_mixins.BaseManagerMixin):
    """Base manager for LBaaSv2 pool objects."""
    model_class = models.PoolV2
    @property
    def db_delete_method(self):
        # Bound DB-layer delete callable used after a successful delete.
        return self.driver.plugin.db.delete_pool
class BaseMemberManager(driver_mixins.BaseManagerMixin):
    """Base manager for LBaaSv2 member objects."""
    model_class = models.MemberV2
    @property
    def db_delete_method(self):
        # Bound DB-layer delete callable used after a successful delete.
        return self.driver.plugin.db.delete_member
class BaseHealthMonitorManager(driver_mixins.BaseManagerMixin):
    """Base manager for LBaaSv2 health monitor objects."""
    model_class = models.HealthMonitorV2
    @property
    def db_delete_method(self):
        # Bound DB-layer delete callable used after a successful delete.
        return self.driver.plugin.db.delete_healthmonitor
# A decorator for wrapping driver operations, which will automatically
# set the neutron object's status based on whether it sees an exception
def driver_op(func):
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        # By convention args[0] is the manager instance, args[1] the context
        # and args[2] the neutron object being operated on.
        d = (func.__name__ == 'delete')
        try:
            r = func(*args, **kwargs)
            args[0].successful_completion(
                args[1], args[2], delete=d)
            return r
        except Exception:
            # Flag the object as failed, then re-raise the original
            # exception (save_and_reraise_exception preserves it).
            with excutils.save_and_reraise_exception():
                args[0].failed_completion(args[1], args[2])
    return func_wrapper
|
gandelman-a/neutron-lbaas
|
neutron_lbaas/drivers/driver_base.py
|
Python
|
apache-2.0
| 3,997
|
# Code imported from https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
# A Taskcluster routing key will be in the form:
# treeherder.<version>.<user/project>|<project>.<revision>.<pushLogId/pullRequestId>
# [0] Routing key prefix used for listening to only treeherder relevant messages
# [1] Routing key version
# [2] In the form of user/project for github repos and just project for hg.mozilla.org
# [3] Top level revision for the push
# [4] Pull Request ID (github) or Push Log ID (hg.mozilla.org) of the push
# Note: pushes on a branch on Github would not have a PR ID
# Function extracted from
# https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
def parseRoute(route):
    """Parse a Treeherder routing key into a push-info dict.

    The route has the form
    ``<destination>.<version>.<user/project | project>.<revision>[.<id>]``
    where the trailing id is a push log id (hg.mozilla.org) or a pull
    request id (GitHub); pushes to a GitHub branch may omit it.

    Returns a dict with keys ``destination``, ``id`` (0 when absent),
    ``project``, ``revision``, ``origin`` and — for GitHub routes —
    ``owner``.
    """
    parts = route.split('.')
    project = parts[2]
    owner = None
    # A 'user/project' segment marks a GitHub repository; a bare project
    # name means hg.mozilla.org.  (Renamed the old local `id`, which
    # shadowed the builtin.)
    pieces = project.split('/')
    if len(pieces) == 2:
        owner, parsed_project = pieces
    else:
        parsed_project = project
    push_id = parts[4] if len(parts) == 5 else None
    pushInfo = {
        "destination": parts[0],
        "id": int(push_id) if push_id else 0,
        "project": parsed_project,
        "revision": parts[3],
    }
    if owner and parsed_project:
        pushInfo["owner"] = owner
        pushInfo["origin"] = 'github.com'
    else:
        pushInfo["origin"] = 'hg.mozilla.org'
    return pushInfo
|
jmaher/treeherder
|
treeherder/etl/taskcluster_pulse/parse_route.py
|
Python
|
mpl-2.0
| 1,512
|
# -*- coding: utf-8 -*-
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = "@jframos"
from qautils.http.headers_utils import set_representation_headers, HEADER_REPRESENTATION_JSON
from qautils.logger.logger_utils import get_logger
from keystoneclient.v2_0 import Client as KeystoneClient
from fiwarecloto_client.tenantid_resource import TenantIdResourceClient
__logger__ = get_logger(__name__)
# HEADERS
X_AUTH_TOKEN = "X-Auth-Token"
TENANT_ID = "Tenant-Id"
class ClotoClient():
    """REST client for the FIWARE CLOTO component.

    Authenticates against Keystone, then carries the token and tenant id as
    headers on every API request.
    """
    def __init__(self, username, password, tenant_id, auth_url, api_protocol, api_host, api_port, api_resource):
        """
        Init a new Client for CLOTO component.
        :param username (string): The username (OpenStack)
        :param password (string): The password
        :param tenant_id (string): TenantID
        :param auth_url (string): Keystone/IdM auth URL
        :param api_protocol (string): API protocol
        :param api_host (string): API host
        :param api_port (string): API port
        :param api_resource (string): API base resource
        :return: None
        """
        __logger__.info("Init CLOTO Client")
        __logger__.debug("Client parameters: Username: %s, Password: %s, TenantId: %s, API protocol: %s, API host: %s, "
                         "API port: %s, Base resource: %s", username, password, tenant_id, api_protocol, api_host,
                         api_port, api_resource)
        self.headers = dict()
        self.api_protocol = api_protocol
        self.api_host = api_host
        self.api_port = api_port
        self.api_resource = api_resource
        # JSON in and out for every request.
        set_representation_headers(self.headers, content_type=HEADER_REPRESENTATION_JSON,
                                   accept=HEADER_REPRESENTATION_JSON)
        # Authenticate first; the token is attached to all later requests.
        self._init_keystone_client(username, password, tenant_id, auth_url)
        self.token = self._get_auth_token()
        __logger__.debug("Token: %s", self.token)
        self.headers.update({X_AUTH_TOKEN: self.token})
        self.headers.update({TENANT_ID: tenant_id})
        __logger__.debug("Headers with OpenStack credentials: %s", self.headers)
    def _init_keystone_client(self, username, password, tenant_id, auth_url):
        """
        Init the keystone client to request token and endpoint data
        :param string username: Username for authentication.
        :param string password: Password for authentication.
        :param string tenant_id: Tenant id.
        :param string auth_url: Keystone service endpoint for authorization.
        :return None
        """
        __logger__.debug("Init Keystone Client")
        self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,
                                              auth_url=auth_url)
    def _get_auth_token(self):
        """
        Get token from Keystone
        :return: Token (String)
        """
        __logger__.debug("Getting auth Token")
        # auth_ref holds the service catalog/token obtained at client init.
        return self.keystone_client.auth_ref['token']['id']
    def get_tenant_id_resource_client(self):
        """
        Create an API resource REST client
        :return: Rest client for 'TenantId' API resource
        """
        __logger__.info("Creating TenantIdResource")
        # The resource client inherits this client's auth headers.
        return TenantIdResourceClient(protocol=self.api_protocol, host=self.api_host,
                                      port=self.api_port, resource=self.api_resource, headers=self.headers)
|
telefonicaid/fiware-facts
|
tests/acceptance/fiwarecloto_client/client.py
|
Python
|
apache-2.0
| 4,297
|
from django.apps import AppConfig
class TestbConfig(AppConfig):
    """Django application configuration for the ``testb`` app."""
    # Dotted module path Django uses to locate this application.
    name = 'testb'
|
bluekvirus/django-express
|
testb/apps.py
|
Python
|
mit
| 85
|
# apis_v1/documentation_source/voter_guides_to_follow_retrieve_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def voter_guides_to_follow_retrieve_doc_template_values(url_root):
    """
    Show documentation about voterGuidesToFollowRetrieve.

    :param url_root: Root URL used by the documentation template to build links.
    :return: dict of template values (parameter lists, sample response,
        status codes) consumed by the API documentation page.
    """
    required_query_parameter_list = [
        {
            'name':         'voter_device_id',
            'value':        'string',  # boolean, integer, long, string
            'description':  'An 88 character unique identifier linked to a voter record on the server',
        },
        {
            'name':         'api_key',
            'value':        'string (from post, cookie, or get (in that order))',  # boolean, integer, long, string
            'description':  'The unique key provided to any organization using the WeVoteServer APIs',
        },
    ]
    optional_query_parameter_list = [
        {
            'name':         'kind_of_ballot_item',
            'value':        'string',  # boolean, integer, long, string
            'description':  'What is the type of ballot item that we are retrieving? '
                            '(kind_of_ballot_item is either "OFFICE", "CANDIDATE", "POLITICIAN" or "MEASURE")',
        },
        {
            'name':         'ballot_item_we_vote_id',
            'value':        'string',  # boolean, integer, long, string
            'description':  'The unique identifier for a particular ballot item. If this variable is provided, '
                            'we want to retrieve all of the voter guides that have something to say about this '
                            'particular ballot item.',
        },
        {
            'name':         'google_civic_election_id',
            'value':        'integer',  # boolean, integer, long, string
            'description':  'The unique identifier for a particular election. If not provided, use the most recent '
                            'ballot for the voter\'s address.',
        },
        {
            'name':         'search_string',
            'value':        'string',  # boolean, integer, long, string
            'description':  'A string of keyword(s) to search for (to find twitter handle or org name).',
        },
        {
            'name':         'use_test_election',
            'value':        'boolean',  # boolean, integer, long, string
            'description':  'If you need to request a test election, pass this with the value \'True\'. Note that '
                            'text_for_map_search (either passed into this API endpoint as a value, or previously saved '
                            'with voterAddressSave) is required with every election, including the test election.',
        },
        {
            'name':         'maximum_number_to_retrieve',
            'value':        'integer',  # boolean, integer, long, string
            'description':  'Defaults to 20 voter guides. Enter a value to set your own limit.',
        },
    ]
    potential_status_codes_list = [
        {
            'code':         'VOTER_GUIDES_TO_FOLLOW_RETRIEVED',
            'description':  'At least one voter guide was returned.',
        },
        {
            'code':         'ERROR_GUIDES_TO_FOLLOW_NO_VOTER_DEVICE_ID',
            'description':  'A valid voter_device_id parameter was not included. Cannot proceed.',
        },
        {
            'code':         'NO_VOTER_GUIDES_FOUND',
            'description':  'No voter guides exist in the database matching the search terms.',
        },
    ]
    try_now_link_variables_dict = {
        'kind_of_ballot_item': 'CANDIDATE',
        'ballot_item_we_vote_id': 'wv01cand2897',
    }
    api_response = '{\n' \
                   '  "status": string,\n' \
                   '  "success": boolean,\n' \
                   '  "voter_device_id": string (88 characters long),\n' \
                   '  "google_civic_election_id": integer,\n' \
                   '  "search_string": string,\n' \
                   '  "maximum_number_to_retrieve": integer,\n' \
                   '  "voter_guides": list\n' \
                   '   [{\n' \
                   '     "voter_guide_display_name": string (Name of this org or person),\n' \
                   '     "voter_guide_owner_type": ORGANIZATION, PUBLIC_FIGURE, VOTER),\n' \
                   '     "we_vote_id": string (We Vote ID of the voter guide),\n' \
                   '     "organization_we_vote_id": string (We Vote ID for the org that owns the voter guide),\n' \
                   '     "public_figure_we_vote_id": string (We Vote ID for the person that owns the voter guide),\n' \
                   '     "voter_guide_image_url": string (We Vote ID for the person that owns the voter guide),\n' \
                   '     "last_updated": string (time in this format %Y-%m-%d %H:%M),\n' \
                   '     "google_civic_election_id": integer,\n' \
                   '     "twitter_description": string,\n' \
                   '     "twitter_followers_count": integer,\n' \
                   '     "twitter_handle": integer,\n' \
                   '     "owner_voter_id": integer TO BE DEPRECATED,\n' \
                   '     "is_support": boolean (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "is_positive_rating": boolean (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "is_support_or_positive_rating": boolean (Exists if looking at one ballot_item),\n' \
                   '     "is_oppose": boolean (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "is_negative_rating": boolean (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "is_oppose_or_negative_rating": boolean (Exists if looking at one ballot_item),\n' \
                   '     "is_information_only": boolean (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "vote_smart_rating": integer (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "vote_smart_time_span": string (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "candidate_name": string (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "speaker_display_name": string (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "statement_text": string (Exists if looking at voter guides for one ballot_item),\n' \
                   '     "more_info_url": string (Exists if looking at voter guides for one ballot_item),\n' \
                   '   },],\n' \
                   '}\n'
    template_values = {
        'api_name': 'voterGuidesToFollowRetrieve',
        'api_slug': 'voterGuidesToFollowRetrieve',
        'api_introduction':
            # Bug fix: the adjacent string literals below were missing
            # separating spaces, rendering "following.If", "arepassed"
            # and "tofigure" in the published documentation.
            "Look up the election and ballot items that this person is focused on. Return the organizations, "
            "public figures, and voters that have shared voter guides available to follow. Take into consideration "
            "which voter guides the voter has previously ignored. "
            "Do not show voter guides the voter is already following. "
            "If neither ballot_item_we_vote_id (paired with kind_of_ballot_item) nor google_civic_election_id are "
            "passed in, and google_civic_election_id is set to '0', then simply return a list of voter guides "
            "that haven't been followed yet. If google_civic_election_id is NOT set to 0, the routine tries to "
            "figure out which election is being looked at in the voter_device_link or the voter_address.",
        'try_now_link': 'apis_v1:voterGuidesToFollowRetrieveView',
        'try_now_link_variables_dict': try_now_link_variables_dict,
        'url_root': url_root,
        'get_or_post': 'GET',
        'required_query_parameter_list': required_query_parameter_list,
        'optional_query_parameter_list': optional_query_parameter_list,
        'api_response': api_response,
        'api_response_notes':
            "",
        'potential_status_codes_list': potential_status_codes_list,
    }
    return template_values
|
wevote/WebAppPublic
|
apis_v1/documentation_source/voter_guides_to_follow_retrieve_doc.py
|
Python
|
bsd-3-clause
| 8,198
|
from datetime import date
from workalendar.tests import GenericCalendarTest
from workalendar.asia import HongKong, Japan, Qatar, Singapore
from workalendar.asia import SouthKorea, Taiwan, Malaysia
class HongKongTest(GenericCalendarTest):
    """Holiday-set regression tests for the Hong Kong calendar."""
    cal_class = HongKong
    def test_year_2010(self):
        """ Interesting because Christmas fell on a Saturday and CNY fell
        on a Sunday, so didn't roll, and Ching Ming was on the same day
        as Easter Monday """
        holidays = self.cal.holidays_set(2010)
        self.assertIn(date(2010, 1, 1), holidays)    # New Year
        self.assertIn(date(2010, 2, 13), holidays)   # Chinese new year (shift)
        self.assertIn(date(2010, 2, 15), holidays)   # Chinese new year
        self.assertIn(date(2010, 2, 16), holidays)   # Chinese new year
        self.assertNotIn(date(2010, 2, 17), holidays)  # Not Chinese new year
        self.assertIn(date(2010, 4, 2), holidays)    # Good Friday
        self.assertIn(date(2010, 4, 3), holidays)    # Day after Good Friday
        self.assertIn(date(2010, 4, 5), holidays)    # Easter Monday
        self.assertIn(date(2010, 4, 6), holidays)    # Ching Ming (shifted)
        self.assertIn(date(2010, 5, 1), holidays)    # Labour Day
        self.assertIn(date(2010, 5, 21), holidays)   # Buddha's Birthday
        self.assertIn(date(2010, 6, 16), holidays)   # Tuen Ng Festival
        self.assertIn(date(2010, 7, 1), holidays)    # HK SAR Establishment Day
        self.assertIn(date(2010, 9, 23), holidays)   # Day after Mid-Autumn
        self.assertIn(date(2010, 10, 1), holidays)   # National Day
        self.assertIn(date(2010, 10, 16), holidays)  # Chung Yeung Festival
        self.assertIn(date(2010, 12, 25), holidays)  # Christmas Day
        self.assertIn(date(2010, 12, 27), holidays)  # Boxing Day (shifted)
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # New Year
        self.assertIn(date(2013, 2, 11), holidays)   # Chinese new year
        self.assertIn(date(2013, 2, 12), holidays)   # Chinese new year
        self.assertIn(date(2013, 2, 13), holidays)   # Chinese new year
        self.assertIn(date(2013, 3, 29), holidays)   # Good Friday
        self.assertIn(date(2013, 3, 30), holidays)   # Day after Good Friday
        self.assertIn(date(2013, 4, 1), holidays)    # Easter Monday
        self.assertIn(date(2013, 4, 4), holidays)    # Ching Ming
        self.assertIn(date(2013, 5, 1), holidays)    # Labour Day
        self.assertIn(date(2013, 5, 17), holidays)   # Buddha's Birthday
        self.assertIn(date(2013, 6, 12), holidays)   # Tuen Ng Festival
        self.assertIn(date(2013, 7, 1), holidays)    # HK SAR Establishment Day
        self.assertIn(date(2013, 9, 20), holidays)   # Day after Mid-Autumn
        self.assertIn(date(2013, 10, 1), holidays)   # National Day
        self.assertIn(date(2013, 10, 14), holidays)  # Chung Yeung Festival
        self.assertIn(date(2013, 12, 25), holidays)  # Christmas Day
        self.assertIn(date(2013, 12, 26), holidays)  # Boxing Day
    def test_year_2016(self):
        holidays = self.cal.holidays_set(2016)
        self.assertIn(date(2016, 1, 1), holidays)    # New Year
        self.assertIn(date(2016, 2, 8), holidays)    # Chinese new year
        self.assertIn(date(2016, 2, 9), holidays)    # Chinese new year
        self.assertIn(date(2016, 2, 10), holidays)   # Chinese new year
        self.assertIn(date(2016, 3, 25), holidays)   # Good Friday
        self.assertIn(date(2016, 3, 26), holidays)   # Day after Good Friday
        self.assertIn(date(2016, 3, 28), holidays)   # Easter Monday
        self.assertIn(date(2016, 4, 4), holidays)    # Ching Ming
        self.assertIn(date(2016, 5, 2), holidays)    # Labour Day (shifted)
        self.assertIn(date(2016, 5, 14), holidays)   # Buddha's Birthday
        self.assertIn(date(2016, 6, 9), holidays)    # Tuen Ng Festival
        self.assertIn(date(2016, 7, 1), holidays)    # HK SAR Establishment Day
        self.assertIn(date(2016, 9, 16), holidays)   # Day after Mid-Autumn
        self.assertIn(date(2016, 10, 1), holidays)   # National Day
        self.assertIn(date(2016, 10, 10), holidays)  # Chung Yeung Festival
        self.assertIn(date(2016, 12, 26), holidays)  # Christmas Day (shifted)
        self.assertIn(date(2016, 12, 27), holidays)  # Boxing Day (shifted)
    def test_year_2017(self):
        holidays = self.cal.holidays_set(2017)
        self.assertIn(date(2017, 1, 2), holidays)    # New Year (shifted)
        self.assertIn(date(2017, 1, 28), holidays)   # Chinese new year
        self.assertIn(date(2017, 1, 30), holidays)   # Chinese new year
        self.assertIn(date(2017, 1, 31), holidays)   # Chinese new year
        self.assertIn(date(2017, 4, 4), holidays)    # Ching Ming
        self.assertIn(date(2017, 4, 14), holidays)   # Good Friday
        self.assertIn(date(2017, 4, 15), holidays)   # Day after Good Friday
        self.assertIn(date(2017, 4, 17), holidays)   # Easter Monday
        self.assertIn(date(2017, 5, 1), holidays)    # Labour Day
        self.assertIn(date(2017, 5, 3), holidays)    # Buddha's Birthday
        self.assertIn(date(2017, 5, 30), holidays)   # Tuen Ng Festival
        self.assertIn(date(2017, 7, 1), holidays)    # HK SAR Establishment Day
        self.assertIn(date(2017, 10, 2), holidays)   # National Day (shifted)
        self.assertIn(date(2017, 10, 5), holidays)   # Day after Mid-Autumn
        self.assertIn(date(2017, 10, 28), holidays)  # Chung Yeung Festival
        self.assertIn(date(2017, 12, 25), holidays)  # Christmas Day
        self.assertIn(date(2017, 12, 26), holidays)  # Boxing Day
    def test_chingming_festival(self):
        # This is the same as the Taiwan test, just different spelling
        # Could move this into a Core test
        self.assertIn(date(2005, 4, 5), self.cal.holidays_set(2005))
        self.assertIn(date(2006, 4, 5), self.cal.holidays_set(2006))
        self.assertIn(date(2007, 4, 5), self.cal.holidays_set(2007))
        self.assertIn(date(2008, 4, 4), self.cal.holidays_set(2008))
        self.assertIn(date(2010, 4, 5), self.cal.holidays_set(2010))
        self.assertIn(date(2011, 4, 5), self.cal.holidays_set(2011))
        self.assertIn(date(2012, 4, 4), self.cal.holidays_set(2012))
        self.assertIn(date(2013, 4, 4), self.cal.holidays_set(2013))
        self.assertIn(date(2014, 4, 5), self.cal.holidays_set(2014))
        self.assertIn(date(2015, 4, 4), self.cal.holidays_set(2015))
        self.assertIn(date(2016, 4, 4), self.cal.holidays_set(2016))
        self.assertIn(date(2017, 4, 4), self.cal.holidays_set(2017))
        self.assertIn(date(2018, 4, 5), self.cal.holidays_set(2018))
class JapanTest(GenericCalendarTest):
    """Holiday-set regression tests for the Japan calendar."""
    cal_class = Japan
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # new year
        self.assertIn(date(2013, 2, 11), holidays)   # Foundation Day
        self.assertIn(date(2013, 3, 20), holidays)   # Vernal Equinox Day
        self.assertIn(date(2013, 4, 29), holidays)   # Showa Day
        self.assertIn(date(2013, 5, 3), holidays)    # Constitution Memorial Day
        self.assertIn(date(2013, 5, 4), holidays)    # Greenery Day
        self.assertIn(date(2013, 5, 5), holidays)    # Children's Day
        self.assertIn(date(2013, 9, 23), holidays)   # Autumnal Equinox Day
        self.assertIn(date(2013, 11, 3), holidays)   # Culture Day
        self.assertIn(date(2013, 11, 23), holidays)  # Labour Thanksgiving Day
        self.assertIn(date(2013, 12, 23), holidays)  # The Emperor's Birthday
        # Variable days
        self.assertIn(date(2013, 1, 14), holidays)   # Coming of Age Day
        self.assertIn(date(2013, 7, 15), holidays)   # Marine Day
        self.assertIn(date(2013, 9, 16), holidays)   # Respect-for-the-Aged Day
        self.assertIn(date(2013, 10, 14), holidays)  # Health and Sports Day
    def test_year_2016(self):
        # Mountain Day was introduced starting with year 2016.
        # Before 2016, no Mountain Day
        holidays = self.cal.holidays_set(2014)
        self.assertNotIn(date(2014, 8, 11), holidays)  # Mountain Day
        holidays = self.cal.holidays_set(2015)
        self.assertNotIn(date(2015, 8, 11), holidays)  # Mountain Day
        # After 2016, yes
        holidays = self.cal.holidays_set(2016)
        self.assertIn(date(2016, 8, 11), holidays)  # Mountain Day
        holidays = self.cal.holidays_set(2017)
        self.assertIn(date(2017, 8, 11), holidays)  # Mountain Day
class MalaysiaTest(GenericCalendarTest):
    """Holiday-set regression tests for the Malaysia calendar."""
    cal_class = Malaysia
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # New Year's Day
        self.assertIn(date(2013, 1, 28), holidays)   # Thaipusam
        self.assertIn(date(2013, 2, 1), holidays)    # Federal Territory Day
        self.assertIn(date(2013, 2, 11), holidays)   # 2nd day of Lunar NY
        self.assertIn(date(2013, 2, 12), holidays)   # 1st day (Sun lieu)
        self.assertIn(date(2013, 5, 1), holidays)    # Workers' Day
        self.assertIn(date(2013, 5, 24), holidays)   # Vesak Day
        self.assertIn(date(2013, 8, 8), holidays)    # 1st day eid-al-fitr
        self.assertIn(date(2013, 8, 9), holidays)    # 2nd day eid-al-fitr
        self.assertIn(date(2013, 8, 31), holidays)   # National Day
        self.assertIn(date(2013, 9, 16), holidays)   # Malaysia Day
        self.assertIn(date(2013, 10, 15), holidays)  # Hari Raya Haji
        self.assertIn(date(2013, 11, 2), holidays)   # Deepavali
        self.assertIn(date(2013, 11, 5), holidays)   # Islamic New Year
        self.assertIn(date(2013, 12, 25), holidays)  # Xmas
    def test_year_2012(self):
        # NOTE(review): the original inline comments in this method were
        # shifted by one line; they are realigned below. CNY 2012 fell on
        # Jan 23, so Jan 24 is the 2nd day; Feb 1 is Federal Territory Day;
        # May 1 is Workers' Day; May 5 is presumably Vesak Day -- confirm.
        holidays = self.cal.holidays_set(2012)
        self.assertIn(date(2012, 1, 1), holidays)    # New Year's Day
        self.assertIn(date(2012, 1, 24), holidays)   # 2nd day of Lunar NY
        self.assertIn(date(2012, 2, 1), holidays)    # Federal Territory Day
        self.assertIn(date(2012, 5, 1), holidays)    # Workers' Day
        self.assertIn(date(2012, 5, 5), holidays)    # Vesak Day (TODO confirm)
        self.assertIn(date(2012, 8, 19), holidays)   # 1st day eid-al-fitr
        self.assertIn(date(2012, 8, 20), holidays)   # 2nd day eid-al-fitr
        self.assertIn(date(2012, 8, 31), holidays)   # National Day
        self.assertIn(date(2012, 9, 16), holidays)   # Malaysia Day
        self.assertIn(date(2012, 10, 26), holidays)  # Hari Raya Haji
        self.assertIn(date(2012, 11, 13), holidays)  # Islamic New Year
        self.assertIn(date(2012, 11, 15), holidays)  # Deepavali
        self.assertIn(date(2012, 12, 25), holidays)  # Xmas
    def test_nuzul_al_quran(self):
        holidays = self.cal.holidays_set(2017)
        self.assertIn(date(2017, 6, 12), holidays)
        holidays = self.cal.holidays_set(2018)
        self.assertIn(date(2018, 6, 1), holidays)
class QatarTest(GenericCalendarTest):
    """Holiday-set regression tests for the Qatar calendar."""
    cal_class = Qatar
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 7, 9), holidays)    # start ramadan
        # warning, the official date was (2013, 8, 10)
        self.assertIn(date(2013, 8, 8), holidays)    # eid al fitr
        # The official date was (2013, 10, 14)
        self.assertIn(date(2013, 10, 15), holidays)  # eid al adha
        self.assertIn(date(2013, 10, 16), holidays)  # eid al adha
        self.assertIn(date(2013, 10, 17), holidays)  # eid al adha
        self.assertIn(date(2013, 10, 18), holidays)  # eid al adha
        self.assertIn(date(2013, 12, 18), holidays)  # National Day
    def test_weekend(self):
        # In Qatar, week-end days are Friday / Saturday.
        # (The original comment said "Friday / Sunday", contradicting the
        # assertion below that Sunday is a working day.)
        weekend_day = date(2017, 5, 12)      # This is a Friday
        non_weekend_day = date(2017, 5, 14)  # This is a Sunday
        self.assertFalse(self.cal.is_working_day(weekend_day))
        self.assertTrue(self.cal.is_working_day(non_weekend_day))
class SingaporeTest(GenericCalendarTest):
    """Holiday-set regression tests for the Singapore calendar."""
    cal_class = Singapore
    def test_CNY_2010(self):
        # CNY day 1 fell on a Sunday, so a rolled replacement day follows.
        holidays = self.cal.holidays_set(2010)
        self.assertIn(date(2010, 2, 14), holidays)  # CNY1
        self.assertIn(date(2010, 2, 15), holidays)  # CNY2
        self.assertIn(date(2010, 2, 16), holidays)  # Rolled day for CNY
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # New Year
        self.assertIn(date(2013, 2, 10), holidays)   # CNY1
        self.assertIn(date(2013, 2, 11), holidays)   # CNY2
        self.assertIn(date(2013, 2, 12), holidays)   # Rolled day for CNY
        self.assertIn(date(2013, 3, 29), holidays)   # Good Friday
        self.assertIn(date(2013, 5, 1), holidays)    # Labour Day
        self.assertIn(date(2013, 5, 24), holidays)   # Vesak Day
        self.assertIn(date(2013, 8, 8), holidays)    # Hari Raya Puasa
        self.assertIn(date(2013, 8, 9), holidays)    # National Day
        self.assertIn(date(2013, 10, 15), holidays)  # Hari Raya Haji
        self.assertIn(date(2013, 11, 3), holidays)   # Deepavali
        self.assertIn(date(2013, 11, 4), holidays)   # Deepavali shift
        self.assertIn(date(2013, 12, 25), holidays)  # Christmas Day
    def test_year_2018(self):
        holidays = self.cal.holidays_set(2018)
        self.assertIn(date(2018, 1, 1), holidays)    # New Year
        self.assertIn(date(2018, 2, 16), holidays)   # CNY
        self.assertIn(date(2018, 2, 17), holidays)   # CNY
        self.assertIn(date(2018, 3, 30), holidays)   # Good Friday
        self.assertIn(date(2018, 5, 1), holidays)    # Labour Day
        self.assertIn(date(2018, 5, 29), holidays)   # Vesak Day
        self.assertIn(date(2018, 6, 15), holidays)   # Hari Raya Puasa
        self.assertIn(date(2018, 8, 9), holidays)    # National Day
        self.assertIn(date(2018, 8, 22), holidays)   # Hari Raya Haji
        self.assertIn(date(2018, 11, 6), holidays)   # Deepavali
        self.assertIn(date(2018, 12, 25), holidays)  # Christmas Day
    def test_fixed_holiday_shift(self):
        # Labour Day was on a Sunday in 2016
        holidays = self.cal.holidays_set(2016)
        # Labour Day (sunday)
        self.assertIn(date(2016, 5, 1), holidays)
        # Shifted day (Monday)
        self.assertIn(date(2016, 5, 2), holidays)
class SouthKoreaTest(GenericCalendarTest):
    """Holiday-set regression tests for the South Korea calendar."""
    cal_class = SouthKorea
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # new year
        self.assertIn(date(2013, 3, 1), holidays)    # Independence day
        self.assertIn(date(2013, 5, 5), holidays)    # children's day
        self.assertIn(date(2013, 6, 6), holidays)    # Memorial day
        self.assertIn(date(2013, 8, 15), holidays)   # Liberation day
        self.assertIn(date(2013, 10, 3), holidays)   # National Foundation Day
        self.assertIn(date(2013, 10, 9), holidays)   # Hangul Day
        self.assertIn(date(2013, 12, 25), holidays)  # Christmas
        # Variable days
        # Presumably Korean New Year (Seollal, Feb 9-11), Buddha's
        # Birthday (May 17) and Chuseok (Sep 18-20) -- TODO confirm.
        self.assertIn(date(2013, 2, 9), holidays)
        self.assertIn(date(2013, 2, 10), holidays)
        self.assertIn(date(2013, 2, 11), holidays)
        self.assertIn(date(2013, 5, 17), holidays)
        self.assertIn(date(2013, 9, 18), holidays)
        self.assertIn(date(2013, 9, 19), holidays)
        self.assertIn(date(2013, 9, 20), holidays)
class TaiwanTest(GenericCalendarTest):
    """Holiday-set regression tests for the Taiwan calendar."""
    cal_class = Taiwan
    def test_year_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)    # New Year
        self.assertIn(date(2013, 2, 9), holidays)    # Chinese new year's eve
        self.assertIn(date(2013, 2, 10), holidays)   # Chinese new year
        self.assertIn(date(2013, 2, 11), holidays)   # Spring Festival
        self.assertIn(date(2013, 2, 12), holidays)   # Spring Festival
        self.assertIn(date(2013, 2, 28), holidays)   # 228 Peace Memorial Day
        self.assertIn(date(2013, 4, 4), holidays)    # Children's Day
        self.assertIn(date(2013, 6, 12), holidays)   # Dragon Boat Festival
        self.assertIn(date(2013, 9, 19), holidays)   # Mid-Autumn Festival
        self.assertIn(date(2013, 10, 10), holidays)  # National Day
    def test_qingming_festival(self):
        # Qingming falls on Apr 4 or Apr 5 depending on the solar term.
        self.assertIn(date(2001, 4, 5), self.cal.holidays_set(2001))
        self.assertIn(date(2002, 4, 5), self.cal.holidays_set(2002))
        self.assertIn(date(2005, 4, 5), self.cal.holidays_set(2005))
        self.assertIn(date(2006, 4, 5), self.cal.holidays_set(2006))
        self.assertIn(date(2007, 4, 5), self.cal.holidays_set(2007))
        self.assertIn(date(2008, 4, 4), self.cal.holidays_set(2008))
        self.assertIn(date(2010, 4, 5), self.cal.holidays_set(2010))
        self.assertIn(date(2011, 4, 5), self.cal.holidays_set(2011))
        self.assertIn(date(2012, 4, 4), self.cal.holidays_set(2012))
        self.assertIn(date(2013, 4, 4), self.cal.holidays_set(2013))
        self.assertIn(date(2014, 4, 4), self.cal.holidays_set(2014))
|
sayoun/workalendar
|
workalendar/tests/test_asia.py
|
Python
|
mit
| 17,173
|
#!/usr/bin/env python3
import os
import sys
import argparse
from .usage import usage
from .smdimerge import smdimerge
from .timerge import timerge
from .maganim import maganim
def parse_args(args):
    """Dispatch the sub-command named in ``args[0]``.

    Splits the remaining arguments into positionals and ``-``-prefixed
    options, runs the matching sub-command, and exits with its return
    code.  Falls back to the usage screen for unknown commands,
    ``--help``, an empty argument list, or a negative return code.
    """
    dispatch = {
        "smdimerge": smdimerge,
        "timerge": timerge,
        "maganim": maganim,
    }
    if "--help" in args or len(args) == 0:
        return usage(args)
    command = args[0]
    if command in dispatch:
        positional = [a for a in args[1:] if a[0] != "-"]
        options = [a for a in args[1:] if a[0] == "-"]
        rcode = dispatch[command](positional, options)
        if rcode < 0:
            return usage(args)
        sys.exit(rcode)
    return usage(args)
|
KoffeinFlummi/ArmaUtils
|
armautils_cli/__init__.py
|
Python
|
mit
| 701
|
import time
from django.conf import settings
from django.contrib.auth.models import User
from selenium.common.exceptions import StaleElementReferenceException
from inthe_am.taskmanager.models import TaskStore
def find_element_and_do(
    selector,
    args=None,
    kwargs=None,
    test=lambda x: x.visible,
    action=lambda x: x.click(),
    retries=3,
    retry_sleep=1,
    post_sleep=1,
):
    """Repeatedly run ``selector`` and apply ``action`` to the first match
    accepted by ``test``.

    Args:
        selector: Callable returning a sequence of candidate elements.
        args: Positional arguments forwarded to ``selector``.
        kwargs: Keyword arguments forwarded to ``selector``.
        test: Predicate deciding whether a candidate is actionable.
        action: Callable applied to the accepted candidate.
        retries: Number of selector attempts before giving up.
        retry_sleep: Seconds to wait between attempts.
        post_sleep: Seconds to wait after a successful action.

    Returns:
        True if an element was found and acted upon, False otherwise.
    """
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    # Bug fix: the loop previously iterated a hard-coded ``range(3)``,
    # silently ignoring the ``retries`` parameter.
    for _attempt in range(retries):
        matches = selector(*args, **kwargs)
        for match in matches:
            try:
                if test(match):
                    action(match)
                    time.sleep(post_sleep)
                    return True
            except StaleElementReferenceException:
                # The element went stale between lookup and use;
                # fall through to the next candidate / attempt.
                pass
        time.sleep(retry_sleep)
    return False
def monkey_patch_browser(context):
    # Override window.confirm in the browser under test so confirmation
    # dialogs are auto-accepted; the message is saved in the JS global
    # ``lastConfirmationMessage`` for later inspection by the tests.
    # The newlines are collapsed to spaces so the snippet is injected
    # as a single line of JavaScript.
    context.browser.execute_script(
        """
        window.confirm = function(message) {
            lastConfirmationMessage = message; return true;
        }
        """.replace(
            "\n", " "
        )
    )
def get_user():
    """Return the shared integration-test user, creating it on first use."""
    user, _created = User.objects.get_or_create(
        username="integration-test", email=settings.TESTING_LOGIN_USER
    )
    # Always (re)set the password so the fixture matches the settings.
    user.set_password(settings.TESTING_LOGIN_PASSWORD)
    user.save()
    return user
def get_store():
    """Return the integration-test user's task store, configuring it if needed."""
    store = TaskStore.get_for_user(get_user())
    if not store.configured:
        store.autoconfigure_taskd()
    return store
|
coddingtonbear/inthe.am
|
inthe_am/taskmanager/features/steps/utils.py
|
Python
|
agpl-3.0
| 1,490
|
from __future__ import unicode_literals
import os
import csv
import json
from jsonpath_rw import parse
import dvc.logger as logger
from dvc.exceptions import OutputNotFoundError, BadMetricError, NoMetricsError
from dvc.utils.compat import builtin_str, open, StringIO, csv_reader
def _read_metric_json(fd, json_path):
    """Return the values matched by *json_path* in the JSON stream *fd*."""
    expression = parse(json_path)
    return [match.value for match in expression.find(json.load(fd))]
def _get_values(row):
if isinstance(row, dict):
return list(row.values())
else:
return row
def _do_read_metric_xsv(reader, row, col):
if col is not None and row is not None:
return [reader[row][col]]
elif col is not None:
return [r[col] for r in reader]
elif row is not None:
return _get_values(reader[row])
return [_get_values(r) for r in reader]
def _read_metric_hxsv(fd, hxsv_path, delimiter):
    """Read "row,col"-addressed values from a delimited file with a header."""
    parts = hxsv_path.split(",")
    # An empty component means "select the whole axis".
    row = int(parts[0]) if parts[0] else None
    col = parts[1] if len(parts) > 1 and parts[1] else None
    rows = list(csv.DictReader(fd, delimiter=builtin_str(delimiter)))
    return _do_read_metric_xsv(rows, row, col)
def _read_metric_xsv(fd, xsv_path, delimiter):
    """Read "row,col"-addressed values from a headerless delimited file."""
    parts = xsv_path.split(",")
    # An empty component means "select the whole axis".
    row = int(parts[0]) if parts[0] else None
    col = int(parts[1]) if len(parts) > 1 and parts[1] else None
    rows = list(csv.reader(fd, delimiter=builtin_str(delimiter)))
    return _do_read_metric_xsv(rows, row, col)
def _read_typed_metric(typ, xpath, fd):
    """Dispatch to the reader matching *typ*; unknown types are read raw."""
    if typ == "json":
        return _read_metric_json(fd, xpath)
    if typ in ("csv", "tsv"):
        return _read_metric_xsv(fd, xpath, "," if typ == "csv" else "\t")
    if typ in ("hcsv", "htsv"):
        return _read_metric_hxsv(fd, xpath, "," if typ == "hcsv" else "\t")
    return fd.read().strip()
def _format_csv(content, delimiter):
    """Align delimited text into fixed-width columns.

    Args:
        content (str): Raw delimited metric content.
        delimiter (str): Value separator.

    Returns:
        str: The same table with every column padded to a common width
        (longest cell in the column plus two spaces).
    """
    rows = list(csv_reader(StringIO(content), delimiter=builtin_str(delimiter)))
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    padded_rows = (
        " ".join(
            "{cell:{width}}".format(cell=cell, width=width + 2)
            for cell, width in zip(row, widths)
        )
        for row in rows
    )
    return "\n".join(padded_rows)
def _format_output(content, typ):
    """Tabularize *content* when *typ* names a csv/tsv variant.

    Args:
        content (str): The content of a metric.
        typ (str): The type of metric -- (raw|json|tsv|htsv|csv|hcsv).

    Returns:
        str: Content in a raw or tabular format.
    """
    type_name = str(typ)
    if "csv" in type_name:
        return _format_csv(content, delimiter=",")
    if "tsv" in type_name:
        return _format_csv(content, delimiter="\t")
    return content
def _read_metric(fd, typ=None, xpath=None, rel_path=None, branch=None):
    """Parse one metric stream; return None (with a warning) on any failure."""
    normalized_typ = typ.lower().strip() if typ else typ
    try:
        if xpath:
            return _read_typed_metric(normalized_typ, xpath.strip(), fd)
        return _format_output(fd.read().strip(), normalized_typ)
    # Json path library has to be replaced or wrapped in
    # order to fix this too broad except clause.
    except Exception:
        logger.warning(
            "unable to read metric in '{}' in branch '{}'".format(
                rel_path, branch
            ),
            parse_exception=True,
        )
        return None
def _collect_metrics(self, path, recursive, typ, xpath, branch):
    """Gather every stage output flagged as a metric.

    Args:
        path (str): Path to a metric file or a directory.
        recursive (bool): If path is a directory, search it recursively.
        typ (str): Metric type override (raw|json|tsv|htsv|csv|hcsv).
        xpath (str): Path expression override.
        branch (str): Branch the metrics are being collected from.

    Returns:
        list(tuple): (output, typ, xpath) triples.
    """
    candidates = [out for stage in self.stages() for out in stage.outs]
    if path:
        try:
            candidates = self.find_outs_by_path(
                path, outs=candidates, recursive=recursive
            )
        except OutputNotFoundError:
            logger.debug(
                "stage file not for found for '{}' in branch '{}'".format(
                    path, branch
                )
            )
            return []
    collected = []
    for out in candidates:
        if not out.metric:
            continue
        metric_typ = typ
        metric_xpath = xpath
        # Per-output metadata only applies when no explicit type was given.
        if not typ and isinstance(out.metric, dict):
            metric_typ = out.metric.get(out.PARAM_METRIC_TYPE, typ)
            metric_xpath = out.metric.get(out.PARAM_METRIC_XPATH, xpath)
        collected.append((out, metric_typ, metric_xpath))
    return collected
def _read_metrics_filesystem(path, typ, xpath, rel_path, branch):
    """Read a metric straight from the local filesystem; None if missing."""
    if os.path.exists(path):
        with open(path, "r") as fd:
            return _read_metric(
                fd, typ=typ, xpath=xpath, rel_path=rel_path, branch=branch
            )
    return None
def _read_metrics(self, metrics, branch):
    """Read and format the content of each collected metric.

    Args:
        metrics (list): (output, typ, xpath) tuples from _collect_metrics.
        branch (str): Branch to look up for metrics.

    Returns:
        dict: Maps each output's relative path to its formatted content,
        e.g. {'metric.csv': "value_mse  deviation_mse  data_set\n..."}.
    """
    contents = {}
    for out, typ, xpath in metrics:
        assert out.scheme == "local"
        if not typ:
            # Fall back to the file extension (e.g. ".csv" -> "csv").
            typ = os.path.splitext(out.path.lower())[1].replace(".", "")
        if out.use_cache:
            value = _read_metrics_filesystem(
                self.cache.local.get(out.checksum),
                typ=typ,
                xpath=xpath,
                rel_path=out.rel_path,
                branch=branch,
            )
        else:
            fd = self.tree.open(out.path)
            value = _read_metric(
                fd, typ=typ, xpath=xpath, rel_path=out.rel_path, branch=branch
            )
        if not value:
            continue
        contents[out.rel_path] = value
    return contents
def show(
    self,
    path=None,
    typ=None,
    xpath=None,
    all_branches=False,
    all_tags=False,
    recursive=False,
):
    """Collect and read metrics for each selected branch.

    Returns a {branch: {rel_path: content}} mapping; raises
    BadMetricError / NoMetricsError when nothing matched.
    """
    per_branch = {}
    for branch in self.brancher(all_branches=all_branches, all_tags=all_tags):
        entries = _collect_metrics(self, path, recursive, typ, xpath, branch)
        branch_metrics = _read_metrics(self, entries, branch)
        if branch_metrics:
            per_branch[branch] = branch_metrics
    if per_branch:
        return per_branch
    if path:
        raise BadMetricError(path)
    raise NoMetricsError()
|
dataversioncontrol/dvc
|
dvc/repo/metrics/show.py
|
Python
|
apache-2.0
| 7,590
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBaseESG import *
class agilentE4425B(agilentBaseESG):
    "Agilent E4425B ESG-AP IVI RF signal generator driver"
    def __init__(self, *args, **kwargs):
        # Set the default instrument ID only when a subclass has not
        # already provided one (setdefault keeps an existing value).
        # NOTE(review): the id string 'ESG-A4000B' does not match the
        # E4425B model name -- confirm against the instrument's *IDN?
        # response before relying on it.
        self.__dict__.setdefault('_instrument_id', 'ESG-A4000B')
        super(agilentE4425B, self).__init__(*args, **kwargs)
        # Supported carrier frequency range for this model: 250 kHz - 3 GHz.
        self._frequency_low = 250e3
        self._frequency_high = 3e9
|
Diti24/python-ivi
|
ivi/agilent/agilentE4425B.py
|
Python
|
mit
| 1,494
|
"""playbook URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Django admin interface.
    url(r'^admin/', admin.site.urls),
    # Routes delegated to the 'situation' application's URLconf.
    url(r'^situation/', include('situation.urls')),
]
|
sir-code-a-lot/playbook
|
playbook/urls.py
|
Python
|
mit
| 826
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Indexed slices."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import warnings
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_like
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import type_spec
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
#
# Note: these can all be changed to regular imports once all code has been
# updated to refer the symbols defined in this module directly, rather than
# using the backwards-compatible aliases in ops.py. (E.g.,
# "indexed_slices.IndexedSlices" rather than "ops.IndexedSlices".)
math_ops = LazyLoader(
"math_ops", globals(),
"tensorflow.python.ops.math_ops")
ops = LazyLoader(
"ops", globals(), "tensorflow.python.framework.ops")
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
tensor_util = LazyLoader(
"tensor_util", globals(),
"tensorflow.python.framework.tensor_util")
# pylint: disable=protected-access
_TensorLike = tensor_like._TensorLike
# pylint: enable=protected-access
@tf_export("IndexedSlices")
class IndexedSlices(_TensorLike, composite_tensor.CompositeTensor):
  """A sparse representation of a set of tensor slices at given indices.
  This class is a simple wrapper for a pair of `Tensor` objects:
  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.
  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.
  The dense tensor `dense` represented by an `IndexedSlices` `slices` has
  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```
  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients
  (e.g. `tf.gather`).
  Contrast this representation with
  `tf.SparseTensor`,
  which uses multi-dimensional indices and scalar values.
  """
  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`.

    Args:
      values: A `Tensor` of slice values, shape `[D0, D1, ..., Dn]`.
      indices: A 1-D integer `Tensor` of slice indices, shape `[D0]`.
      dense_shape: Optional 1-D `Tensor` giving the shape of the
        corresponding dense tensor, or `None` if unknown.
    """
    # Validates that all inputs live in the same graph (side effect only).
    ops._get_graph_from_inputs([values, indices, dense_shape])  # pylint: disable=protected-access
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape
  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values
  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices
  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape
  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    # The object itself has no name; delegate to the values tensor.
    return self.values.name
  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device
  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op
  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype
  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph
  def __str__(self):
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values,
        (", dense_shape=%s" %
         self._dense_shape) if self._dense_shape is not None else "")
  def __neg__(self):
    # Negation applies to the values only; indices/shape are unchanged.
    return IndexedSlices(-self.values, self.indices, self.dense_shape)
  @property
  def _type_spec(self):
    """Builds an `IndexedSlicesSpec` describing this value's static structure."""
    # Rows of `values` and entries of `indices` must agree in number.
    indices_shape = self._indices.shape.merge_with(self._values.shape[:1])
    # Leading dim of the dense shape is unknown; trailing dims come from values.
    dense_shape = tensor_shape.TensorShape([None]).concatenate(
        self._values.shape[1:])
    if self._dense_shape is not None:
      dense_shape_dtype = self._dense_shape.dtype
      dense_shape = dense_shape.merge_with(
          tensor_util.constant_value_as_shape(self._dense_shape))
    else:
      dense_shape_dtype = None
    return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
                             dense_shape_dtype, indices_shape)
  def _shape_invariant_to_type_spec(self, shape):
    # From tf.while_loop docs: "If a loop variable is an IndexedSlices, the
    # shape invariant must be a shape invariant of the values tensor of the
    # IndexedSlices. It means the shapes of the three tensors of the
    # IndexedSlices are (shape, [shape[0]], [shape.ndims])."
    indices_shape = shape[:1]
    dense_shape = tensor_shape.TensorShape([None]).concatenate(shape[1:])
    if self._dense_shape is None:
      dense_shape_dtype = None
    else:
      dense_shape_dtype = self._dense_shape.dtype
    return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
                             dense_shape_dtype, indices_shape)
  def consumers(self):
    # Delegates to the protected helper (defined outside this view).
    return self._consumers()
# Graph-free counterpart of `IndexedSlices` holding concrete values
# (used e.g. when returning evaluated results instead of symbolic tensors).
IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])
@tf_export("IndexedSlicesSpec")
class IndexedSlicesSpec(type_spec.TypeSpec):
  """Type specification for a `tf.IndexedSlices`."""
  __slots__ = ["_shape", "_values_dtype", "_indices_dtype",
               "_dense_shape_dtype", "_indices_shape"]
  # The value type this spec describes.
  value_type = property(lambda self: IndexedSlices)
  def __init__(self, shape=None, dtype=dtypes.float32,
               indices_dtype=dtypes.int64, dense_shape_dtype=None,
               indices_shape=None):
    """Constructs a type specification for a `tf.IndexedSlices`.
    Args:
      shape: The dense shape of the `IndexedSlices`, or `None` to allow any
        dense shape.
      dtype: `tf.DType` of values in the `IndexedSlices`.
      indices_dtype: `tf.DType` of the `indices` in the `IndexedSlices`. One
        of `tf.int32` or `tf.int64`.
      dense_shape_dtype: `tf.DType` of the `dense_shape` in the `IndexedSlices`.
        One of `tf.int32`, `tf.int64`, or `None` (if the `IndexedSlices` has
        no `dense_shape` tensor).
      indices_shape: The shape of the `indices` component, which indicates
        how many slices are in the `IndexedSlices`.
    """
    self._shape = tensor_shape.as_shape(shape)
    self._values_dtype = dtypes.as_dtype(dtype)
    self._indices_dtype = dtypes.as_dtype(indices_dtype)
    if dense_shape_dtype is None:
      # `None` encodes "this IndexedSlices has no dense_shape tensor".
      self._dense_shape_dtype = None
    else:
      self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)
    self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)
  def _serialize(self):
    # Tuple of constructor arguments; used for equality/hashing of specs.
    return (self._shape, self._values_dtype, self._indices_dtype,
            self._dense_shape_dtype, self._indices_shape)
  @property
  def _component_specs(self):
    """TensorSpecs for (values, indices[, dense_shape]) component tensors."""
    value_shape = self._indices_shape.concatenate(self._shape[1:])
    specs = [
        tensor_spec.TensorSpec(value_shape, self._values_dtype),
        tensor_spec.TensorSpec(self._indices_shape, self._indices_dtype)]
    if self._dense_shape_dtype is not None:
      specs.append(
          tensor_spec.TensorSpec([self._shape.ndims], self._dense_shape_dtype))
    return tuple(specs)
  def _to_components(self, value):
    # Decompose an IndexedSlices into its component tensors.
    if value.dense_shape is None:
      return (value.values, value.indices)
    else:
      return (value.values, value.indices, value.dense_shape)
  def _from_components(self, tensor_list):
    # In TF1 graph-free contexts the components may be plain ndarrays; in
    # that case reconstruct the value-holding namedtuple instead.
    if (all(isinstance(t, np.ndarray) for t in tensor_list) and
        not tf2.enabled()):
      if len(tensor_list) == 2:
        return IndexedSlicesValue(tensor_list[0], tensor_list[1], None)
      else:
        return IndexedSlicesValue(*tensor_list)
    else:
      return IndexedSlices(*tensor_list)
@tf_export(v1=["convert_to_tensor_or_indexed_slices"])
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.
  If `value` is an `IndexedSlices` or `SparseTensor` it is returned
  unmodified. Otherwise, it is converted to a `Tensor` using
  `convert_to_tensor()`.
  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
      by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # Public wrapper: same as the internal helper with refs disallowed.
  return internal_convert_to_tensor_or_indexed_slices(
      value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
                                                 dtype=None,
                                                 name=None,
                                                 as_ref=False):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.
  If `value` is an `IndexedSlices` or `SparseTensor` it is returned
  unmodified. Otherwise, it is converted to a `Tensor` using
  `convert_to_tensor()`.
  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
      by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.
  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # An EagerTensor used while building a graph must go through full tensor
  # conversion (so it gets captured into the graph) rather than the
  # pass-through branch below.
  if isinstance(value, ops.EagerTensor) and not context.executing_eagerly():
    return ops.internal_convert_to_tensor(
        value, dtype=dtype, name=name, as_ref=as_ref)
  elif isinstance(value, _TensorLike):
    # Already tensor-like: only validate dtype compatibility, then return
    # the value unchanged.
    if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
      raise ValueError(
          "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
          (dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
    return value
  else:
    return ops.internal_convert_to_tensor(
        value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
                                                   dtype=None,
                                                   name=None,
                                                   as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.
  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.
  Returns:
    A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # `collections.Sequence` was a deprecated alias removed in Python 3.10;
  # the abstract base class lives in `collections.abc` (Python 3.3+).
  if not isinstance(values, collections.abc.Sequence):
    raise TypeError("values must be a sequence.")
  ret = []
  for i, value in enumerate(values):
    if value is None:
      # `None` entries are passed through untouched.
      ret.append(value)
    else:
      # Give each converted element a position-suffixed name.
      n = None if name is None else "%s_%d" % (name, i)
      ret.append(
          internal_convert_to_tensor_or_indexed_slices(
              value, dtype=dtype, name=n, as_ref=as_ref))
  return ret
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.
  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Public wrapper: same as the internal helper with refs disallowed.
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.
  NOTE(mrry): This function is potentially expensive.
  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested.
  Returns:
    A dense Tensor representing the values in the given IndexedSlices.
  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  # Refs are never produced for the densified result.
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    # Without a dense_shape we cannot know the output size.
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  if not context.executing_eagerly():
    # Warn when densifying would materialize a very large tensor (or a
    # tensor of unknown size), since this is a common hidden memory cost.
    dense_shape_value = tensor_util.constant_value(value.dense_shape)
    if dense_shape_value is not None:
      num_elements = np.prod(dense_shape_value)
      if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
        warnings.warn(
            "Converting sparse IndexedSlices to a dense Tensor with %d "
            "elements. This may consume a large amount of memory." %
            num_elements)
    else:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
          "This may consume a large amount of memory.")
  # Scatter-add the slices into a zero-initialized dense tensor.
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
# Make `IndexedSlices` implicitly convertible to a dense `Tensor` wherever
# the framework performs tensor conversion (potentially expensive).
tensor_conversion_registry.register_tensor_conversion_function(
    IndexedSlices, _indexed_slices_to_tensor)
|
chemelnucfin/tensorflow
|
tensorflow/python/framework/indexed_slices.py
|
Python
|
apache-2.0
| 16,012
|
"""Support for OpenTherm Gateway sensors."""
import logging
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from .const import DATA_GATEWAYS, DATA_OPENTHERM_GW, SENSOR_INFO
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the OpenTherm Gateway sensors."""
    # Only set up when discovered by the opentherm_gw component itself.
    if discovery_info is None:
        return
    gw_dev = hass.data[DATA_OPENTHERM_GW][DATA_GATEWAYS][discovery_info]
    # SENSOR_INFO maps variable name -> (device_class, unit, name format).
    entities = [
        OpenThermSensor(gw_dev, var, info[0], info[1], info[2])
        for var, info in SENSOR_INFO.items()
    ]
    async_add_entities(entities)
class OpenThermSensor(Entity):
    """Representation of an OpenTherm Gateway sensor."""
    def __init__(self, gw_dev, var, device_class, unit, friendly_name_format):
        """Initialize the OpenTherm Gateway sensor."""
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, "{}_{}".format(var, gw_dev.gw_id), hass=gw_dev.hass
        )
        self._gateway = gw_dev
        # Name of the status variable this sensor reports.
        self._var = var
        # Latest value received from the gateway; None until first report.
        self._value = None
        self._device_class = device_class
        self._unit = unit
        self._friendly_name = friendly_name_format.format(gw_dev.name)
    async def async_added_to_hass(self):
        """Subscribe to updates from the component."""
        _LOGGER.debug("Added OpenTherm Gateway sensor %s", self._friendly_name)
        # NOTE(review): the unsubscribe callback returned by
        # async_dispatcher_connect is discarded, so the listener can never be
        # removed — confirm whether entity removal is a supported path here.
        async_dispatcher_connect(
            self.hass, self._gateway.update_signal, self.receive_report
        )
    @callback
    def receive_report(self, status):
        """Handle status updates from the component."""
        value = status.get(self._var)
        # Floats are rendered with one decimal place for display.
        if isinstance(value, float):
            value = "{:2.1f}".format(value)
        self._value = value
        self.async_schedule_update_ha_state()
    @property
    def name(self):
        """Return the friendly name of the sensor."""
        return self._friendly_name
    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class
    @property
    def state(self):
        """Return the state of the device."""
        return self._value
    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit
    @property
    def should_poll(self):
        """Return False because entity pushes its state."""
        return False
|
fbradyirl/home-assistant
|
homeassistant/components/opentherm_gw/sensor.py
|
Python
|
apache-2.0
| 2,747
|
# Sample code implementing LeNet-5 from Liu Liu
import tensorflow as tf
import numpy as np
import time
import h5py
# import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from copy import deepcopy
import os
import os.path
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
class cnnMNIST(object):
    """CNN for detection + source ID on sequences of gamma-ray spectra.

    Despite the name, this works on an HDF5 dataset of 15x1024 spectral
    sequences ('sequential_dataset_balanced.h5'), not MNIST. Builds a TF1
    graph in __init__ and trains with Adam on a 7-class softmax.
    """
    def __init__(self):
        self.lr = 1e-3          # Adam learning rate
        self.epochs = 100       # number of training iterations (one batch each)
        self.runname = 'cnnseqdetandsid_{}'.format(self.epochs)
        self.build_graph()
    def onehot_labels(self, labels):
        # Integer labels -> one-hot rows over 7 classes.
        out = np.zeros((len(labels), 7))
        for i in range(len(labels)):
            out[i, :] = np.eye(7)[int(labels[i])]
        return out
    def onenothot_labels(self, labels):
        # One-hot rows -> integer class labels (argmax per row).
        out = np.zeros((labels.shape[0],))
        for i in range(labels.shape[0]):
            out[i] = np.argmax(labels[i, :])
        return out
    def get_data(self):
        """Open the HDF5 dataset, trying a local path then an absolute one.

        Sets self.x_train / self.x_test to h5py groups (lazy access) and
        caches the training keys. NOTE(review): no self.y_* attributes are
        set here, but test_eval() and main() reference self.y_test.
        """
        # data_norm = True
        # data_augmentation = False
        try:
            f = h5py.File('./sequential_dataset_balanced.h5', 'r')
        except:
            f = h5py.File('/home/holiestcow/Documents/2017_fall/ne697_hayward/lecture/datacompetition/sequential_dataset_balanced.h5', 'r')
        X = f['train']
        X_test = f['test']
        self.x_train = X
        self.x_test = X_test
        # NOTE: always use the keylist to get data
        self.data_keylist = list(X.keys())
        return
    def batch(self, iterable, n=1, shuffle=True, small_test=True, usethesekeys = None, shortset=False):
        """Yield (spectra, one-hot labels, weights) per HDF5 key.

        NOTE(review): parameters `n` and `small_test` are accepted but
        never used; batching is per-key, not per-`n` samples.
        """
        if shuffle:
            self.shuffle()
        if usethesekeys is None:
            keylist = self.data_keylist
        else:
            keylist = usethesekeys
        if shortset:
            # Short evaluation: only the first 100 keys.
            keylist = usethesekeys[:100]
        # l = len(iterable)
        for i in range(len(keylist)):
            x = np.array(iterable[keylist[i]]['measured_spectra'])
            y = np.array(iterable[keylist[i]]['labels'])
            # NOTE: For using cnnfeatures sequential dataset
            # x = np.array(iterable[keylist[i]]['features'])
            # y = np.array(iterable[keylist[i]]['labels'])
            mask = y >= 0.5
            # y[mask] = 1
            z = np.ones((y.shape[0],))
            # z[mask] = 6.0
            y = self.onehot_labels(y)
            self.current_batch_length = x.shape[0]
            yield x, y, z
            # for j in range(self.current_batch_length):
            #     stuff = y[j,:]
            #     stuff = stuff.reshape((1, 7))
            #     yield x[j, :], stuff, z[j]
    def validation_batcher(self):
        """Yield raw validation arrays, one per sample key."""
        try:
            f = h5py.File('sequential_dataset_balanced.h5', 'r')
        except:
            f = h5py.File('/home/holiestcow/Documents/2017_fall/ne697_hayward/lecture/datacompetition/sequential_dataset_balanced.h5', 'r')
        g = f['validate']
        samplelist = list(g.keys())
        # samplelist = samplelist[:10]
        for i in range(len(samplelist)):
            self.current_sample_name = samplelist[i]
            data = np.array(g[samplelist[i]])
            self.current_batch_length = data.shape[0]
            yield data
            # for j in range(self.current_batch_length):
            #     current_x = np.squeeze(data[j, :, :])
            #     yield current_x
    def build_graph(self):
        """Build the TF1 graph: 2 conv/pool stages, FC layer, softmax loss."""
        feature_map1 = 32
        feature_map2 = 64
        final_hidden_nodes = 1024
        # Input: batches of 15 time steps x 1024 spectral channels.
        self.x = tf.placeholder(tf.float32, shape=[None, 15, 1024])
        self.y_ = tf.placeholder(tf.float32, shape=[None, 7])
        x_image = self.hack_1dreshape(self.x)
        # define conv-layer variables
        # Try 17, 3??
        print(x_image)
        W_conv1 = self.weight_variable([1, 9, 1, feature_map2])
        b_conv1 = self.bias_variable([feature_map2])
        W_conv2 = self.weight_variable([3, 1, feature_map2, feature_map1])
        b_conv2 = self.bias_variable([feature_map1])
        # x_image = tf.reshape(self.x, [-1, 28, 28, 1])
        h_conv1 = tf.nn.relu(self.conv2d(x_image, W_conv1) + b_conv1)
        # h_pool1 = self.max_pool_2x2(h_conv1)
        h_pool1 = self.max_pool_spectra(h_conv1)
        h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2) + b_conv2)
        # h_pool2 = self.max_pool_2x2(h_conv2)
        h_pool2 = self.max_pool_time(h_conv2)
        # W_conv3 = self.weight_variable([1, 3, feature_map2, feature_map3])
        # b_conv3 = self.bias_variable([feature_map3])
        # W_conv4 = self.weight_variable([1, 3, feature_map3, feature_map4])
        # b_conv4 = self.bias_variable([feature_map4])
        # h_conv3 = tf.nn.relu(self.conv2d(h_pool2, W_conv3) + b_conv3)
        # h_pool3 = self.max_pool_2x2(h_conv3)
        # h_conv4 = tf.nn.relu(self.conv2d(h_pool3, W_conv4) + b_conv4)
        # h_pool4 = self.max_pool_2x2(h_conv4)
        # densely/fully connected layer
        print(h_pool2.shape)
        sizing_variable = 512 * 8
        W_fc1 = self.weight_variable([sizing_variable * feature_map1, final_hidden_nodes])
        b_fc1 = self.bias_variable([final_hidden_nodes])
        h_pool2_flat = tf.reshape(h_pool2, [-1, sizing_variable * feature_map1])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
        # W_fc1 = self.weight_variable([64 * feature_map4, final_hidden_nodes])
        # b_fc1 = self.bias_variable([final_hidden_nodes])
        # h_pool4_flat = tf.reshape(h_pool4, [-1, 64 * feature_map4])
        # h_fc1 = tf.nn.relu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
        # dropout regularization
        self.keep_prob = tf.placeholder(tf.float32)
        h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        # linear classifier
        W_fc2 = self.weight_variable([final_hidden_nodes, 7])
        b_fc2 = self.bias_variable([7])
        self.y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
        # Now I have to weight to logits
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_, logits=self.y_conv))
        self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)
    def shuffle(self):
        # Shuffle only the key order; data itself stays in the HDF5 file.
        # rng_state = np.random.get_state()
        # np.random.set_state(rng_state)
        np.random.shuffle(self.data_keylist)
        # np.random.set_state(rng_state)
        # np.random.shuffle(self.y_train)
        return
    def train(self):
        """Run the training loop; print short-set test metrics every 10 steps."""
        self.sess = tf.Session()
        init = tf.global_variables_initializer()
        self.sess.run(init)
        self.eval() # creating evaluation
        a = time.time()
        for i in range(self.epochs):
            if i % 10 == 0 and i != 0:
                counter = 0
                sum_acc = 0
                sum_loss = 0
                hits = 0
                meh = 0
                x_generator_test = self.batch(self.x_test,
                    usethesekeys=list(self.x_test.keys()), shortset=True)
                for j, k, z in x_generator_test:
                    # NOTE: quick and dirty preprocessing once again
                    # feedme = j / j.sum(axis=-1, keepdims=True)
                    feedme = j
                    accuracy, train_loss, prediction = self.sess.run(
                        [self.accuracy, self.loss, self.y_conv],
                        feed_dict={self.x: feedme,
                                   self.y_: k,
                                   self.keep_prob: 1.0})
                                   # self.weights: z})
                    sum_loss += np.sum(train_loss)
                    hits += np.sum(prediction)
                    sum_acc += accuracy
                    counter += feedme.shape[0]
                    meh += 1
                b = time.time()
                print('step {}:\navg acc {}\navg loss {}\ntotalhits {}\ntime elapsed: {} s'.format(i, sum_acc / meh, sum_loss / counter, hits, b-a))
            x_generator = self.batch(self.x_train, n=128)
            current_x, current_y, current_z = next(x_generator)
            self.sess.run([self.train_step], feed_dict={self.x: current_x,
                                                        self.y_: current_y,
                                                        self.keep_prob: 0.50})
            # self.shuffle()
    def eval(self):
        # Build accuracy/prediction ops from the logits.
        # self.time_index = np.arange(self.y_conv.get_shape()[0])
        self.prediction = tf.argmax(self.y_conv, 1)
        truth = tf.argmax(self.y_, 1)
        correct_prediction = tf.equal(self.prediction, truth)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    def test_eval(self):
        # NOTE(review): self.y_test is never assigned anywhere in this class
        # (get_data only sets x_train/x_test), so this method would raise
        # AttributeError if called; it is currently unused.
        self.eval()
        x_generator = self.batch(self.x_test, n=100, shuffle=False)
        y_generator = self.batch(self.y_test, n=100, shuffle=False)
        test_acc = []
        counter = 0
        for data in x_generator:
            test_acc += [self.sess.run(self.accuracy, feed_dict={
                self.x: data, self.y_: next(y_generator), self.keep_prob: 1.0})]
        total_test_acc = sum(test_acc) / float(len(test_acc))
        print('test accuracy %g' % total_test_acc)
    def weight_variable(self, shape):
        # Truncated-normal init for conv/FC weights.
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(self, shape):
        # Small positive bias to avoid dead ReLUs.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    def hack_1dreshape(self, x):
        # expand its dimensionality to fit into conv2d
        # tensor_expand = tf.expand_dims(x, 1)
        tensor_expand = tf.expand_dims(x, -1)
        return tensor_expand
    def conv2d(self, x, W):
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
    # def max_pool_2x2(self, x):
    #     return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
    #                           strides=[1, 2, 2, 1], padding='SAME')
    def max_pool_spectra(self, x):
        # Pool along the spectral (channel) axis only.
        return tf.nn.max_pool(x, ksize=[1, 1, 2, 1],
                              strides=[1, 1, 2, 1], padding='SAME')
    def max_pool_time(self, x):
        # Pool along the time-step axis only.
        return tf.nn.max_pool(x, ksize=[1, 2, 1, 1],
                              strides=[1, 2, 1, 1], padding='SAME')
    def get_label_predictions(self):
        # NOTE(review): self.batch() yields (x, y, z) tuples, but `data` is
        # fed directly as self.x below — this looks like it expects plain
        # arrays; verify before relying on this method.
        x_batcher = self.batch(self.x_test, n=1000, shuffle=False)
        # y_batcher = self.batch(self.y_test, n=1000, shuffle=False)
        predictions = np.zeros((0, 1))
        for data in x_batcher:
            temp_predictions = self.sess.run(
                self.prediction,
                feed_dict={self.x: data,
                           self.keep_prob: 1.0})
            temp_predictions = temp_predictions.reshape((temp_predictions.shape[0], 1))
            predictions = np.vstack((predictions, temp_predictions))
        return predictions
def main():
    """Train the network, dump test predictions, and write validation answers.

    NOTE(review): `cnn.y_test` below is never set by cnnMNIST.get_data(),
    and `sample['spectra']` indexes a NumPy array (validation_batcher yields
    arrays) with a string key — both look like latent bugs; verify against
    the dataset layout before running.
    """
    cnn = cnnMNIST()
    a = time.time()
    print('Retrieving data')
    cnn.get_data()
    b = time.time()
    print('Built the data in {} s'.format(b-a))
    validation_data = cnn.validation_batcher()
    a = time.time()
    cnn.train()
    b = time.time()
    print('Training time: {} s'.format(b-a))
    # cnn.test_eval()
    predictions = cnn.get_label_predictions()
    predictions_decode = predictions
    labels_decode = cnn.onenothot_labels(cnn.y_test)
    np.save('{}_predictions.npy'.format(cnn.runname), predictions_decode)
    np.save('{}_ground_truth.npy'.format(cnn.runname), labels_decode)
    answers = open('approach1c_answers.csv', 'w')
    answers.write('RunID,SourceID,SourceTime,Comment\n')
    # counter = 0
    for sample in validation_data:
        x = np.array(sample['spectra'])
        # Skip the first 30 time steps (background settling period, presumably).
        x = x[30:, :]
        predictions = cnn.sess.run(
            cnn.prediction,
            feed_dict = {cnn.x: x,
                         cnn.keep_prob: 1.0})
        time_index = np.arange(predictions.shape[0])
        mask = predictions >= 0.5
        runname = sample.name.split('/')[-1]
        if np.sum(mask) != 0:
            # Pick the flagged time step with the highest total counts.
            counts = np.sum(x, axis=1)
            # fig = plt.figure()
            t = time_index[mask]
            t = [int(i) for i in t]
            index_guess = np.argmax(counts[t])
            current_predictions = predictions[mask]
            answers.write('{},{},{},\n'.format(
                runname, current_predictions[index_guess], t[index_guess] + 30))
        else:
            # No detection: report source 0 at time 0.
            answers.write('{},{},{},\n'.format(
                runname, 0, 0))
    answers.close()
    return
main()
|
HoliestCow/ece692_deeplearning
|
project5/cnn/cnnseqDETandSID.py
|
Python
|
mit
| 12,377
|
__author__ = 'Cheng'
|
TejasM/wisely
|
wisely_project/studyroom/__init__.py
|
Python
|
mit
| 21
|
from flask import Blueprint, render_template
from TrainTime import TrainTime
traintime_blueprint = Blueprint('traintime_blueprint',__name__)
@traintime_blueprint.route('/traintime/')
def traintime_index():
    """Render the index page listing single-station and between-station options."""
    return render_template('traintime_index.html',stations = list(TrainTime.getAvailableStations())+list(TrainTime.getAvailableBwStations()))
@traintime_blueprint.route('/traintime/<string:station>')
def traintime(station):
    """Show departures for one station, or between two '-'-separated stations."""
    if '-' in station:
        trains, timestamp = TrainTime.getTrainsBwStations(station)
        template = 'traintime_bw_stations.html'
    else:
        trains, timestamp = TrainTime.getTrainsFromStation(station)
        template = 'traintime_from_station.html'
    # A None result means the lookup failed; serve the static error page.
    if trains is None:
        return traintime_blueprint.send_static_file('traintime_error.html')
    return render_template(template,
        station=station, trains=trains, timestamp=timestamp)
|
farseenabdulsalam/wap-proxy
|
TrainTimeBlueprint.py
|
Python
|
mit
| 1,108
|
#!/usr/bin/env python
from camera import ConfigurableCamera
from settings import Job, IMAGES_DIRECTORY
def main():
    """Run one time-lapse pass if a capture job is configured."""
    job = Job()
    if job.exists():
        file_prefix = job.settings.file_prefix
        # Frames are numbered 000, 001, ... via the picamera-style counter token.
        output_file = IMAGES_DIRECTORY + '/' + file_prefix + '_{counter:03d}.jpg'
        with ConfigurableCamera(job=job) as camera:
            camera.time_lapse(output_file)
if __name__ == '__main__':
    # Poll for jobs forever; each iteration handles the current job (if any).
    while True:
        main()
|
projectweekend/Pi-Camera-Time-Lapse
|
time_lapse.py
|
Python
|
mit
| 440
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == "__main__":
    # Build the qtfract app with the project's pytools helpers and launch it.
    from build import *
    addroot()
    import pytools.build as b
    b.build()
    b.run('qtfract')
|
rboman/progs
|
apps/fractal/cpp_qt/run.py
|
Python
|
apache-2.0
| 179
|
import pytest
import string
import numpy as np
from numpy.random import random, randn
from numpy import allclose, empty, zeros, zeros_like, pi, array, int, all, float64
from numpy.fft import fftfreq
from mpi4py import MPI
from mpiFFT4py.pencil import R2C as Pencil_R2C
from mpiFFT4py.slab import R2C as Slab_R2C
from mpiFFT4py.line import R2C as Line_R2C
from mpiFFT4py import rfft2, rfftn, irfftn, irfft2, fftn, ifftn, irfft, ifft
from mpiFFT4py.slab import C2C
def reset_profile(prof):
    """Clear a line_profiler-style profile and re-register its functions."""
    prof.code_map = {}
    prof.last_time = {}
    prof.enable_count = 0
    for registered in prof.functions:
        prof.add_function(registered)
# Problem size and domain for all FFT tests (2*pi-periodic cube).
N = 2**5
L = array([2*pi, 2*pi, 2*pi])
ks = (fftfreq(N)*N).astype(int)
comm = MPI.COMM_WORLD
# Pencil decompositions need at least 4 ranks; otherwise test slabs only.
# Param string encodes: layout (slab/pencil/line), communication scheme,
# alignment axis (pencil only), and precision ('s'ingle/'d'ouble) as suffix.
if comm.Get_size() >= 4:
    params = ("slabas", "slabad", "slabws", "slabwd",
              "pencilsys", "pencilsyd", "pencilnys", "pencilnyd",
              "pencilsxd", "pencilsxs", "pencilnxd", "pencilnxs",
              "pencilaxd", "pencilaxs", "pencilayd", "pencilays")
else:
    params = ("slabas", "slabad", "slabws", "slabwd")
@pytest.fixture(params=params, scope='module')
def FFT(request):
    """Build a 3D real-to-complex FFT object from the encoded param string."""
    # Last character selects precision.
    prec = {"s": "single", "d":"double"}[request.param[-1]]
    if request.param[:3] == "pen":
        # Third-from-last char selects the Alltoall variant; second-from-last
        # the pencil alignment axis.
        communication = {"s": "Alltoall", "n": "AlltoallN", "a": "Alltoallw"}[request.param[-3]]
        alignment = request.param[-2].upper()
        return Pencil_R2C(array([N, 2*N, 4*N]), L, comm, prec, communication=communication, alignment=alignment)
    else:
        communication = 'Alltoall' if request.param[-2] == 'a' else 'Alltoallw'
        return Slab_R2C(array([N, 2*N, 4*N]), L, comm, prec, communication=communication)
@pytest.fixture(params=("lines", "lined"), scope='module')
def FFT2(request):
    """Build a 2D line-decomposed R2C FFT in single or double precision."""
    prec = {"s": "single", "d":"double"}[request.param[-1]]
    return Line_R2C(array([N, 2*N]), L[:-1], comm, prec)
@pytest.fixture(params=("slabd", "slabs"), scope='module')
def FFT_C2C(request):
    """Build a 3D slab-decomposed complex-to-complex FFT object."""
    prec = {"s": "single", "d":"double"}[request.param[-1]]
    return C2C(array([N, 2*N, 4*N]), L, comm, prec)
#@profile
def test_FFT(FFT):
    """Round-trip the parallel 3D R2C transform against serial numpy.fft."""
    N = FFT.N
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        if FFT.communication == 'AlltoallN':
            # This scheme drops the Nyquist mode; zero it in the reference
            # data so both transforms agree.
            C = empty(FFT.global_complex_shape(), dtype=FFT.complex)
            C = rfftn(A, C, axes=(0,1,2))
            C[:, :, -1] = 0 # Remove Nyquist frequency
            A = irfftn(C, A, axes=(0,1,2))
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        B2 = rfftn(A, B2, axes=(0,1,2))
    else:
        A = zeros(N, dtype=FFT.float)
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
    # Looser tolerances for single precision.
    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    FFT.comm.Bcast(A, root=0)
    FFT.comm.Bcast(B2, root=0)
    a = zeros(FFT.real_shape(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    a[:] = A[FFT.real_local_slice()]
    c = FFT.fftn(a, c)
    #print abs((c - B2[FFT.complex_local_slice()])/c.max()).max()
    assert all(abs((c - B2[FFT.complex_local_slice()])/c.max()) < rtol)
    #assert allclose(c, B2[FFT.complex_local_slice()], rtol, atol)
    a = FFT.ifftn(c, a)
    #print abs((a - A[FFT.real_local_slice()])/a.max()).max()
    assert all(abs((a - A[FFT.real_local_slice()])/a.max()) < rtol)
    #assert allclose(a, A[FFT.real_local_slice()], rtol, atol)
def test_FFT2(FFT2):
    """Round-trip the parallel 2D R2C transform against serial numpy.fft."""
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)
    else:
        A = zeros(N, dtype=FFT2.float)
    # Looser tolerances for single precision.
    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0,1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)
def test_FFT2_padded(FFT2):
    """Check the 3/2-rule dealiased (padded) 2D transforms against a serial reference."""
    FFT = FFT2
    N = FFT.N
    prec = "single" if isinstance(FFT.float, np.float32) else "double"
    # Serial single-process reference transform of the same size.
    FFT_SELF = Line_R2C(N, FFT.L, MPI.COMM_SELF, prec)
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        C = zeros((FFT.global_complex_shape()), dtype=FFT.complex)
        C = FFT_SELF.fft2(A, C)
        # Eliminate Nyquist, otherwise test will fail
        C[-N[0]//2] = 0
        A_pad = np.zeros(FFT_SELF.real_shape_padded(), dtype=FFT.float)
        A_pad = FFT_SELF.ifft2(C, A_pad, dealias="3/2-rule")
    else:
        C = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        A_pad = zeros(FFT_SELF.real_shape_padded(), dtype=FFT.float)
    FFT.comm.Bcast(C, root=0)
    FFT.comm.Bcast(A_pad, root=0)
    # Local expected (ae) and computed (ap) padded real arrays.
    ae = zeros(FFT.real_shape_padded(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    c[:] = C[FFT.complex_local_slice()]
    ae[:] = A_pad[FFT.real_local_slice(padsize=1.5)]
    ap = zeros(FFT.real_shape_padded(), dtype=FFT.float)
    cp = zeros(FFT.complex_shape(), dtype=FFT.complex)
    ap = FFT.ifft2(c, ap, dealias="3/2-rule")
    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    #from IPython import embed; embed()
    #print np.linalg.norm(ap-ae)
    assert allclose(ap, ae, rtol, atol)
    cp = FFT.fft2(ap, cp, dealias="3/2-rule")
    #print np.linalg.norm(abs((cp-c)/cp.max()))
    assert all(abs((cp-c)/cp.max()) < rtol)
def test_FFT_padded(FFT):
    """Check 3/2-rule padded 3D real transforms against a single-process reference."""
    N = FFT.N
    # NOTE(review): other tests use ``FFT.float is float64``; ``isinstance``
    # on a type object is suspect -- confirm FFT.float's kind.
    prec = "single" if isinstance(FFT.float, np.float32) else "double"
    # Single-process transform object used to build the reference solution.
    FFT_SELF = Slab_R2C(FFT.N, L, MPI.COMM_SELF, prec,
                        communication=FFT.communication)
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        C = zeros((FFT.global_complex_shape()), dtype=FFT.complex)
        C = FFT_SELF.fftn(A, C)
        # Eliminate Nyquist, otherwise test will fail
        #C[-N[0]//2] = 0
        #C[:, -N[1]//2] = 0
        if FFT.communication == 'AlltoallN':
            C[:, :, -1] = 0  # Remove Nyquist frequency
        A_pad = np.zeros(FFT_SELF.real_shape_padded(), dtype=FFT.float)
        A_pad = FFT_SELF.ifftn(C, A_pad, dealias='3/2-rule')
    else:
        C = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        A_pad = zeros(FFT_SELF.real_shape_padded(), dtype=FFT.float)
    # Broadcast the serial reference data to all ranks.
    FFT.comm.Bcast(C, root=0)
    FFT.comm.Bcast(A_pad, root=0)
    ae = zeros(FFT.real_shape_padded(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    c[:] = C[FFT.complex_local_slice()]
    ae[:] = A_pad[FFT.real_local_slice(padsize=1.5)]
    ap = zeros(FFT.real_shape_padded(), dtype=FFT.float)
    cp = zeros(FFT.complex_shape(), dtype=FFT.complex)
    # Distributed padded inverse transform must match the serial one.
    ap = FFT.ifftn(c, ap, dealias="3/2-rule")
    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    #print np.linalg.norm(ap-ae)
    assert allclose(ap, ae, rtol, atol)
    # Forward padded transform must recover the truncated spectrum.
    cp = FFT.fftn(ap, cp, dealias="3/2-rule")
    #from IPython import embed; embed()
    #print np.linalg.norm(abs((cp-c)/cp.max()))
    assert all(abs((cp-c)/cp.max()) < rtol)
    #aa = zeros(FFT.real_shape(), dtype=FFT.float)
    #aa = FFT.ifftn(cp, aa)
    #a3 = A[FFT.real_local_slice()]
    #assert allclose(aa, a3, rtol, atol)
def test_FFT_C2C(FFT_C2C):
    """Test both padded and unpadded transforms"""
    FFT = FFT_C2C
    N = FFT.N
    atol, rtol = (1e-8, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    if FFT.rank == 0:
        # Create a reference solution using only one CPU
        A = (random(N)+random(N)*1j).astype(FFT.complex)
        C = zeros((FFT.global_shape()), dtype=FFT.complex)
        C = fftn(A, C, axes=(0,1,2))
        # Copy to array padded with zeros
        Cp = zeros((3*N[0]//2, 3*N[1]//2, 3*N[2]//2), dtype=FFT.complex)
        ks = (fftfreq(N[2])*N[2]).astype(int)
        # Copy the four quadrants of the spectrum into the corners of the
        # 3/2-padded spectrum (negative frequencies wrap to the end).
        Cp[:N[0]//2, :N[1]//2, ks] = C[:N[0]//2, :N[1]//2]
        Cp[:N[0]//2, -N[1]//2:, ks] = C[:N[0]//2, N[1]//2:]
        Cp[-N[0]//2:, :N[1]//2, ks] = C[N[0]//2:, :N[1]//2]
        Cp[-N[0]//2:, -N[1]//2:, ks] = C[N[0]//2:, N[1]//2:]
        # Get transform of padded array
        Ap = zeros((3*N[0]//2, 3*N[1]//2, 3*N[2]//2), dtype=FFT.complex)
        Ap = ifftn(Cp*1.5**3, Ap, axes=(0,1,2))
    else:
        C = zeros(FFT.global_shape(), dtype=FFT.complex)
        Ap = zeros((3*N[0]//2, 3*N[1]//2, 3*N[2]//2), dtype=FFT.complex)
        A = zeros(N, dtype=FFT.complex)
    # For testing broadcast the arrays computed on root to all CPUs
    FFT.comm.Bcast(C, root=0)
    FFT.comm.Bcast(Ap, root=0)
    FFT.comm.Bcast(A, root=0)
    # Get the single processor solution on local part of the solution
    ae = zeros(FFT.original_shape_padded(), dtype=FFT.complex)
    ae[:] = Ap[FFT.original_local_slice(padsize=1.5)]
    c = zeros(FFT.transformed_shape(), dtype=FFT.complex)
    c[:] = C[FFT.transformed_local_slice()]
    # Perform padded transform with MPI and assert ok
    ap = zeros(FFT.original_shape_padded(), dtype=FFT.complex)
    ap = FFT.ifftn(c, ap, dealias="3/2-rule")
    assert allclose(ap, ae, rtol, atol)
    # Perform truncated transform with MPI and assert
    cp = zeros(FFT.transformed_shape(), dtype=FFT.complex)
    cp = FFT.fftn(ap, cp, dealias="3/2-rule")
    assert all(abs(cp-c)/cp.max() < rtol)
    # Now without padding
    # Transform back to original
    aa = zeros(FFT.original_shape(), dtype=FFT.complex)
    aa = FFT.ifftn(c, aa)
    # Verify
    a3 = A[FFT.original_local_slice()]
    assert allclose(aa, a3, rtol, atol)
    c2 = zeros(FFT.transformed_shape(), dtype=FFT.complex)
    c2 = FFT.fftn(aa, c2)
    # Verify
    assert all(abs(c2-c)/c2.max() < rtol)
    #assert allclose(c2, c, rtol, atol)
#import time
#t0 = time.time()
#test_FFT_padded(Pencil_R2C(array([N, N, N], dtype=int), L, MPI.COMM_WORLD, "double", alignment="Y", communication='Alltoall'))
#t1 = time.time()
#test_FFT_padded(Pencil_R2C(array([N, N, N], dtype=int), L, MPI, "double", alignment="X", communication='Alltoall'))
#t2 = time.time()
#ty = MPI.COMM_WORLD.reduce(t1-t0, op=MPI.MIN)
#tx = MPI.COMM_WORLD.reduce(t2-t1, op=MPI.MIN)
#if MPI.COMM_WORLD.Get_rank() == 0:
#print "Y: ", ty
#print "X: ", tx
#test_FFT(Slab_R2C(array([N, 2*N, 4*N]), L, MPI.COMM_WORLD, "double", communication='Alltoall'))
#test_FFT(Pencil_R2C(array([N, N, N], dtype=int), L, MPI.COMM_WORLD, "double", alignment="Y", communication='Alltoall'))
#test_FFT2(Line_R2C(array([N, N]), L[:-1], MPI, "single"))
#test_FFT2_padded(Line_R2C(array([N, N]), L[:-1], MPI, "double"))
#from collections import defaultdict
#FFT = Slab_R2C(array([N//4, N, N]), L, MPI.COMM_WORLD, "double", communication='Alltoallw', threads=2, planner_effort=defaultdict(lambda: "FFTW_MEASURE"))
#test_FFT_padded(FFT)
#reset_profile(profile)
#test_FFT_padded(FFT)
#test_FFT_padded(Pencil_R2C(array([N, N, N], dtype=int), L, MPI, "double", alignment="X", communication='AlltoallN'))
#test_FFT_C2C(C2C(array([N, N, N]), L, MPI, "double"))
|
spectralDNS/mpiFFT4py
|
tests/test_FFT.py
|
Python
|
lgpl-3.0
| 10,981
|
import pyrow
import json
import asyncio
from autobahn.asyncio.websocket import WebSocketServerProtocol
from autobahn.asyncio.websocket import WebSocketServerFactory
class MyServerProtocol(WebSocketServerProtocol):
    """Streams Concept2 rowing-machine monitor data to each connected client."""

    def __init__(self):
        # Keep autobahn's own protocol state initialised.
        super(MyServerProtocol, self).__init__()
        self.is_open = False

    def onOpen(self):
        # BUGFIX: the original body used ``yield from`` directly inside
        # onOpen(), turning it into a generator function. Autobahn calls
        # onOpen() and discards the return value, so the generator was never
        # iterated and the rowing loop never ran. Schedule the coroutine on
        # the event loop instead.
        try:
            self.is_open = True
            asyncio.ensure_future(self.start_sending_rowing_info())
        except Exception:
            print("fail")

    def onClose(self, wasClean, code, reason):
        print("closing time")
        self.is_open = False

    @asyncio.coroutine
    def start_sending_rowing_info(self):
        """Poll the first detected erg every 2 seconds and push JSON monitor data."""
        machines = list(pyrow.find())
        if len(machines) > 0:
            rowing_machine = machines[0]
            erg = pyrow.pyrow(rowing_machine)
            while self.is_open:
                monitor = erg.get_monitor(forceplot=True)
                message = json.dumps(monitor).encode('utf8')
                try:
                    self.sendMessage(message, isBinary=False)
                except Exception:
                    # Best-effort: keep polling even if one send fails.
                    print("couldn't send message")
                yield from asyncio.sleep(2)
        else:
            print('No machines connected')
# Wire the protocol into a server factory and serve on all interfaces, port 9000.
factory = WebSocketServerFactory()
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9000)
server = loop.run_until_complete(coro)
try:
    # Block until interrupted (Ctrl-C).
    loop.run_forever()
except KeyboardInterrupt:
    pass
finally:
    # Always release the listening socket and the event loop.
    server.close()
    loop.close()
|
david-jarman/CookieRower
|
websocket_server.py
|
Python
|
bsd-2-clause
| 1,483
|
# coding: utf-8
from django import template
from django.db import models
from ref.models import ExtendedParameterDict
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def verbose_name(value):
    """Template filter: return the verbose name of a model instance's class."""
    meta = value._meta
    return meta.verbose_name
@register.filter
def ksh_protect_and_quote(value):
    """Render *value* as a ksh-safe token: booleans become 1/0, ints pass
    through unquoted, everything else is double-quoted with '"' and '$'
    escaped so ksh does not expand them."""
    # bool must be tested before int, since bool is a subclass of int.
    if isinstance(value, bool) and value:
        return "1"
    elif isinstance(value, bool) and not value:
        return "0"
    if isinstance(value, int):
        return value
    if isinstance(value, ExtendedParameterDict):
        return '"%s"' % value
    if type(value).__name__ == 'ManyRelatedManager':
        # M2M manager: quote a comma-separated list of the related names.
        return '"' + ','.join([a.name for a in value.all()]) + '"'
    if value is None:
        return '""'
    if isinstance(value, models.Model):
        # Model instances are referenced by primary key.
        return '"%s"' % value.pk
    # FIX: use a raw string for the replacement -- '\$' is an invalid escape
    # sequence (DeprecationWarning on modern Pythons); r'\$' has the same
    # runtime value, so behavior is unchanged.
    res = ("%s" % value).replace('"', '\\"').replace('$', r'\$')
    return ('"%s"' % res)
@register.filter
def apply_field_template(component_instance, computed_field):
    """Template filter: resolve a computed field against a component instance."""
    resolved = computed_field.resolve(component_instance)
    return resolved
@register.filter
def project_ci_fields(descriptions, instances):
    """Yield (field_descr, field_value_or_None) pairs.

    Single-pass merge: both iterables must be sorted on the field pk
    beforehand.  (The explanation previously sat in a dead module-level
    string; it is now a real docstring.)
    """
    i = instances.__iter__()
    n = next(i, None)
    for field_descr in descriptions:
        # Advance the instance cursor only when it matches this description.
        if n is not None and n.field_id == field_descr.pk:
            yield (field_descr, n.value)
            n = next(i, None)
        else:
            yield (field_descr, None)
@register.filter()
def urlify(value):
    """Template filter: turn http links into anchors and 'True'/'False'
    strings into bootstrap glyphicons; other values pass through."""
    is_text = isinstance(value, str) or isinstance(value, unicode)
    if is_text and value.startswith('http'):
        # Optional "url|label" syntax; default label otherwise.
        parts = value.split('|')
        if len(parts) == 2:
            value, link = parts[0], parts[1]
        else:
            link = 'cliquez ici'
        return mark_safe(("<a href='%s'>%s</a>" % (value, link)))
    if is_text and value == 'True':
        return mark_safe("<span class='glyphicon glyphicon-ok' aria-hidden='true'></span>")
    if is_text and value == 'False':
        return mark_safe("<span class='glyphicon glyphicon-remove' aria-hidden='true'></span>")
    if value is None:
        return ''
    return value
@register.filter
def get_item(dictionary, key):
    """Template filter: dictionary lookup, returning None when absent."""
    value = dictionary.get(key)
    return value
|
digitalfox/MAGE
|
ref/templatetags/filter.py
|
Python
|
apache-2.0
| 2,371
|
# Generated by Django 2.2.11 on 2020-10-12 14:18
from django.db import migrations, models
import huntserver.models
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes Puzzle.puzzle_file optional
    # (blank=True) and routes it through OverwriteStorage so re-uploads
    # replace the existing file on disk.

    dependencies = [
        ('huntserver', '0061_puzzle_puzzle_file'),
    ]

    operations = [
        migrations.AlterField(
            model_name='puzzle',
            name='puzzle_file',
            field=models.FileField(blank=True, storage=huntserver.models.OverwriteStorage(), upload_to=huntserver.models.get_puzzle_file_path),
        ),
    ]
|
dlareau/puzzlehunt_server
|
huntserver/migrations/0062_auto_20201012_1018.py
|
Python
|
mit
| 513
|
import re
import array
from seal.lib.aligner.mapping import Mapping
class SAMMapping(Mapping):
    """
    A mapping implementation for storing SAM data.

    A SAMMapping object is constructed from a list of SAM fields --
    see http://samtools.sourceforge.net
    """
    CIGAR_PATTERN = re.compile(r"(\d+)([MIDNSHP])")
    REFID_PATTERN = re.compile(r"\d+$")

    def __init__(self, sam_fields):
        """
        Provide a sam record as a string (it will be split on tabs to get the
        fields) or directly a list of sam fields.
        """
        super(SAMMapping, self).__init__()
        if isinstance(sam_fields, str):
            sam_fields = sam_fields.split("\t")
        self.__name = sam_fields[0]
        self.flag = int(sam_fields[1])
        ref_id_match = self.REFID_PATTERN.search(sam_fields[2])  # it's the best we can do without a sam header or the reference annotations
        if ref_id_match is not None:
            self.ref_id = int(ref_id_match.group())
        self.tid = sam_fields[2]
        self.pos = int(sam_fields[3])
        self.qual = int(sam_fields[4])
        self.__cigar = [(int(n), c) for (n, c) in self.CIGAR_PATTERN.findall(sam_fields[5])]
        if sam_fields[6] == '*':  # is this BWA-specific?
            self.mtid = None
        else:
            self.mtid = sam_fields[6]
        self.mpos = int(sam_fields[7])
        self.isize = int(sam_fields[8])
        self.__seq = sam_fields[9]
        self.__ascii_base_qual = sam_fields[10]
        # Decoded lazily in get_base_qualities(); None until first request.
        self.__base_qual = None
        self.__tags = [tuple(t.split(":", 2)) for t in sam_fields[11:]]

    def get_name(self):
        return self.__name

    def get_seq_5(self):
        return self.__seq

    def get_base_qualities(self):
        # BUGFIX: the original used hasattr(self, '__base_qual'), but Python
        # name-mangles the *assignment* to _SAMMapping__base_qual while the
        # string literal is not mangled -- so the check was always False and
        # the quality array was rebuilt on every call. Use an explicit None
        # sentinel set in __init__ instead.
        if self.__base_qual is None:
            # Phred+33 ASCII encoding -> numeric quality scores.
            self.__base_qual = array.array(
                'B', [ord(q) - 33 for q in self.__ascii_base_qual]
            )
        return self.__base_qual

    def get_cigar(self):
        return self.__cigar

    def each_tag(self):
        for t in self.__tags:
            yield t
|
QwertyManiac/seal-cdh4
|
seal/lib/aligner/sam_mapping.py
|
Python
|
gpl-3.0
| 1,761
|
'''TextFinder - a widget for searching through a text widget
'''
import Tkinter as tk
import ttk
class TextFinder(ttk.Frame):
    '''A search toolbar for a text widget.

    Every match of the pattern is tagged "find"; the active match is
    additionally tagged "current_find".  Supports next/previous navigation
    with wrap-around and optional highlighting.
    '''

    def __init__(self, parent, text_widget):
        self.text = text_widget
        ttk.Frame.__init__(self, parent)
        # %P passes the prospective entry contents to _on_validate.
        validatecommand = (self.register(self._on_validate), "%P")
        self._jobs = []
        self._stringvar = tk.StringVar()
        self._highlightvar = tk.IntVar()
        self._highlightvar.set(1)
        self.label = ttk.Label(self, text="Find:", anchor="e")
        self.sep = ttk.Separator(self, orient="horizontal")
        self.entry = ttk.Entry(self, textvariable=self._stringvar, exportselection=False,
                               validate="key", validatecommand=validatecommand)
        self.next_button = ttk.Button(self, text="Find Next",
                                      style="Toolbutton", command=self._on_next)
        self.prev_button = ttk.Button(self, text="Find Previous",
                                      style="Toolbutton", command=self._on_previous)
        self.highlight_button = ttk.Checkbutton(self, text="Highlight",
                                                onvalue=1, offvalue=0,
                                                variable=self._highlightvar,
                                                command=self._on_highlight)
        self.info_label = ttk.Label(self, anchor="e")
        self.hide_button = ttk.Button(self, text="[x]",
                                      style="Toolbutton", command=self._on_cancel)
        self.sep.grid(row=0, column=0, columnspan=7, sticky="ew", pady=2)
        self.label.grid(row=1, column=0, sticky="e")
        self.entry.grid(row=1, column=1, sticky="ew")
        self.next_button.grid(row=1, column=2)
        self.prev_button.grid(row=1, column=3)
        self.highlight_button.grid(row=1, column=4)
        self.info_label.grid(row=1, column=5, sticky="nsew", padx=(0, 10))
        self.hide_button.grid(row=1, column=6)
        self.grid_columnconfigure(5, weight=1)
        self.entry.bind("<Return>", self._on_next)
        self.entry.bind("<Control-n>", self._on_next)
        self.entry.bind("<Control-p>", self._on_previous)
        self.entry.bind("<Control-g>", self._on_next)
        self.entry.bind("<Escape>", self._on_cancel)
        # Create the tags (the "current_find" appearance is re-configured
        # below to mirror the selection colors).
        self.text.tag_configure("find", background="yellow", foreground="black")
        self.text.tag_configure("current_find", background="bisque")
        # Tag priority: current_find on top of find and sel.
        self.text.tag_raise("find")
        self.text.tag_raise("current_find", "find")
        self.text.tag_raise("current_find", "sel")
        self.text.tag_configure("current_find",
                                background=self.text.tag_cget("sel", "background"),
                                foreground=self.text.tag_cget("sel", "foreground"))

    def begin(self, pattern, start="insert", direction="forwards"):
        '''Begin a new search

        *start* and *direction* are kept for interface compatibility;
        direction only controls the -backwards switch. (The original also
        computed search ranges from them that were never used -- removed.)
        '''
        self.reset()
        # tkinter's text widget doesn't support the -all option for searching,
        # so we'll have to directly interface with the tcl subsystem
        command = [self.text._w, "search", "-nocase", "-all"]
        if direction != "forwards":
            command.append("-backwards")
        command.append(pattern)
        # search first from start to EOF, then from 1.0 to
        # the starting point. Why? Most likely the user is
        # wanting to find the next occurrance which is what
        # this algorithm results in.
        result1 = self.tk.call(tuple(command + ["insert linestart", "end"]))
        result2 = self.tk.call(tuple(command + ["1.0", "insert lineend"]))
        # why reverse result2? It represents results above the
        # starting point, so if result1 is null, this guarantees
        # that the first result is nearest the search start
        result = list(result1) + list(reversed(result2))
        if len(result) > 0:
            i = result[0]
            self._current_find(i, "%s + %sc" % (i, len(pattern)))
        for index in result:
            self.text.tag_add("find", index, "%s + %sc" % (index, len(pattern)))

    def reset(self):
        '''Reset the searching mechanism

        This will stop any pending jobs and remove the special tags
        '''
        self.info_label.configure(text="")
        for job in self._jobs:
            self.after_cancel(job)
        self._jobs = []
        self.text.tag_remove("current_find", 1.0, "end")
        self.text.tag_remove("find", 1.0, "end")

    def _current_find(self, start, end):
        # Mark [start, end) as the active match, select it and scroll to it.
        self.text.tag_remove("current_find", 1.0, "end")
        self.text.tag_add("current_find", start, end)
        self.text.tag_remove("sel", 1.0, "end")
        self.text.tag_add("sel", start, end)
        self.text.mark_set("insert", start)
        self.text.see("insert")

    def next(self):
        '''Advance to the next match, wrapping to the top when needed.'''
        search_range = self.text.tag_nextrange("find", "insert")
        # if the cursor is inside the range we just found, look a
        # little further
        if (len(search_range) == 2 and
                self.text.compare("insert", "<=", search_range[1]) and
                self.text.compare("insert", ">=", search_range[0])):
            search_range = self.text.tag_nextrange("find", search_range[1])
        # if the range is null, wrap around to the start of the widget
        if len(search_range) == 0:
            search_range = self.text.tag_nextrange("find", 1.0)
            self.info_label.configure(text="find wrapped")
            self.bell()
        # if, after all that, the range is still null, well, blimey!
        if len(search_range) == 0:
            self.info_label.configure(text="pattern not found (3)")
            self.bell()
        else:
            self._current_find(*search_range)

    def previous(self):
        '''Move to the previous match, wrapping to the bottom when needed.'''
        search_range = self.text.tag_prevrange("find", "insert")
        # if the cursor is inside the range we just found, look a
        # little further
        if (len(search_range) == 2 and
                self.text.compare("insert", "<=", search_range[1]) and
                self.text.compare("insert", ">=", search_range[0])):
            search_range = self.text.tag_prevrange("find", search_range[0])
        # if the range is null, wrap around to the end of the widget
        if len(search_range) == 0:
            search_range = self.text.tag_prevrange("find", "end")
            self.info_label.configure(text="find wrapped")
            self.bell()
        # if, after all that, the range is still null, well, blimey!
        if len(search_range) == 0:
            self.info_label.configure(text="pattern not found (2)")
            self.bell()
        else:
            self._current_find(*search_range)

    def _on_cancel(self, event=None):
        self.reset()

    def _on_highlight(self, event=None):
        # Toggle the highlight colors on or off for all matches.
        if self._highlightvar.get():
            self.text.tag_configure("find", background="yellow", foreground="black")
        else:
            self.text.tag_configure("find", background="", foreground="")

    def _on_previous(self, event=None):
        self.previous()

    def _on_next(self, event=None):
        self.next()

    def _on_validate(self, P):
        '''Called whenever the search string changes

        This method will cancel any existing search that
        is in progress, then start a new search if the passed
        in string is not empty

        This is called from the entry widget validation command, so
        even though we're not validating anything per se, we still
        need to return True or this method will stop being called.
        '''
        self.info_label.configure(text="")
        self.reset()
        if len(P) > 0:
            self.begin(P)
        return True
if __name__ == "__main__":
    # Manual smoke test: a text widget with the finder toolbar underneath.
    root = tk.Tk()
    text = tk.Text(root)
    finder = TextFinder(root, text)
    text.pack(side="top", fill="both", expand=True)
    finder.pack(side="bottom", fill="x")
    text.insert("end", '''this is the first line
this is the second line
this is the third line''')
    text.mark_set("insert", 1.0)
    root.mainloop()
|
boakley/robotframework-workbench
|
rwb/widgets/textfinder.py
|
Python
|
apache-2.0
| 8,510
|
# Copyright 2011 Terena. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY TERENA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL TERENA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of Terena.
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the contents of a file located relative to this setup script.

    FIX: the original leaked the file handle (open(...).read() with no
    close); a context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
        return f.read()
# Package version, reused below in the setup() metadata.
version = '0.2b3'

# Standard setuptools packaging metadata for the django-vff distribution.
setup(name='django-vff',
      version=version,
      description=("Versioned file field for django models"),
      # Long description is assembled from the README and the changelog.
      long_description=(read('README.rst') + '\n\n' + read('CHANGES.rst')),
      classifiers=[
          'Development Status :: 4 - Beta',
          'Framework :: Django',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python',],
      keywords='django versioned file field vcs model git',
      author='TERENA',
      author_email='eperez@yaco.es',
      url='https://github.com/Yaco-Sistemas/django-vff',
      license='BSD',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'GitPython==0.3.6',
      ],
      )
|
enriquepablo/django-vff
|
setup.py
|
Python
|
bsd-2-clause
| 2,489
|
import numpy as np
import citysim3d.envs
from visual_dynamics.envs import Panda3dEnv
from visual_dynamics.spaces import Space, BoxSpace, TranslationAxisAngleSpace
from visual_dynamics.utils.config import ConfigObject
class SimpleQuadPanda3dEnv(citysim3d.envs.SimpleQuadPanda3dEnv, Panda3dEnv):
    """Quad environment adding config serialization on top of citysim3d's env."""

    def _get_config(self):
        """Extend the parent config with this environment's constructor arguments."""
        config = super(SimpleQuadPanda3dEnv, self)._get_config()
        # Ensure the car action space is serializable as a ConfigObject.
        space = self.car_action_space
        if not isinstance(space, ConfigObject):
            space = Space.create(space)
        extra = {
            'action_space': self.action_space,
            'sensor_names': self.sensor_names,
            'camera_size': self.camera_size,
            'camera_hfov': self.camera_hfov,
            'offset': self.offset.tolist(),
            'car_env_class': self.car_env_class,
            'car_action_space': space,
            'car_model_names': self.car_model_names,
        }
        config.update(extra)
        return config
class Point3dSimpleQuadPanda3dEnv(SimpleQuadPanda3dEnv):
    """Adds the car's 3D position (relative to the camera) to the observation."""

    def __init__(self, action_space, **kwargs):
        super(Point3dSimpleQuadPanda3dEnv, self).__init__(action_space, **kwargs)
        # Unbounded 3-vector slot for the position observation.
        self._observation_space.spaces['pos'] = BoxSpace(-np.inf, np.inf, shape=(3,))

    def observe(self):
        """Return the parent observation augmented with the car position."""
        observation = super(Point3dSimpleQuadPanda3dEnv, self).observe()
        car_pos = self.car_node.getTransform(self.camera_node).getPos()
        observation['pos'] = np.array(car_pos)
        return observation
def main():
    """Interactive demo: fly the quad with a target-tracking policy, show frames,
    and report the average FPS on exit."""
    import os
    import numpy as np
    from panda3d.core import loadPrcFile
    # The CitySim3D config file is required for rendering.
    assert "CITYSIM3D_DIR" in os.environ
    loadPrcFile(os.path.expandvars('${CITYSIM3D_DIR}/config.prc'))
    action_space = TranslationAxisAngleSpace(np.array([-20, -10, -10, -1.5707963267948966]),
                                             np.array([20, 10, 10, 1.5707963267948966]))
    sensor_names = ['image', 'depth_image']
    env = SimpleQuadPanda3dEnv(action_space, sensor_names)
    import time
    import cv2
    start_time = time.time()
    frames = 0
    from visual_dynamics.policies.quad_target_policy import QuadTargetPolicy
    pol = QuadTargetPolicy(env, (12, 18), (-np.pi / 2, np.pi / 2))
    obs = env.reset()
    pol.reset()
    image, depth_image = obs
    while True:
        try:
            env.render()
            # OpenCV display expects BGR channel order.
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imshow('Image window', image)
            key = cv2.waitKey(1)
            key &= 255
            # Quit on ESC or 'q'.
            if key == 27 or key == ord('q'):
                print("Pressed ESC or q, exiting")
                break
            quad_action = pol.act(obs)
            obs, _, _, _ = env.step(quad_action)
            image, depth_image = obs
            frames += 1
        except KeyboardInterrupt:
            break
    end_time = time.time()
    print("average FPS: {}".format(frames / (end_time - start_time)))
# Allow running this module directly as a demo.
if __name__ == "__main__":
    main()
|
alexlee-gk/visual_dynamics
|
visual_dynamics/envs/quad_panda3d_env.py
|
Python
|
mit
| 2,967
|
from headphones import logger
from headphones.exceptions import ConfigError
""" List of types, which ConfigOpt could handle internally, without conversions """
_primitives = (int, str, unicode, bool, list, float)
class OptionModel(object):
    """ Stores value of option, and knows how to write this value to the config file"""

    def __init__(self, appkey, section, default, initype):
        # Callback returning the live config mapping; set via bindToConfig().
        self._config_callback = None
        self._appkey = appkey
        # ini keys are stored lower-case in the config file
        self._inikey = appkey.lower()
        self._section = section
        self._default = default
        self._initype = initype

    def _exists(self):
        # True when both the section and the key are present in the config.
        c = self._config_callback()
        return (self._section in c) and (self._inikey in c[self._section])

    @property
    def appkey(self):
        return self._appkey

    @property
    def inikey(self):
        return self._inikey

    @property
    def section(self):
        return self._section

    @section.setter
    def section(self, value):
        """ The value of 'section' is immutable. But it is possible to change CASE of this value """
        ov = self._section.lower() if self._section else None
        nv = value.lower() if value else None
        # is immutable. Could change case of chars, or change value from None to not None
        if (ov is None) or (ov == nv):
            self._section = value
        else:
            raise ValueError('section already set')
        return self._section

    def bindToConfig(self, config_callback):
        # Attach the option to a config; write the default when the key is missing.
        self._config_callback = config_callback
        if not self._exists():
            self.set(self._default)

    def get(self):
        """Read the option from the config, coerced to the declared ini type.

        Falls back to the default when conversion fails; raises ConfigError
        when the option was never bound or is absent from the config.
        """
        # abbreviation. I am too lazy to write 'self._inikey', will use 'k'
        s = self._section  # section
        k = self._inikey  # key
        t = self._initype  # type
        d = self._default
        if self._config_callback is None:
            msg = 'Option [{0}][{1}] was not binded to config'.format(s, k)
            logger.error(msg)
            raise ConfigError(msg)
        if not self._exists():
            msg = 'Option [{0}][{1}] does not exist in config'.format(s, k)
            logger.error(msg)
            raise ConfigError(msg)
        config = self._config_callback()
        v = config[s][k]
        # cast to target type IF REQUIRED. do not convert, if variable is already of target type
        if not isinstance(t, type) or not isinstance(v, t):
            try:
                v = t(v)
            except TypeError as exc:
                logger.error('The type of option [{0}][{1}] is not compatible. Going to use the default value. {2}'.format(s, k, exc))
                v = d if isinstance(t, type) and isinstance(d, t) else t(d)
            except ValueError as exc:
                logger.error('The value of option [{0}][{1}] is not well-typed. Going to use the default value. {2}'.format(s, k, exc))
                v = d if isinstance(t, type) and isinstance(d, t) else t(d)
        return v

    def set(self, value):
        """Write *value* into the config, creating the section when needed.

        Non-primitive values are stringified; None is stored as ''.
        """
        # abbreviation. I am too lazy to write 'self._inikey', will use 'k'
        s = self._section  # section
        k = self._inikey  # key
        t = self._initype  # type
        if self._config_callback is None:
            msg = 'Option [{0}][{1}] was not binded/registered with config'.format(s, k)
            logger.error(msg)
            raise ConfigError(msg)
        config = self._config_callback()
        if s not in config:
            # create section:
            logger.debug('Section [{0}] for option [{0}][{1}] doesn\'t exists in config. Create empty.'.format(s, k))
            config[s] = {}
        if k not in config[s]:
            # debug about new config value:
            logger.debug('Option [{0}][{1}] doesn\'t exists in config. Set to default.'.format(s, k))
        # convert value to storable types:
        if value is None:
            value = ''
        elif not isinstance(value, _primitives):
            logger.debug('Value of option [{0}][{1}] is not primitive [{2}], will `str` it'.format(s, k, type(value)))
            value = str(value)
        else:
            value = t(value)
        config[s][k] = value
|
maxkoryukov/headphones
|
headphones/config/_datamodel.py
|
Python
|
gpl-3.0
| 4,171
|
#!/usr/bin/env python
################################################################################
## PipeCheck: Specifying and Verifying Microarchitectural ##
## Enforcement of Memory Consistency Models ##
## ##
## Copyright (c) 2014 Daniel Lustig, Princeton University ##
## All rights reserved. ##
## ##
## This library is free software; you can redistribute it and/or ##
## modify it under the terms of the GNU Lesser General Public ##
## License as published by the Free Software Foundation; either ##
## version 2.1 of the License, or (at your option) any later version. ##
## ##
## This library is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## Lesser General Public License for more details. ##
## ##
## You should have received a copy of the GNU Lesser General Public ##
## License along with this library; if not, write to the Free Software ##
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 ##
## USA ##
################################################################################
import sys
import re
# Per-processor tables keyed [proc][test]: graph id, observed result and
# expected result for every litmus test parsed from stdin.
# NOTE: Python 2 script ('<>' operator, print statement).
idens = {}
results = {}
expecteds = {}

for ln in sys.stdin:
    # Extract (id, processor, test-name, observed, expected) from each line.
    match = re.search("(^[0-9]*)_([A-Za-z0-9-]*_*[A-Za-z0-9*]*)__([A-Za-z0-9-]*_*[A-Za-z0-9-]*)_.*label=.(....).*exp: (....)", ln)
    if not match:
        sys.stderr.write("Could not parse line: %s" % ln)
        continue
    iden = match.group(1)
    proc = match.group(2)
    test = match.group(3)
    result = match.group(4)
    expected = match.group(5)
    #sys.stdout.write("Line: %sResult: %s: %s: %s (%s)\n" % (ln, proc, test, result, expected))
    if proc not in results:
        idens[proc] = {}
        results[proc] = {}
        expecteds[proc] = {}
    if test in results[proc]:
        # Same test seen twice: the results must agree, otherwise mark it '?'.
        if result <> results[proc][test]:
            sys.stderr.write("Inconsistency! %s" % ln)
            results[proc][test] = "?"
    else:
        idens[proc][test] = iden
        results[proc][test] = result
        expecteds[proc][test] = expected

for (proc, s) in results.items():
    try:
        for (test, result) in sorted(s.items()):
            expected = expecteds[proc][test]
            iden = idens[proc][test]
            # Annotate mismatches between observed and expected outcomes.
            if expected == "Forb" and result == "Perm":
                sys.stdout.write("(bug?) \t")
            elif expected == "Perm" and result == "Forb":
                sys.stdout.write("(strict)\t")
            elif expected <> result:
                sys.stdout.write("?? \t")
            else:
                sys.stdout.write(" \t")
            sys.stdout.write("%s: %s: %s (test %s) (expected: %s)\n" %
                             (proc, test, result, iden, expected))
    except:
        print s
|
daniellustig/pipecheck
|
parse_litmus_test_results.py
|
Python
|
lgpl-2.1
| 3,270
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from .positive_alert_test_case import PositiveAlertTestCase
from .negative_alert_test_case import NegativeAlertTestCase
from .alert_test_suite import AlertTestSuite
class TestGuardDutyProbe(AlertTestSuite):
    # Alert-framework test suite for the GuardDuty port-probe alert.
    alert_filename = "guard_duty_probe"
    alert_classname = "AlertGuardDutyProbe"

    # This event is the default positive event that will cause the
    # alert to trigger
    default_event = {
        "_source": {
            "source": "guardduty",
            "details": {
                "sourceipaddress": "1.2.3.4",
                "finding": {
                    "action": {
                        "actionType": "PORT_PROBE"
                    }
                }
            }
        }
    }

    # This alert is the expected result from running this task
    default_alert = {
        "category": "bruteforce",
        "tags": ['guardduty', 'bruteforce'],
        "severity": "INFO",
        "summary": 'Guard Duty Port Probe by 1.2.3.4',
    }

    test_cases = []

    test_cases.append(
        PositiveAlertTestCase(
            description="Positive test with default events and default alert expected",
            events=AlertTestSuite.create_events(default_event, 1),
            expected_alert=default_alert
        )
    )

    events = AlertTestSuite.create_events(default_event, 10)
    for event in events:
        event['_source']['source'] = 'bad'
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with events with incorrect category",
            events=events,
        )
    )

    events = AlertTestSuite.create_events(default_event, 10)
    for event in events:
        event['_source']['details']['sourceipaddress'] = None
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with events with non-existent sourceipaddress",
            events=events,
        )
    )

    # NOTE(review): this loop reuses the ``events`` list from the previous
    # negative case (sourceipaddress already set to None) instead of creating
    # fresh events -- possibly a missing create_events() call; confirm the
    # intent before relying on this case testing only the timestamp.
    for event in events:
        event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(
            date_timedelta={'minutes': 21})
        event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(
            date_timedelta={'minutes': 21})
    test_cases.append(
        NegativeAlertTestCase(
            description="Negative test case with old timestamp",
            events=events,
        )
    )
|
jeffbryner/MozDef
|
tests/alerts/test_guard_duty_probe.py
|
Python
|
mpl-2.0
| 2,607
|
'''
Created on Aug 2, 2013
@author: chadcumba
Parallel workflow execution with SLURM
'''
import os
import re
import subprocess
from time import sleep
from .base import (SGELikeBatchManagerBase, logger, iflogger, logging)
from nipype.interfaces.base import CommandLine
class SLURMPlugin(SGELikeBatchManagerBase):
'''
Execute using SLURM
The plugin_args input to run can be used to control the SLURM execution.
Currently supported options are:
- template : template to use for batch job submission
- sbatch_args: arguments to pass prepend to the sbatch call
'''
    def __init__(self, **kwargs):
        """Initialize the plugin, honoring overrides passed via plugin_args.

        Recognized plugin_args keys: retry_timeout, max_tries, template
        (inline text or a path to a template file) and sbatch_args.
        """
        template = "#!/bin/bash"
        # Defaults: retry twice with a 2-second pause between attempts.
        self._retry_timeout = 2
        self._max_tries = 2
        self._template = template
        self._sbatch_args = None
        if 'plugin_args' in kwargs and kwargs['plugin_args']:
            if 'retry_timeout' in kwargs['plugin_args']:
                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
            if 'max_tries' in kwargs['plugin_args']:
                self._max_tries = kwargs['plugin_args']['max_tries']
            if 'template' in kwargs['plugin_args']:
                self._template = kwargs['plugin_args']['template']
                # A template may be given as a file path; read it if so.
                if os.path.isfile(self._template):
                    self._template = open(self._template).read()
            if 'sbatch_args' in kwargs['plugin_args']:
                self._sbatch_args = kwargs['plugin_args']['sbatch_args']
        # Map of task ids that have been submitted but not yet finished.
        self._pending = {}
        super(SLURMPlugin, self).__init__(template, **kwargs)
def _is_pending(self, taskid):
# subprocess.Popen requires taskid to be a string
proc = subprocess.Popen(["showq", '-u'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
o, _ = proc.communicate()
return o.find(str(taskid)) > -1
def _submit_batchtask(self, scriptfile, node):
"""
This is more or less the _submit_batchtask from sge.py with flipped variable
names, different command line switches, and different output formatting/processing
"""
cmd = CommandLine('sbatch', environ=os.environ.data,
terminal_output='allatonce')
path = os.path.dirname(scriptfile)
sbatch_args = ''
if self._sbatch_args:
sbatch_args = self._sbatch_args
if 'sbatch_args' in node.plugin_args:
if 'overwrite' in node.plugin_args and\
node.plugin_args['overwrite']:
sbatch_args = node.plugin_args['sbatch_args']
else:
sbatch_args += (" " + node.plugin_args['sbatch_args'])
if '-o' not in sbatch_args:
sbatch_args = '%s -o %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out'))
if '-e' not in sbatch_args:
sbatch_args = '%s -e %s' % (sbatch_args, os.path.join(path, 'slurm-%j.out'))
if '-p' not in sbatch_args:
sbatch_args = '%s -p normal' % (sbatch_args)
if '-n' not in sbatch_args:
sbatch_args = '%s -n 16' % (sbatch_args)
if '-t' not in sbatch_args:
sbatch_args = '%s -t 1:00:00' % (sbatch_args)
if node._hierarchy:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._hierarchy,
node._id))
else:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._id))
jobnameitems = jobname.split('.')
jobnameitems.reverse()
jobname = '.'.join(jobnameitems)
cmd.inputs.args = '%s -J %s %s' % (sbatch_args,
jobname,
scriptfile)
oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName('CRITICAL'))
tries = 0
while True:
try:
result = cmd.run()
except Exception, e:
if tries < self._max_tries:
tries += 1
sleep(self._retry_timeout) # sleep 2 seconds and try again.
else:
iflogger.setLevel(oldlevel)
raise RuntimeError('\n'.join((('Could not submit sbatch task'
' for node %s') % node._id,
str(e))))
else:
break
logger.debug('Ran command ({0})'.format(cmd.cmdline))
iflogger.setLevel(oldlevel)
# retrieve taskid
lines = [line for line in result.runtime.stdout.split('\n') if line]
taskid = int(re.match("Submitted batch job ([0-9]*)",
lines[-1]).groups()[0])
self._pending[taskid] = node.output_dir()
logger.debug('submitted sbatch task: %d for node %s' % (taskid, node._id))
return taskid
|
mick-d/nipype_source
|
nipype/pipeline/plugins/slurm.py
|
Python
|
bsd-3-clause
| 4,983
|
"""Gait pattern planning module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
from typing import Any, Sequence
import gin
import numpy as np
from pybullet_envs.minitaur.agents.baseline_controller import gait_generator
_DEFAULT_INITIAL_LEG_STATE = (
gait_generator.LegState.STANCE,
gait_generator.LegState.STANCE,
gait_generator.LegState.STANCE,
gait_generator.LegState.STANCE,
)
_NOMINAL_STANCE_DURATION = (0.25, 0.25, 0.25, 0.25)
_NOMINAL_DUTY_FACTOR = (0.6, 0.6, 0.6, 0.6)
_TROTTING_LEG_PHASE = (0, 0.5, 0.5, 0)
_NOMINAL_CONTACT_DETECTION_PHASE = 0.4
@gin.configurable
class OpenloopGaitGenerator(gait_generator.GaitGenerator):
  """Generates openloop gaits for quadruped robots.

  A flexible open-loop gait generator. Each leg has its own cycle and duty
  factor. And the state of each leg alternates between stance and swing. One can
  easily formulate a set of common quadruped gaits like trotting, pacing,
  pronking, bounding, etc by tweaking the input parameters.
  """
  def __init__(
      self,
      robot: Any,
      stance_duration: Sequence[float] = _NOMINAL_STANCE_DURATION,
      duty_factor: Sequence[float] = _NOMINAL_DUTY_FACTOR,
      initial_leg_phase: Sequence[float] = _TROTTING_LEG_PHASE,
      contact_detection_force_threshold: float = 0,
      contact_detection_phase_threshold:
      float = _NOMINAL_CONTACT_DETECTION_PHASE,
  ):
    """Initializes the class.
    Args:
      robot: A quadruped robot that at least implements the GetFootContacts API
        and num_legs property.
      stance_duration: The desired stance duration.
      duty_factor: The ratio stance_duration / total_gait_cycle.
      initial_leg_phase: The desired initial phase [0, 1] of the legs within the
        full swing + stance cycle.
      contact_detection_force_threshold: The minimal contact force required to
        detect if a foot is in contact with the ground. For real robots this
        needs to be larger (i.e. 25 for Laikago).
      contact_detection_phase_threshold: Updates the state of each leg based on
        contact info, when the current normalized phase is greater than this
        threshold. This is essential to remove false positives in contact
        detection when phase switches. For example, a swing foot at at the
        beginning of the gait cycle might be still on the ground.
    """
    self._robot = robot
    self._stance_duration = stance_duration
    self._duty_factor = duty_factor
    # swing = stance / duty_factor - stance (the remainder of each cycle).
    self._swing_duration = np.array(stance_duration) / np.array(
        duty_factor) - np.array(stance_duration)
    if len(initial_leg_phase) != len(
        list(self._robot.urdf_loader.get_end_effector_id_dict().values())):
      raise ValueError(
          "The number of leg phases should be the same as number of legs.")
    self._initial_leg_phase = initial_leg_phase
    self._initial_leg_state = _DEFAULT_INITIAL_LEG_STATE
    self._next_leg_state = []
    # The ratio in cycle is duty factor if initial state of the leg is STANCE,
    # and 1 - duty_factory if the initial state of the leg is SWING.
    self._initial_state_ratio_in_cycle = []
    for state, duty in zip(self._initial_leg_state, duty_factor):
      # Only an all-STANCE initial configuration is currently supported.
      assert state == gait_generator.LegState.STANCE
      self._initial_state_ratio_in_cycle.append(duty)
      self._next_leg_state.append(gait_generator.LegState.SWING)
    self._contact_detection_force_threshold = contact_detection_force_threshold
    self._contact_detection_phase_threshold = contact_detection_phase_threshold
    # The normalized phase within swing or stance duration.
    self._normalized_phase = None
    # The current leg state, when contact is considered.
    self._leg_state = None
    # The desired leg state (i.e. SWING or STANCE).
    self._desired_leg_state = None
    self.reset(0)
  def reset(self, current_time):
    # current_time is accepted for interface compatibility but not used:
    # phases restart from the initial configuration.
    # The normalized phase within swing or stance duration.
    self._normalized_phase = np.zeros(
        len(list(self._robot.urdf_loader.get_end_effector_id_dict().values())))
    self._leg_state = list(self._initial_leg_state)
    self._desired_leg_state = list(self._initial_leg_state)
  @property
  def desired_leg_state(self) -> Sequence[gait_generator.LegState]:
    """The desired leg SWING/STANCE states.
    Returns:
      The SWING/STANCE states for all legs.
    """
    return self._desired_leg_state
  @property
  def leg_state(self) -> Sequence[gait_generator.LegState]:
    """The leg state after considering contact with ground.
    Returns:
      The actual state of each leg after accounting for contacts.
    """
    return self._leg_state
  @property
  def swing_duration(self) -> Sequence[float]:
    return self._swing_duration
  @property
  def stance_duration(self) -> Sequence[float]:
    return self._stance_duration
  @property
  def normalized_phase(self) -> Sequence[float]:
    """The phase within the current swing or stance cycle.
    Reflects the leg's phase within the current swing or stance stage. For
    example, at the end of the current swing duration, the phase will
    be set to 1 for all swing legs. Same for stance legs.
    Returns:
      Normalized leg phase for all legs.
    """
    return self._normalized_phase
  def update(self, current_time):
    # A leg is "in contact" when its contact force magnitude exceeds the
    # configured threshold.
    contact_state = [
        np.linalg.norm(contact_force) > self._contact_detection_force_threshold
        for contact_force in self._robot.feet_contact_forces()
    ]
    for leg_id in range(
        len(list(self._robot.urdf_loader.get_end_effector_id_dict().values()))):
      # Here is the explanation behind this logic: We use the phase within the
      # full swing/stance cycle to determine if a swing/stance switch occurs
      # for a leg. The threshold value is the "initial_state_ratio_in_cycle" as
      # explained before. If the current phase is less than the initial state
      # ratio, the leg is either in the initial state or has switched back after
      # one or more full cycles.
      full_cycle_period = (
          self._stance_duration[leg_id] / self._duty_factor[leg_id])
      # To account for the non-zero initial phase, we offset the time duration
      # with the effect time contribution from the initial leg phase.
      augmented_time = current_time + self._initial_leg_phase[
          leg_id] * full_cycle_period
      phase_in_full_cycle = math.fmod(augmented_time,
                                      full_cycle_period) / full_cycle_period
      ratio = self._initial_state_ratio_in_cycle[leg_id]
      if phase_in_full_cycle < ratio:
        self._desired_leg_state[leg_id] = self._initial_leg_state[leg_id]
        self._normalized_phase[leg_id] = phase_in_full_cycle / ratio
      else:
        # A phase switch happens for this leg.
        self._desired_leg_state[leg_id] = self._next_leg_state[leg_id]
        self._normalized_phase[leg_id] = (phase_in_full_cycle - ratio) / (1 -
                                                                          ratio)
      self._leg_state[leg_id] = self._desired_leg_state[leg_id]
      # No contact detection at the beginning of each SWING/STANCE phase.
      if (self._normalized_phase[leg_id] <
          self._contact_detection_phase_threshold):
        continue
      if (self._leg_state[leg_id] == gait_generator.LegState.SWING and
          contact_state[leg_id]):
        logging.info("early touch down detected")
        self._leg_state[leg_id] = gait_generator.LegState.EARLY_CONTACT
      if (self._leg_state[leg_id] == gait_generator.LegState.STANCE and
          not contact_state[leg_id]):
        self._leg_state[leg_id] = gait_generator.LegState.LOSE_CONTACT
|
nrz/ylikuutio
|
external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/agents/baseline_controller/openloop_gait_generator.py
|
Python
|
agpl-3.0
| 7,711
|
import glob
import logging
import math
import OpenEXR
import Imath
import os
from PIL import Image, ImageChops
from golem.core.common import is_windows
logger = logging.getLogger("gnr.task")
def print_progress(i, total):
    """Rewrite the current console line with the percentage of work done
    ((i + 1) of total). Python 2 only: the trailing-comma print statement
    suppresses the newline and '\\r' rewinds the cursor."""
    print "\rProgress: {} % ".format(100.0 * float(i + 1) / total),
def open_exr_as_rgbf_images(exr_file):
    """Load an OpenEXR file and return its R, G, B channels as 32-bit
    float ("F" mode) PIL images, in that order."""
    exr = OpenEXR.InputFile(exr_file)
    float_type = Imath.PixelType(Imath.PixelType.FLOAT)
    window = exr.header()['dataWindow']
    size = (window.max.x - window.min.x + 1, window.max.y - window.min.y + 1)
    channels = []
    for name in "RGB":
        channels.append(Image.frombytes("F", size, exr.channel(name, float_type)))
    return channels
def convert_rgbf_images_to_rgb8_image(rgbf, lightest=255.0, darkest=0.0):
    """Merge an [R, G, B] list of float images into one 8-bit RGB image.

    NOTE(review): the inner normalize_0_255 shadows `scale` and `darkest`
    with constants, so the `lightest`/`darkest` parameters are effectively
    ignored (pixels are just multiplied by 255) and the outer `scale` is
    dead code. Callers such as finalize_exr (which guards against
    lightest == darkest) appear to expect real range normalisation --
    confirm the intended behaviour before relying on these parameters.
    """
    scale = 255 / (lightest - darkest)
    def normalize_0_255(val):
        scale = 255.0
        darkest = 0.0
        return (val * scale) + darkest
    rgb8 = [im.point(normalize_0_255).convert("L") for im in rgbf]
    img = Image.merge("RGB", rgb8)
    return img
def convert_rgbf_images_to_l_image(rgbf, lightest=255.0, darkest=0.0):
    """Merge an [R, G, B] list of float images and collapse the result to a
    single greyscale ("L") image.

    NOTE(review): like convert_rgbf_images_to_rgb8_image above, the inner
    normalize_0_255 shadows `scale` and `darkest`, so the `lightest` and
    `darkest` parameters are ignored -- confirm intended behaviour.
    """
    scale = 255 / (lightest - darkest)
    def normalize_0_255(val):
        scale = 255.0
        darkest = 0.0
        return (val * scale) + darkest
    rgb8 = [im.point(normalize_0_255).convert("L") for im in rgbf]
    img = Image.merge("RGB", rgb8)
    img = img.convert("L")
    return img
def get_single_rgbf_extrema(rgbf):
    """Return (darkest, lightest): the minimum low and maximum high pixel
    values across all channel images in *rgbf*."""
    lows = []
    highs = []
    for channel in rgbf:
        lo, hi = channel.getextrema()
        lows.append(lo)
        highs.append(hi)
    return min(lows), max(highs)
def get_list_rgbf_extrema(rgbf_list):
    """Return the global (darkest, lightest) pixel values over a non-empty
    list of [R, G, B] float-image triples, printing progress as it goes
    (Python 2 print statements)."""
    assert len(rgbf_list) > 0
    darkest, lightest = get_single_rgbf_extrema(rgbf_list[0])
    for i in range(1, len(rgbf_list)):
        d, l = get_single_rgbf_extrema(rgbf_list[i])
        darkest = min(d, darkest)
        lightest = max(l, lightest)
        print_progress(i, len(rgbf_list))
    # Terminate the progress line.
    print ""
    return darkest, lightest
def compose_final_image(open_exr_files):
    """Read every EXR chunk, find the global pixel extrema, convert each
    chunk to 8-bit RGB and additively blend them into one final image.
    Python 2 only (print statements)."""
    rgbfs = []
    print "Reading input files"
    for i, open_exr_im_file in enumerate(open_exr_files):
        rgbf = open_exr_as_rgbf_images(open_exr_im_file)
        rgbfs.append(rgbf)
        print_progress(i, len(open_exr_files))
    print "\nFinding extremas for all chunks"
    darkest, lightest = get_list_rgbf_extrema(rgbfs)
    rgb8_images = []
    print "Converting chunks to rgb8 images"
    for i, rgbf in enumerate(rgbfs):
        rgb8_im = convert_rgbf_images_to_rgb8_image(rgbf, lightest, darkest)
        rgb8_images.append(rgb8_im)
        # NOTE(review): the image is closed here but blended below via
        # ImageChops.add -- operating on a closed PIL image is fragile;
        # confirm this works with the PIL version in use.
        rgb8_im.close()
        print_progress(i, len(rgbfs))
    final_img = rgb8_images[0]
    print "\nCompositing the final image"
    for i in range(1, len(rgb8_images)):
        final_img = ImageChops.add(final_img, rgb8_images[i])
        print_progress(i, len(rgb8_images))
    return final_img
def get_exr_files(path):
    """List EXR files directly under *path*. On non-Windows systems the
    upper-case .EXR extension is matched as well (Windows globbing is
    already case-insensitive)."""
    matches = glob.glob(path + "/*.exr")
    if not is_windows():
        matches = matches + glob.glob(path + "/*.EXR")
    return matches
def test_it():
    """Manual smoke test: additively blend a fixed pair of chunk images
    and write the result to result.png."""
    overlay = Image.open('test/test_chunk_00001.png')
    base = Image.open('test/test_chunk_00000.tga')
    blended = ImageChops.add(base, overlay)
    blended.save("result.png", "PNG")
    overlay.close()
    base.close()
def exr_to_pil(exr_file):
    """Convert an OpenEXR file into an 8-bit RGB PIL image.

    Pixel values are simply multiplied by 255; no range normalisation
    against the image extrema is performed."""
    exr = OpenEXR.InputFile(exr_file)
    float_type = Imath.PixelType(Imath.PixelType.FLOAT)
    window = exr.header()['dataWindow']
    size = (window.max.x - window.min.x + 1,
            window.max.y - window.min.y + 1)
    scale = 255.0
    def scale_pixel(value):
        return value * scale
    bands = []
    for name in "RGB":
        channel = Image.frombytes("F", size, exr.channel(name, float_type))
        bands.append(channel.point(scale_pixel).convert("L"))
    return Image.merge("RGB", bands)
class RenderingTaskCollector:
    """Accumulates rendered chunk images (EXR or ordinary formats) and
    composes them into one final image, either by additive blending or by
    pasting chunks one below another (paste=True). Python 2 only where
    show_progress printing is used."""
    def __init__(self, paste=False, width=1, height=1):
        # Global pixel extrema over all accepted EXR chunks; None until the
        # first EXR file is added.
        self.darkest = None
        self.lightest = None
        self.alpha_darkest = None
        self.alpha_lightest = None
        self.accepted_img_files = []
        self.accepted_alpha_files = []
        self.paste = paste
        self.width = width
        self.height = height
    def add_img_file(self, img_file):
        # NOTE(review): the truthiness tests below treat a legitimate
        # extreme of 0.0 as "unset" and overwrite it; `is not None` was
        # probably intended -- confirm.
        if img_file.upper().endswith("EXR"):
            rgbf = open_exr_as_rgbf_images(img_file)
            d, l = get_single_rgbf_extrema(rgbf)
            if self.darkest:
                self.darkest = min(d, self.darkest)
            else:
                self.darkest = d
            if self.lightest:
                self.lightest = max(l, self.lightest)
            else:
                self.lightest = l
        self.accepted_img_files.append(img_file)
    def add_alpha_file(self, img_file):
        # Same bookkeeping as add_img_file, but for alpha-channel chunks
        # (shares the truthiness caveat noted above).
        if img_file.upper().endswith("EXR"):
            rgbf = open_exr_as_rgbf_images(img_file)
            d, l = get_single_rgbf_extrema(rgbf)
            if self.alpha_darkest:
                self.alpha_darkest = min(d, self.alpha_darkest)
            else:
                self.alpha_darkest = d
            if self.alpha_lightest:
                self.alpha_lightest = max(l, self.alpha_lightest)
            else:
                self.alpha_lightest = l
        self.accepted_alpha_files.append(img_file)
    def finalize(self, show_progress=False):
        """Compose all accepted files into one image; returns None when no
        image chunks were accepted. Accepted alpha chunks, if any, are
        blended together and attached as the alpha band."""
        if len(self.accepted_img_files) == 0:
            return None
        # The first chunk's extension decides the composition path.
        are_exr = self.accepted_img_files[0].upper().endswith("EXR")
        if show_progress:
            print "Adding all accepted chunks to the final image"
        if are_exr:
            final_img = self.finalize_exr(show_progress)
        else:
            final_img = self.finalize_not_exr(show_progress)
        if len(self.accepted_alpha_files) > 0:
            final_alpha = convert_rgbf_images_to_l_image(open_exr_as_rgbf_images(self.accepted_alpha_files[0]),
                                                         self.lightest, self.darkest)
            for i in range(1, len(self.accepted_alpha_files)):
                l_im = convert_rgbf_images_to_l_image(open_exr_as_rgbf_images(self.accepted_alpha_files[i]),
                                                      self.lightest, self.darkest)
                final_alpha = ImageChops.add(final_alpha, l_im)
                l_im.close()
            final_img.putalpha(final_alpha)
            final_alpha.close()
        return final_img
    def finalize_exr(self, show_progress=False):
        # Guard against a zero division in normalisation when every chunk
        # shares a single pixel value.
        if self.lightest == self.darkest:
            self.lightest = self.darkest + 0.1
        final_img = convert_rgbf_images_to_rgb8_image(open_exr_as_rgbf_images(self.accepted_img_files[0]),
                                                      self.lightest, self.darkest)
        if self.paste:
            # Paste mode: build a canvas tall enough for all chunks stacked
            # vertically and place the first chunk in slot 0.
            if not self.width or not self.height:
                self.width, self.height = final_img.size
            self.height *= len(self.accepted_img_files)
            img = Image.new('RGB', (self.width, self.height))
            final_img = self._paste_image(img, final_img, 0)
            img.close()
        for i in range(1, len(self.accepted_img_files)):
            rgb8_im = convert_rgbf_images_to_rgb8_image(open_exr_as_rgbf_images(self.accepted_img_files[i]),
                                                        self.lightest, self.darkest)
            if not self.paste:
                final_img = ImageChops.add(final_img, rgb8_im)
            else:
                final_img = self._paste_image(final_img, rgb8_im, i)
            rgb8_im.close()
            if show_progress:
                print_progress(i, len(self.accepted_img_files))
        return final_img
    def finalize_not_exr(self, show_progress=False):
        # Output format follows the extension of the first accepted chunk.
        _, output_format = os.path.splitext(self.accepted_img_files[0])
        output_format = output_format[1:].upper()
        # Canvas size: last chunk's width, sum of all chunk heights.
        res_y = 0
        for name in self.accepted_img_files:
            img = Image.open(name)
            res_x, img_y = img.size
            res_y += img_y
            img.close()
        self.width = res_x
        self.height = res_y
        # Band string (e.g. "RGB") copied from the first chunk.
        img = Image.open(self.accepted_img_files[0])
        bands = img.getbands()
        img.close()
        band = ""
        for b in bands:
            band += b
        final_img = Image.new(band, (res_x, res_y))
        #self.accepted_img_files.sort()
        offset = 0
        for i in range(0, len(self.accepted_img_files)):
            if not self.paste:
                # NOTE(review): this passes a file *path* to ImageChops.add,
                # which expects an Image -- this branch would raise; it
                # probably needs Image.open first. Confirm before use.
                final_img = ImageChops.add(final_img, self.accepted_img_files[i])
            else:
                img = Image.open(self.accepted_img_files[i])
                final_img.paste(img, (0, offset))
                _, img_y = img.size
                offset += img_y
                img.close()
            if show_progress:
                print_progress(i, len(self.accepted_img_files))
        return final_img
    def _paste_image(self, final_img, new_part, num):
        # Place new_part at the num-th of equal vertical slots on a blank
        # canvas, then blend it additively onto final_img.
        img_offset = Image.new("RGB", (self.width, self.height))
        offset = int(math.floor(num * float(self.height) / float(len(self.accepted_img_files))))
        img_offset.paste(new_part, (0, offset))
        return ImageChops.add(final_img, img_offset)
|
imapp-pl/golem
|
gnr/task/renderingtaskcollector.py
|
Python
|
gpl-3.0
| 9,385
|
#!/usr/local/bin/python2
# Advertise.py student version
# Python 2
# CBD 1st Feb.2011
import os.path
import sys
import getopt
from socket import *
def usage(ProgName):
    """Print a one-line usage summary for *ProgName* to stderr and exit.

    Raises SystemExit (status 0) -- never returns.
    """
    print("usage: %s -port <port no.> -host<hostname> -service <service>\n"%(ProgName),
          file=sys.stderr)
    # Bug fix: the original called os.exit(), which does not exist (os only
    # provides os._exit); sys.exit() raises SystemExit as intended.
    sys.exit(0)
#
# Parse command line arguments to find host name, service
# and port number
#
def ParseCmdLine():
    """Read the -p (port), -h (host) and -s (service) options from
    sys.argv.

    Returns (HostName, ServiceName, nPortID). Defaults are
    ('INSTRUCTOR', 'busboy', 602); note that a port supplied via -p is
    returned as a string while the default is an int. ("garcon" is the
    name for port 600.)
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "p:s:h:")
    except getopt.GetoptError as err:
        # e.g. "option -a not recognized"
        print(err)
        usage(sys.argv[0])
        sys.exit(2)
    host_name = 'INSTRUCTOR'
    service_name = "busboy"
    port_id = 602
    for flag, value in opts:
        if flag == "-p":
            port_id = value
        elif flag == "-h":
            host_name = value
        elif flag == "-s":
            service_name = value
        else:
            assert False, "unhandled option"
    return (host_name, service_name, port_id)
######################################################################
if __name__ == '__main__':
    nNumBytes = 0
    # Read command line arguments to find host address, service
    # and/or port number
    HostName, ServiceName, nPortID = ParseCmdLine()
    # TO DO: Create a connectionless socket to send request
    # to share price server
    # TO DO: Find local address information
    # Use local address information to build IP address string
    # TO DO: Send local IP string as request to share price program
    # NOTE(review): Python 2 print statement, while usage() above uses the
    # Python 3 print(..., file=...) form -- as written the file cannot run
    # unmodified on either version.
    print "\n\nWaiting for response..."
    # TO DO: Receive response from share price program
    # TO DO: Print received data
    # TO DO: Close the socket
    # NOTE(review): `sock` is never created (see the TO DOs above), so this
    # line raises NameError until the socket-creation step is implemented.
    sock.close()
|
rbprogrammer/advanced_python_topics
|
course-material/py2/Advertise.py
|
Python
|
apache-2.0
| 1,938
|
import sys, os
import ast
import copy
import pp
python2 = sys.version[0] == "2"
def find_unparse_module():
    """Recursively search every directory on sys.path for unparse.py and,
    on the first hit, append its directory to sys.path so the module can
    be imported."""
    for root in sys.path:
        for dirpath, _dirs, filenames in os.walk(root):
            if "unparse.py" in filenames:
                sys.path.append(dirpath)
                print("added ["+dirpath+"] to sys.path")
                return
if python2:
from StringIO import StringIO
else:
from io import StringIO
find_unparse_module();
try:
import unparse
except:
if python2:
import unparse2 as unparse
else:
import unparse3 as unparse
INIT = "__init__.py"
###########################################################################
#### Globals ##############################################################
mainModule = None
userInteraction = True
###########################################################################
#### TODO #################################################################
#TODO - add support for relative importing (PEP 328)
# - for each file, save the module in which he came from
# - for each submodule, save the module in which he came from
#TODO - use the inerited fields from other classes
#TODO - obfuscate keywords in function call and decleration!
#TODO - distinguish between class' static fields and instances field
#TODO - check the __all__ in python file (maybe add this to the analyzer)
#TODO - support globals
#TODO - mark the type of function arguments (seperate round for this)
#TODO - support attribute with 'ast.Call' inside
#TODO - support python2:
# - replace type(x) with instanceof()
# - in function definition handle all args as names !
###########################################################################
#### general utils ########################################################
def perror(msg):
print(sys._getframe().f_code.co_name + ": " + msg)
###########################################################################
#### objects ##############################################################
class MaskedObj:
    """Base class for every obfuscation target (modules, files, classes,
    functions, variables).

    ``self.name`` is either a plain string (not yet masked) or a
    ``(real, mask)`` tuple produced by mask_gen. Subclasses implement
    ``fields()`` returning a list of lists of child MaskedObj instances.
    """
    def real(self):
        """Return the original (unmasked) name."""
        if isinstance(self.name, tuple):
            return self.name[0]
        return self.name
    def mask(self):
        """Return the obfuscated name; assumes the name has been masked."""
        return self.name[1]
    def findName(self, name):
        """Return the direct child whose real name is *name*, or False."""
        for group in self.fields():
            for child in group:
                if child.real() == name:
                    return child
        return False
    def matchField(self, attrList, maskedAttrList):
        """Resolve the dotted-name path *attrList* child by child.

        Appends each matched child's mask onto *maskedAttrList* and returns
        (deepest matched object, maskedAttrList); the first element is
        False when the path cannot be fully resolved. An empty path means
        this object itself is the match.
        """
        if not attrList:
            return (self, maskedAttrList)
        child = self.findName(attrList[0])
        if not child:
            return (False, maskedAttrList)
        maskedAttrList.append(child.mask())
        return child.matchField(attrList[1:], maskedAttrList)
    def getAll(self):
        """Flatten fields() into a single list of children."""
        flattened = []
        for group in self.fields():
            flattened.extend(group)
        return flattened
class Module(MaskedObj):
    """A Python package directory.

    Recursively loads submodules (subdirectories containing __init__.py),
    parses every .py file in the directory, and treats the contents of
    __init__.py as the module's own variables/functions/classes.
    """
    def __init__(self, mod_path):
        self.name = ""
        self.path = ""
        self.variables = []
        self.functions = []
        self.classes = []
        self.files = []
        self.modules = []
        self.path, self.name = os.path.split(mod_path)
        joiner = lambda x: os.path.join(mod_path, x)
        # next of os.walk will get only first level of the dir's tree
        _, folders, files = next(os.walk(mod_path))
        for folder in folders:
            # A directory containing an __init__ file is a submodule;
            # any other directory is ignored.
            if os.path.exists(os.path.join(mod_path, folder, INIT)):
                self.modules.append(Module(joiner(folder)))
        for file in filter(lambda x: x.endswith(".py"), files):
            with open(joiner(file), "r") as f:
                tree = ast.parse(f.read())
            if file == INIT:
                # __init__.py defines the module's own top-level members.
                self.variables, self.functions, self.classes = ast_analyze(tree)
            else:
                self.files.append(File(tree, file))
    def __str__(self):
        return "Module->" + self.real()
    def fields(self):
        return [self.modules, self.files, self.classes, self.variables, self.functions]
class File(MaskedObj):
    """A plain .py file (not __init__.py) belonging to a module."""
    def __init__(self, file_tree, file_name):
        # Full file name, kept so the obfuscated file can be written out.
        self.fname = file_name
        # The importable name is the file name minus its extension.
        self.name = self.fname.rsplit(".", 1)[0]
        self.variables, self.functions, self.classes = ast_analyze(file_tree)
    def __str__(self):
        return "File->%s" % self.real()
    def fields(self):
        return [self.variables, self.functions, self.classes]
class Class(MaskedObj):
    """A class definition; its children include the members inherited from
    known base classes (motherClasses)."""
    def __init__(self, node):
        self.name = node.name
        self.variables, self.functions, self.classes = ast_analyze(node, isClass=True)
        # Inheritance links to other analysed classes, filled in later.
        self.motherClasses = []
        self.sonsClasses = []
    def __str__(self):
        return "Class->%s" % self.real()
    def fields(self):
        inherited = []
        for base in self.motherClasses:
            inherited.extend(base.fields())
        return [self.variables, self.functions, self.classes] + inherited
class Function(MaskedObj):
    """A named function discovered during analysis. Leaf node: it exposes
    no children through fields()."""
    def __init__(self, name):
        self.name = name
        # Placeholder for argument tracking (Variable objects by name);
        # not populated yet.
        self.args = {}
    def __str__(self):
        return "Function->%s" % self.real()
    def fields(self):
        return []
class Variable(MaskedObj):
    """A named variable. May be linked via setObject() to the project
    object it holds, in which case it exposes that object's children."""
    def __init__(self, name):
        self.name = name
        # The project-internal object this variable refers to, if known.
        self.object = None
    def __str__(self):
        return "Variable->%s" % self.real()
    def setObject(self, obj):
        self.object = obj
    def fields(self):
        if self.object:
            return self.object.fields()
        return []
class Env(MaskedObj):
    """A lookup environment: a flat collection of named objects currently
    in scope during obfuscation. append() replaces any existing entry
    with the same name."""
    def __init__(self, variables=None):
        # Bug fix: the original used a mutable default argument
        # (variables=[]), so every Env created without arguments shared a
        # single list; a fresh list is now created per instance.
        self.variables = [] if variables is None else variables
    def append(self, obj):
        """Add *obj*, replacing any existing entries with the same name."""
        # Bug fix: the original removed items from self.variables while
        # iterating over it, which skips the element following each
        # removal and can leave duplicates behind. Filter first, then add.
        self.variables = [var for var in self.variables
                          if var.name != obj.name]
        self.variables.append(obj)
    def extend(self, objList):
        """append() every object in *objList*."""
        for obj in objList:
            self.append(obj)
    def fields(self):
        return [self.variables]
    def getCopy(self, ext=None):
        """Return a new Env holding a shallow copy of our variables plus
        those in *ext* (the original ext=[] default was never mutated, but
        a None sentinel is safer and equivalent)."""
        ext = [] if ext is None else ext
        return Env(self.variables[:] + ext[:])
###########################################################################
#### ast related functions ################################################
def ast_extract_vars(mnode, isClass=False, inFunc=False):
    """Collect Variable objects for assignment targets under *mnode*.

    Top-level Name targets count only outside functions; ``self.X``
    attribute targets count anywhere inside a class (including inside its
    methods, which are entered only when isClass is set).
    """
    found = set()
    for child in ast.iter_child_nodes(mnode):
        kind = type(child)
        if kind == ast.FunctionDef and isClass:
            # Descend into methods: self.X assignments there define
            # instance variables of the class.
            found.update(ast_extract_vars(child, isClass, inFunc=True))
        elif kind == ast.Assign:
            for target in child.targets:
                is_self_attr = (type(target) == ast.Attribute
                                and type(target.value) == ast.Name
                                and target.value.id == "self")
                if is_self_attr:
                    found.add(Variable(target.attr))
                elif type(target) == ast.Name and not inFunc:
                    found.add(Variable(target.id))
        elif kind not in (ast.FunctionDef, ast.ClassDef):
            # Recurse through other statements (if/for/try/...), but never
            # into nested function or class scopes.
            found.update(ast_extract_vars(child, isClass, inFunc))
    return found
def ast_analyze(tree, isClass=False):
    """Return (variables, functions, classes) found directly under *tree*.

    NOTE(review): `"__init__" in tree_fuctions` compares a string against
    Function objects, so it is always False and the remove() below never
    runs; __init__ is instead left unmasked by mask_gen's special case.
    Confirm whether the removal was ever meant to work.
    """
    # finds all tree vars
    tree_vars = list(ast_extract_vars(tree, isClass=isClass))
    # find functions and classes:
    tree_fuctions = []
    tree_classes = []
    for node in ast.iter_child_nodes(tree):
        if type(node) == ast.FunctionDef:
            tree_fuctions.append(Function(node.name))
        elif type(node) == ast.ClassDef:
            tree_classes.append(Class(node))
    if "__init__" in tree_fuctions:
        tree_fuctions.remove("__init__")
    return (tree_vars, tree_fuctions, tree_classes)
def pp_module(obj, level=0):
    """Pretty-print an analysed object tree, one tab of indentation per
    nesting level, recursing into classes, files and submodules."""
    indent = "\t" * level
    print("%s name: %s" % (indent, obj.name))
    print("%s variables: \n%s%s" % (indent, indent + "\t", str(obj.variables)))
    print("%s functions: \n%s%s" % (indent, indent + "\t", str(obj.functions)))
    # Not every node type carries all three child collections.
    for label, attr in (("class", "classes"), ("file", "files"), ("module", "modules")):
        for child in getattr(obj, attr, ()):
            print("%s %s:" % (indent, label))
            pp_module(child, level + 1)
def attrToList(obj):
    """Flatten a dotted-name AST chain (ast.Attribute / ast.Name) or a
    plain string into a list of name parts, outermost base first.

    Raises Exception for any other node type.
    """
    if isinstance(obj, str):
        return [obj]
    if isinstance(obj, ast.Name):
        return [obj.id]
    if isinstance(obj, ast.Attribute):
        return attrToList(obj.value) + attrToList(obj.attr)
    raise Exception("match: encounter unsupported type")
def updateAttrFromList(obj, attrList):
    """Write the names in *attrList* back onto a dotted-name AST chain.

    Consumes *attrList* from the end: the last element becomes the
    outermost attribute, recursing down to the base ast.Name. The list is
    mutated in place.
    """
    if isinstance(obj, ast.Name):
        obj.id = attrList.pop()
    elif isinstance(obj, ast.Attribute):
        obj.attr = attrList.pop()
        updateAttrFromList(obj.value, attrList)
###########################################################################
#### mask related functions ###############################################
counter = 1
maskBank = {}
def mask_gen(name):
    """Return a (real, mask) pair for *name*.

    "__init__" and "self" are never masked (they map to themselves).
    Masks are memoised in maskBank so the same name always receives the
    same mask: "X" followed by the 20-digit zero-padded binary counter.
    """
    global counter, maskBank
    if name in ("__init__", "self"):
        return (name, name)
    cached = maskBank.get(name)
    if cached:
        return (name, cached)
    # First time we see this name: mint a fresh mask and remember it.
    fresh = "X" + bin(counter)[2:].zfill(20)
    maskBank[name] = fresh
    counter += 1
    return (name, fresh)
def mask_module(obj):
    """Recursively assign (real, mask) name pairs to *obj* and to every
    child reachable through fields()."""
    obj.name = mask_gen(obj.name)
    for group in obj.fields():
        for child in group:
            mask_module(child)
###########################################################################
#### obfuscation handlers #################################################
def assign_user_interaction(node, env):
    """Interactively ask the user how to type an assignment whose value
    could not be inferred.

    Choices: s(kip) this assignment, c(ancel) all further questions,
    "i <import stmt>" to obfuscate an import, "t <expr>" to resolve an
    expression's type. Re-prompts until the user skips, cancels, or a
    t-expression resolves.
    """
    global userInteraction
    if not userInteraction:
        return
    print("** found assaign with unknown type:")
    # NOTE(review): `astunparse` is never imported (the file imports a
    # local `unparse` module instead), so this line raises NameError --
    # confirm which unparser is intended.
    print("** -> " + astunparse.unparse(node))
    print("** (s:kip) (c:ancel) (i:mport) (t:ype) = [i import sys] or [t me.Object()]")
    usr = input().split(None, 1)
    # Bug fix: empty input used to raise IndexError on usr[0]; re-prompt.
    if not usr:
        return assign_user_interaction(node, env)
    # Bug fix: the original compared strings with `is`, which tests object
    # identity and only works by accident of CPython interning; use `==`.
    if usr[0] == "s":
        return
    if usr[0] == "c":
        userInteraction = False
        return
    if usr[0] == "i":
        try:
            imp = ast.parse(usr[1])
            imp = imp.body[0]
            assert type(imp) in (ast.Import, ast.ImportFrom)
            env = env.getCopy()
            obfuscate_obj(imp, env)
        except:
            print("please enter import statment")
    if usr[0] == "t":
        try:
            obj = ast.parse(usr[1])
            obj = obj.body[0]
            assert type(obj) in (ast.Call, ast.Name, ast.Attribute)
            return obfuscate_obj(obj, env)[0]
        except:
            print("please enter a valid expression")
    return assign_user_interaction(node, env)
def handle_import_from(node, env):
    """Mask a ``from a.b import ...`` statement whose source module belongs
    to the project being obfuscated; no-op for external modules."""
    attrList = node.module.split(".")
    module, maskedAtrrList = mainModule.matchField(attrList, [])
    if not module:
        return
    node.module = ".".join(maskedAtrrList)
    if (len(node.names)==1) and (node.names[0].name == "*"):
        # Star import: everything the module exposes enters the env.
        env.extend(module.getAll())
    else:
        # NOTE(review): handle_import below is defined as
        # handle_import(node, env); this 3-argument call would raise
        # TypeError. The extra `module` argument suggests an older
        # signature that searched within the matched module -- confirm
        # the intended call.
        handle_import(node, module, env)
def handle_import(node, env):
    """Mask a plain ``import a.b`` statement in place and register each
    matched project object in *env*.

    Aliased imports (``import x as y``) get a shallow copy of the matched
    object so the alias can carry its own mask.
    """
    # TODO - check also in main_module (maybe save it as global)
    for alias in node.names:
        parts = alias.name.split(".")
        matched, masked_parts = mainModule.matchField(parts, [])
        if not matched:
            continue
        # TODO - maybe change this to variable with type of the match
        if alias.asname:
            matched = copy.copy(matched)
            matched.name = mask_gen(alias.asname)
            alias.asname = matched.mask()
        env.append(matched)
        alias.name = ".".join(masked_parts)
def handle_assign(node, env):
    """Obfuscate an assignment: register every Name target as a masked
    Variable in *env*, obfuscate both sides, and link each target
    Variable to the resolved value object for later attribute lookups."""
    # TODO - handle tuple-to-tuple assignment with different types:
    # only the first resolved type of the value side is used.
    value = obfuscate_obj(node.value, env)
    if len(value) > 0:
        value = value[0]
    if type(value) is Class:
        pass
    if type(value) is Variable:
        # An assigned variable carries the object it points at.
        value = value.object
    for target in node.targets:
        # A tuple/list target contributes each of its elements.
        names = target.elts if (type(target) in (ast.Tuple, ast.List)) else [target]
        for name in names:
            if type(name) == ast.Name:
                # Bug fix: the original read target.id here, which raises
                # AttributeError for tuple/list targets; the element being
                # registered is `name`.
                print("@@@@ " + name.id)  # debug trace left from development
                newVar = Variable(mask_gen(name.id))
                env.append(newVar)
        objs = obfuscate_obj(target, env)
        for obj in objs:
            if type(obj) == Variable:
                obj.object = value
def handle_call(node, env):
    """Obfuscate a call expression's arguments, callee and keyword names.

    Returns the matched Class when the call turns out to be a constructor
    invocation, else None.
    """
    # Obfuscate every child except the callee, which is evaluated
    # separately so its resolved object can be inspected afterwards.
    other_children = [c for c in ast.iter_child_nodes(node) if c is not node.func]
    for child in other_children:
        obfuscate_obj(child, env)
    resolved = obfuscate_obj(node.func, env)
    if resolved:
        resolved = resolved[0]
    # Keyword argument names belong to the masked module, so mask them too.
    for kw in node.keywords:
        _real, masked = mask_gen(kw.arg)
        kw.arg = masked
    # A call whose target resolves to a Class is a constructor call.
    if type(resolved) == Class:
        return resolved
    return None
def handle_function_def(func, env):
    """Mask a function definition: bring the function's registered
    children into scope, swap in its masked name, and recurse into the
    body."""
    func_obj = env.findName(func.name)
    assert type(func_obj) is Function
    env.extend(func_obj.getAll())
    func.name = func_obj.mask()
    obfuscate_childs(func, env)
def handle_lambda_def(node, env):
    # TODO: lambdas are currently ignored -- their argument names are left
    # unmasked. Implement once argument masking (see handle_arg) covers them.
    pass
def handle_class_def(cls, env):
    """Mask a class definition and obfuscate its body.

    A pseudo-variable ``self`` bound to the class object is injected into
    the environment so ``self.X`` accesses resolve to class members.
    """
    cls_obj = env.findName(cls.name)
    assert type(cls_obj) is Class
    # "self" is deliberately never masked: mask_gen maps it to itself.
    self_var = Variable(mask_gen("self"))
    self_var.setObject(cls_obj)
    env.extend(cls_obj.getAll())
    env.append(self_var)
    cls.name = cls_obj.mask()
    obfuscate_childs(cls, env)
def handle_name_attribute(node, env):
    """Mask a Name/Attribute chain in place against *env*.

    Returns the deepest matched object, or None when nothing along the
    chain could be resolved. A partially matched chain keeps its
    unresolved tail unmasked.
    """
    parts = attrToList(node)
    matched, masked = env.matchField(parts, [])
    if not masked:
        perror("didnt find [" + ".".join(parts) + "] in current module")
        return None
    # Partial match: append the unmatched tail of the chain unchanged.
    if len(masked) != len(parts):
        masked += parts[len(masked):]
    updateAttrFromList(node, masked)
    return matched
def handle_arg(arg, env):
    """Mask one function argument (ast.arg) in place and register it in
    *env*; "self" is left untouched."""
    if arg.arg == "self":
        return
    masked_var = Variable(mask_gen(arg.arg))
    env.append(masked_var)
    arg.arg = masked_var.mask()
###########################################################################
#### variables types logics ###############################################
def typify_class_def(cls, env):
    """Record inheritance links for a class definition; returns True when
    any mother/son link was added (i.e. the type graph changed).

    NOTE(review): this mirrors classify_class_def below almost line for
    line and delegates recursion to classify_childs, which is not defined
    in this part of the file -- confirm whether the typify_* family should
    call typify_* helpers instead.
    """
    clsObj = env.findName(cls.name)
    assert type(clsObj) is Class
    # self var has no mask, so mask() and real() should return same name
    selfVar = Variable(mask_gen("self"))
    selfVar.setObject(clsObj)
    env.extend(clsObj.getAll())
    env.append(selfVar)
    changed = False
    for base in cls.bases:
        baseObj = handle_name_attribute(base, env)
        if baseObj:
            assert type(clsObj) is Class
            if baseObj not in clsObj.motherClasses:
                clsObj.motherClasses.append(baseObj)
                changed = True
            if clsObj not in baseObj.sonsClasses:
                baseObj.sonsClasses.append(clsObj)
                changed = True
    classify_childs(cls, env)
    return changed
def typify_obj(node, env):
    """Dispatch one AST node for the typing pass; True if anything changed.

    NOTE(review): previously dispatched into the classify_* helpers (a
    copy/paste remnant); redirected to the typify_* counterparts.
    """
    retVal = []
    if type(node) == ast.Import:
        handle_import(node, env)
    elif type(node) == ast.ImportFrom:
        handle_import_from(node, env)
    # require copy of current env ######
    elif type(node) == ast.ClassDef:
        val = typify_class_def(node, env.getCopy())
        retVal.append(val)
    # ##################################
    else:
        val = typify_childs(node, env)
        retVal.append(val)
    return any(retVal)
def typify_childs(tree, env):
    """Run the typing pass over every direct child of *tree*.

    NOTE(review): previously called classify_obj (copy/paste remnant);
    redirected to typify_obj.  All children are visited for their side
    effects before the results are folded with any().
    """
    retVal = []
    for node in ast.iter_child_nodes(tree):
        val = typify_obj(node, env)
        retVal.append(val)
    return any(retVal)
def typify_file(realPath, file):
    """Parse a source file and run the typing pass on its top-level nodes.

    NOTE(review): previously delegated to classify_childs (copy/paste
    remnant); redirected to typify_childs.
    """
    with open(realPath, "r") as realFile:
        tree = ast.parse(realFile.read())
    return typify_childs(tree, Env(file.getAll()))
def typify(mod):
    """Run the typing pass over a module tree until nothing changes.

    NOTE(review): this function mirrored classify() but still called the
    classify_* helpers, so the whole typify_* family was dead code; the
    calls are redirected to the typify_* counterparts for consistency.
    """
    retVal = []
    realPath = os.path.join(mod.path, mod.real())
    for file in mod.files:
        fileRealPath = os.path.join(realPath, file.real() + ".py")
        val = typify_file(fileRealPath, file)
        retVal.append(val)
    # if this is a submodule then it will have INIT file
    # need to create INIT file without mask his name, only the content
    initRealPath = os.path.join(realPath, INIT)
    if os.path.exists(initRealPath):
        val = typify_file(initRealPath, mod)
        retVal.append(val)
    for module in mod.modules:
        val = typify(module)
        retVal.append(val)
    # TODO - maybe there is no need for recursion here
    if any(retVal):
        typify(mod)
###########################################################################
#### class inheritence logics #############################################
def classify_class_def(cls, env):
    """Record inheritance links for one class; True if the graph changed."""
    cls_entry = env.findName(cls.name)
    assert type(cls_entry) is Class
    # "self" has no distinct mask, so mask() and real() resolve identically
    self_var = Variable(mask_gen("self"))
    self_var.setObject(cls_entry)
    env.extend(cls_entry.getAll())
    env.append(self_var)
    changed = False
    for base in cls.bases:
        base_entry = handle_name_attribute(base, env)
        if not base_entry:
            continue
        assert type(cls_entry) is Class
        # maintain both directions of the inheritance graph
        if base_entry not in cls_entry.motherClasses:
            cls_entry.motherClasses.append(base_entry)
            changed = True
        if cls_entry not in base_entry.sonsClasses:
            base_entry.sonsClasses.append(cls_entry)
            changed = True
    classify_childs(cls, env)
    return changed
def classify_obj(node, env):
    """Dispatch one AST node for inheritance classification.

    Returns True when the inheritance graph gained new information.
    """
    node_type = type(node)
    if node_type == ast.Import:
        handle_import(node, env)
        return False
    if node_type == ast.ImportFrom:
        handle_import_from(node, env)
        return False
    if node_type == ast.ClassDef:
        # class bodies are classified against a copy of the environment
        return classify_class_def(node, env.getCopy())
    return classify_childs(node, env)
def classify_childs(tree, env):
    """Classify every direct child of *tree*; True if any of them changed.

    Every child is visited (for its side effects) before the results are
    folded, so the list is built first rather than short-circuiting.
    """
    results = [classify_obj(child, env)
               for child in ast.iter_child_nodes(tree)]
    return any(results)
def classify_file(realPath, file):
    """Parse one source file and classify its top-level nodes."""
    with open(realPath, "r") as src:
        tree = ast.parse(src.read())
    return classify_childs(tree, Env(file.getAll()))
def classify(mod):
    """Classify a module tree, repeating until the inheritance graph settles."""
    changed = []
    module_dir = os.path.join(mod.path, mod.real())
    for file in mod.files:
        source_path = os.path.join(module_dir, file.real() + ".py")
        changed.append(classify_file(source_path, file))
    # a package carries an __init__ file whose content belongs to the
    # module itself (its name is never masked, only its content)
    init_path = os.path.join(module_dir, INIT)
    if os.path.exists(init_path):
        changed.append(classify_file(init_path, mod))
    for submodule in mod.modules:
        changed.append(classify(submodule))
    # TODO - maybe there is no need for recursion here
    if any(changed):
        classify(mod)
###########################################################################
#### obfuscation main logics ##############################################
def obfuscate_obj(node, env):
    """Dispatch obfuscation for a single AST node.

    Returns the list of resolved objects for name-like nodes (consumed by
    callers such as handle_assign); None entries are filtered out.
    """
    retVal = []
    if type(node) in (ast.Name, ast.Attribute):
        val = handle_name_attribute(node, env)
        retVal.append(val)
    elif type(node) == ast.Call:
        val = handle_call(node, env)
        retVal.append(val)
    elif type(node) == ast.Import:
        handle_import(node, env)
    elif type(node) == ast.ImportFrom:
        handle_import_from(node, env)
    # require copy of current env ######
    elif type(node) == ast.FunctionDef:
        handle_function_def(node, env.getCopy())
    elif type(node) == ast.Lambda:
        handle_lambda_def(node, env.getCopy())
    elif type(node) == ast.ClassDef:
        handle_class_def(node, env.getCopy())
    # ##################################
    elif type(node) == ast.Assign:
        handle_assign(node, env)
    elif type(node) in (ast.List, ast.Tuple):
        val = obfuscate_childs(node, env)
        retVal.extend(val)
    elif type(node) == ast.Starred:
        val = obfuscate_obj(node.value, env)
        retVal.extend(val)
    elif type(node) == ast.Subscript:
        # BUG FIX: the slice was recursed into without an environment
        # (obfuscate_obj(node.slice)), raising TypeError at runtime for
        # every Subscript node; the current env is now passed through.
        obfuscate_obj(node.slice, env)
        val = obfuscate_obj(node.value, env)
        retVal.extend(val)
    elif type(node) == ast.arg:
        handle_arg(node, env)
    # Keyword, Global
    else:
        obfuscate_childs(node, env)
    return list(filter(None, retVal))
def obfuscate_childs(tree, env):
    """Obfuscate all direct children of *tree*; returns the resolved objects."""
    return [found
            for child in ast.iter_child_nodes(tree)
            for found in obfuscate_obj(child, env)]
def obfuscate_file(realPath, maskedPath, file):
    """Obfuscate one source file and write the result to *maskedPath*."""
    with open(realPath, "r") as src:
        tree = ast.parse(src.read())
    obfuscate_childs(tree, Env(file.getAll()))
    # render the rewritten AST back to source text
    buf = StringIO()
    unparse.Unparser(tree, buf)
    rendered = buf.getvalue()
    buf.close()
    with open(maskedPath, "w+") as dst:
        dst.write(rendered)
def obfuscate(mod, masked_path):
    """Write the obfuscated copy of *mod* (its files, __init__, submodules).

    The destination directory is created under *masked_path* using the
    module's masked name.
    """
    realPath = os.path.join(mod.path, mod.real())
    maskedPath = os.path.join(masked_path, mod.mask())
    if python2:
        # Python 2 has no exist_ok; tolerate a pre-existing directory only.
        # (Was a bare `except: pass`, which also swallowed KeyboardInterrupt.)
        try:
            os.makedirs(maskedPath)
        except OSError:
            pass
    else:
        os.makedirs(maskedPath, exist_ok=True)
    for file in mod.files:
        fileRealPath = os.path.join(realPath, file.real() + ".py")
        fileMaskedPath = os.path.join(maskedPath, file.mask() + ".py")
        obfuscate_file(fileRealPath, fileMaskedPath, file)
    # if this is a submodule then it will have INIT file
    # need to create INIT file without mask his name, only the content
    initRealPath = os.path.join(realPath, INIT)
    initMaskedPath = os.path.join(maskedPath, INIT)
    if os.path.exists(initRealPath):
        obfuscate_file(initRealPath, initMaskedPath, mod)
    for module in mod.modules:
        obfuscate(module, maskedPath)
###########################################################################
#### main #################################################################
def main():
    """CLI entry point: obfuscate the project folder given as argv[1]."""
    global mainModule
    if len(sys.argv) < 2:
        print("please specify project folder")
        exit(1)
    root = sys.argv[1]
    # make the path absolute; a ":" accounts for Windows drive letters
    if not root.startswith("/") and root.find(":") < 0:
        root = os.path.join(os.getcwd(), root)
    if not os.path.exists(root):
        print("path does not exists: %s" % root)
        exit(1)
    mainModule = Module(root)
    mask_module(mainModule)
    classify(mainModule)
    obfuscate(mainModule, mainModule.path)


if __name__ == "__main__":
    main()
|
dorki/py-namer
|
pynamer.py
|
Python
|
gpl-2.0
| 25,153
|
from .relation import *
from .operation import *
from .dependency import *
from .apt import *
from .pip import *
from .yatr import *
from syn.base_utils import harvest_metadata, delete
# Load package metadata from metadata.yml into this module, then remove the
# helper names from the namespace again.
# NOTE(review): `delete` is handed itself as a second argument, so it too is
# scrubbed after the block -- presumably intentional; confirm against
# syn.base_utils.delete semantics.
with delete(harvest_metadata, delete):
    harvest_metadata('metadata.yml')
|
mbodenhamer/depman
|
depman/__init__.py
|
Python
|
mit
| 262
|
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from tests import mock
@pytest.yield_fixture
def mock_logging():
    """Yield the compose.cli.errors logger replaced by an autospec mock."""
    with mock.patch('compose.cli.errors.log', autospec=True) as patched:
        yield patched
def patch_find_executable(side_effect):
    """Return a patcher for errors.find_executable with canned results."""
    return mock.patch(
        'compose.cli.errors.find_executable',
        side_effect=side_effect,
        autospec=True)
class TestHandleConnectionErrors(object):
    """Tests for the handle_connection_errors context manager."""

    @staticmethod
    def _logged_error(mock_logging):
        """Return the first positional message passed to log.error."""
        _, args, _ = mock_logging.error.mock_calls[0]
        return args[0]

    def test_generic_connection_error(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with patch_find_executable(['/bin/docker', None]):
                with handle_connection_errors(mock.Mock()):
                    raise ConnectionError()
        assert "Couldn't connect to Docker daemon" in self._logged_error(mock_logging)

    def test_api_error_version_mismatch(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, b"client is newer than server")
        assert "Docker Engine of version 1.10.0 or greater" in self._logged_error(mock_logging)

    def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, u"client is newer than server")
        assert "Docker Engine of version 1.10.0 or greater" in self._logged_error(mock_logging)

    def test_api_error_version_other(self, mock_logging):
        msg = b"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)
        mock_logging.error.assert_called_once_with(msg.decode('utf-8'))

    def test_api_error_version_other_unicode_explanation(self, mock_logging):
        msg = u"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)
        mock_logging.error.assert_called_once_with(msg)
|
sdurrheimer/compose
|
tests/unit/cli/errors_test.py
|
Python
|
apache-2.0
| 2,518
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import pkg_resources
from pifpaf import drivers
class CephDriver(drivers.Driver):
    """Pifpaf driver deploying a throw-away single-node Ceph cluster.

    Spawns one monitor (mon.a) and one OSD (osd.0) inside ``self.tempdir``
    with authentication disabled and replication turned off, then exports
    ``CEPH_CONF`` and ``URL`` for the process under test.
    """

    # default TCP port for the Ceph monitor
    DEFAULT_PORT = 6790

    def __init__(self, port=DEFAULT_PORT,
                 **kwargs):
        """Create a new Ceph cluster."""
        super(CephDriver, self).__init__(**kwargs)
        self.port = port

    @classmethod
    def get_options(cls):
        # command-line options understood by this driver
        return [
            {"param_decls": ["--port"],
             "type": int,
             "default": cls.DEFAULT_PORT,
             "help": "port to use for Ceph Monitor"},
        ]

    def _setUp(self):
        # Deploy the cluster and block until `ceph health` reports HEALTH_OK.
        super(CephDriver, self)._setUp()
        self._ensure_xattr_support()
        fsid = str(uuid.uuid4())
        conffile = os.path.join(self.tempdir, "ceph.conf")
        mondir = os.path.join(self.tempdir, "mon", "ceph-a")
        osddir = os.path.join(self.tempdir, "osd", "ceph-0")
        os.makedirs(mondir)
        os.makedirs(osddir)
        # `ceph --version` prints e.g. "ceph version 12.2.0 (...)"; pick the
        # version token and compare with packaging version rules.
        _, version = self._exec(["ceph", "--version"], stdout=True)
        version = version.decode("ascii").split()[2]
        version = pkg_resources.parse_version(version)
        # version-specific knobs: pre-Luminous (<12) needs full-ratio
        # overrides, Luminous+ must explicitly allow pool deletion
        if version < pkg_resources.parse_version("12.0.0"):
            extra = """
mon_osd_nearfull_ratio = 1
mon_osd_full_ratio = 1
osd_failsafe_nearfull_ratio = 1
osd_failsafe_full_ratio = 1
"""
        else:
            extra = """
mon_allow_pool_delete = true
"""
        # FIXME(sileht): check availible space on /dev/shm
        # if os.path.exists("/dev/shm") and os.access('/dev/shm', os.W_OK):
        #     journal_path = "/dev/shm/$cluster-$id-journal"
        # else:
        journal_path = "%s/osd/$cluster-$id/journal" % self.tempdir
        with open(conffile, "w") as f:
            f.write("""[global]
fsid = %(fsid)s
# no auth for now
auth cluster required = none
auth service required = none
auth client required = none
## no replica
osd pool default size = 1
osd pool default min size = 1
osd crush chooseleaf type = 0
## some default path change
run dir = %(tempdir)s
pid file = %(tempdir)s/$type.$id.pid
admin socket = %(tempdir)s/$cluster-$name.asok
mon data = %(tempdir)s/mon/$cluster-$id
osd data = %(tempdir)s/osd/$cluster-$id
osd journal = %(journal_path)s
log file = %(tempdir)s/$cluster-$name.log
mon cluster log file = %(tempdir)s/$cluster.log
# Only omap to have same behavior for all filesystems
filestore xattr use omap = True
# workaround for ext4 and last Jewel version
osd max object name len = 256
osd max object namespace len = 64
osd op threads = 10
filestore max sync interval = 10001
filestore min sync interval = 10000
%(extra)s
journal_aio = false
journal_dio = false
journal zero on create = false
journal block align = false
# run as file owner
setuser match path = %(tempdir)s/$type/$cluster-$id
[mon.a]
host = localhost
mon addr = 127.0.0.1:%(port)d
""" % dict(fsid=fsid, tempdir=self.tempdir, port=self.port,
           journal_path=journal_path, extra=extra))  # noqa
        ceph_opts = ["ceph", "-c", conffile]
        mon_opts = ["ceph-mon", "-c", conffile, "--id", "a", "-d"]
        osd_opts = ["ceph-osd", "-c", conffile, "--id", "0", "-d",
                    "-m", "127.0.0.1:%d" % self.port]
        # Create and start monitor
        self._exec(mon_opts + ["--mkfs"])
        self._touch(os.path.join(mondir, "done"))
        mon, _ = self._exec(
            mon_opts,
            wait_for_line=r"mon.a@0\(leader\).mds e1 print_map")
        # Create and start OSD
        self._exec(ceph_opts + ["osd", "create"])
        self._exec(ceph_opts + ["osd", "crush", "add", "osd.0", "1",
                                "root=default"])
        self._exec(osd_opts + ["--mkfs", "--mkjournal"])
        # pre-Hammer (<0.94) emits a different end-of-init log line
        if version < pkg_resources.parse_version("0.94.0"):
            wait_for_line = "journal close"
        else:
            wait_for_line = "done with init"
        osd, _ = self._exec(osd_opts, wait_for_line=wait_for_line)
        # Luminous+ stores the full ratios in the OSD map, not the conf file
        if version >= pkg_resources.parse_version("12.0.0"):
            self._exec(ceph_opts + ["osd", "set-full-ratio", "0.95"])
            self._exec(ceph_opts + ["osd", "set-backfillfull-ratio", "0.95"])
            self._exec(ceph_opts + ["osd", "set-nearfull-ratio", "0.95"])
        # Wait it's ready
        out = b""
        while b"HEALTH_OK" not in out:
            ceph, out = self._exec(ceph_opts + ["health"], stdout=True)
            if b"HEALTH_ERR" in out:
                raise RuntimeError("Fail to deploy ceph")
        # NOTE(review): CEPH_CONF is exported twice, once with an extra True
        # argument -- both calls look intentional; confirm putenv()'s third
        # parameter semantics in drivers.Driver.
        self.putenv("CEPH_CONF", conffile, True)
        self.putenv("CEPH_CONF", conffile)
        self.putenv("URL", "ceph://localhost:%d" % self.port)
|
sileht/pifpaf
|
pifpaf/drivers/ceph.py
|
Python
|
apache-2.0
| 5,134
|
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
import os
import socket
import sys
import unittest
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
import six
from ryu.lib import pcaplib
from ryu.lib.packet import packet
from ryu.lib.packet import zebra
from ryu.utils import binary_str
# Directory holding the captured Zebra pcap fixtures used by Test_zebra.
PCAP_DATA_DIR = os.path.join(
    os.path.dirname(sys.modules[__name__].__file__),
    '../../packet_data/pcap/')
# Patch the FRRouting version probe so that the tests below parse and
# serialize messages with FRRouting v2.0 semantics.
_patch_frr_v2 = mock.patch(
    'ryu.lib.packet.zebra._is_frr_version_ge',
    mock.MagicMock(side_effect=lambda x: x == zebra._FRR_VERSION_2_0))
class Test_zebra(unittest.TestCase):
    """
    Test case for ryu.lib.packet.zebra.
    """

    @staticmethod
    def _test_pcap_single(f):
        pcap_path = os.path.join(PCAP_DATA_DIR, f + '.pcap')
        for _, buf in pcaplib.Reader(open(pcap_path, 'rb')):
            pkt = packet.Packet(buf)
            # every Zebra message in the frame must parse...
            for zebra_pkt in pkt.get_protocols(zebra.ZebraMessage):
                ok_(isinstance(zebra_pkt, zebra.ZebraMessage),
                    'Failed to parse Zebra message: %s' % pkt)
            # ...and no trailing raw bytes may be left over
            ok_(not isinstance(pkt.protocols[-1],
                               (six.binary_type, bytearray)),
                'Some messages could not be parsed in %s: %s' % (f, pkt))
            # re-serializing must reproduce the captured bytes exactly
            pkt.serialize()
            eq_(binary_str(buf), binary_str(pkt.data))

    def test_pcap_quagga(self):
        for f in ('zebra_v2', 'zebra_v3'):
            self._test_pcap_single(f)

    @_patch_frr_v2
    def test_pcap_frr_v2(self):
        # API version 4 on FRRouting v2.0
        self._test_pcap_single('zebra_v4_frr_v2')
class TestZebraMessage(unittest.TestCase):
    """Header-size lookup for each Zebra protocol version."""

    def test_get_header_size(self):
        expectations = (
            (0, zebra.ZebraMessage.V0_HEADER_SIZE),
            (2, zebra.ZebraMessage.V1_HEADER_SIZE),
            (3, zebra.ZebraMessage.V3_HEADER_SIZE),
            (4, zebra.ZebraMessage.V3_HEADER_SIZE),
        )
        for version, size in expectations:
            eq_(size, zebra.ZebraMessage.get_header_size(version))

    @raises(ValueError)
    def test_get_header_size_invalid_version(self):
        eq_(zebra.ZebraMessage.V0_HEADER_SIZE,
            zebra.ZebraMessage.get_header_size(0xff))
class TestZebraRedistributeAdd(unittest.TestCase):
    """ZEBRA_REDISTRIBUTE_ADD (v3) parse/serialize round-trip."""

    buf = (
        b'\x02'  # route_type
    )
    route_type = zebra.ZEBRA_ROUTE_CONNECT

    def test_parser(self):
        parsed = zebra.ZebraRedistributeAdd.parse(self.buf, version=3)
        eq_(self.route_type, parsed.route_type)
        eq_(binary_str(self.buf), binary_str(parsed.serialize(version=3)))
class TestZebraIPv4ImportLookup(unittest.TestCase):
    """ZEBRA_IPV4_IMPORT_LOOKUP request (client -> zebra) round-trip."""

    buf = (
        b'\x18'
        b'\xc0\xa8\x01\x01'  # prefix
    )
    prefix = '192.168.1.1/24'
    metric = None
    nexthop_num = 0
    from_zebra = False

    def test_parser(self):
        parsed = zebra.ZebraIPv4ImportLookup.parse(self.buf)
        for name in ('prefix', 'metric', 'from_zebra'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(self.nexthop_num, len(parsed.nexthops))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraIPv4ImportLookupFromZebra(unittest.TestCase):
    """ZEBRA_IPV4_IMPORT_LOOKUP reply (zebra -> client) round-trip."""

    buf = (
        b'\xc0\xa8\x01\x01'  # prefix
        b'\x00\x00\x00\x14'  # metric
        b'\x01'  # nexthop_num
        b'\x01'  # nexthop_type
        b'\x00\x00\x00\x02'  # ifindex
    )
    prefix = '192.168.1.1'
    metric = 0x14
    nexthop_num = 1
    nexthop_type = zebra.ZEBRA_NEXTHOP_IFINDEX
    ifindex = 2
    from_zebra = True

    def test_parser(self):
        parsed = zebra.ZebraIPv4ImportLookup.parse_from_zebra(self.buf)
        for name in ('prefix', 'metric', 'from_zebra'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(self.nexthop_num, len(parsed.nexthops))
        eq_(self.nexthop_type, parsed.nexthops[0].type)
        eq_(self.ifindex, parsed.nexthops[0].ifindex)
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraIPv4NexthopLookupMRib(unittest.TestCase):
    """ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB request round-trip."""

    buf = (
        b'\xc0\xa8\x01\x01'  # addr
    )
    addr = '192.168.1.1'
    distance = None
    metric = None
    nexthop_num = 0

    def test_parser(self):
        parsed = zebra.ZebraIPv4NexthopLookupMRib.parse(self.buf)
        for name in ('addr', 'distance', 'metric'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(self.nexthop_num, len(parsed.nexthops))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraIPv4NexthopLookupMRibFromZebra(unittest.TestCase):
    """ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB reply (with nexthop) round-trip."""

    buf = (
        b'\xc0\xa8\x01\x01'  # addr
        b'\x01'  # distance
        b'\x00\x00\x00\x14'  # metric
        b'\x01'  # nexthop_num
        b'\x01'  # nexthop_type
        b'\x00\x00\x00\x02'  # ifindex
    )
    addr = '192.168.1.1'
    distance = 1
    metric = 0x14
    nexthop_num = 1
    nexthop_type = zebra.ZEBRA_NEXTHOP_IFINDEX
    ifindex = 2

    def test_parser(self):
        parsed = zebra.ZebraIPv4NexthopLookupMRib.parse(self.buf)
        for name in ('addr', 'distance', 'metric'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(self.nexthop_num, len(parsed.nexthops))
        eq_(self.nexthop_type, parsed.nexthops[0].type)
        eq_(self.ifindex, parsed.nexthops[0].ifindex)
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraNexthopUpdateIPv6(unittest.TestCase):
    """ZEBRA_NEXTHOP_UPDATE (IPv6 prefix) round-trip, FRR v2 semantics."""

    buf = (
        b'\x00\x0a'  # family
        b'\x40'  # prefix_len
        b'\x20\x01\x0d\xb8'  # prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x14'  # metric
        b'\x01'  # nexthop_num
        b'\x01'  # nexthop_type
        b'\x00\x00\x00\x02'  # ifindex
    )
    family = socket.AF_INET6
    prefix = '2001:db8::/64'
    metric = 0x14
    nexthop_num = 1
    nexthop_type = zebra.ZEBRA_NEXTHOP_IFINDEX
    ifindex = 2

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraNexthopUpdate.parse(self.buf)
        for name in ('family', 'prefix', 'metric'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(self.nexthop_num, len(parsed.nexthops))
        eq_(self.nexthop_type, parsed.nexthops[0].type)
        eq_(self.ifindex, parsed.nexthops[0].ifindex)
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraInterfaceNbrAddressAdd(unittest.TestCase):
    """ZEBRA_INTERFACE_NBR_ADDRESS_ADD round-trip, FRR v2 semantics."""

    buf = (
        b'\x00\x00\x00\x01'  # ifindex
        b'\x02'  # family
        b'\xc0\xa8\x01\x00'  # prefix
        b'\x18'  # prefix_len
    )
    ifindex = 1
    family = socket.AF_INET
    prefix = '192.168.1.0/24'

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraInterfaceNbrAddressAdd.parse(self.buf)
        for name in ('ifindex', 'family', 'prefix'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraInterfaceBfdDestinationUpdate(unittest.TestCase):
    """ZEBRA_INTERFACE_BFD_DEST_UPDATE round-trip, FRR v2 semantics."""

    buf = (
        b'\x00\x00\x00\x01'  # ifindex
        b'\x02'  # dst_family
        b'\xc0\xa8\x01\x01'  # dst_prefix
        b'\x18'  # dst_prefix_len
        b'\x04'  # status
        b'\x02'  # src_family
        b'\xc0\xa8\x01\x02'  # src_prefix
        b'\x18'  # src_prefix_len
    )
    ifindex = 1
    dst_family = socket.AF_INET
    dst_prefix = '192.168.1.1/24'
    status = zebra.BFD_STATUS_UP
    src_family = socket.AF_INET
    src_prefix = '192.168.1.2/24'

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraInterfaceBfdDestinationUpdate.parse(self.buf)
        for name in ('ifindex', 'dst_family', 'dst_prefix',
                     'status', 'src_family', 'src_prefix'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationRegisterMultiHopEnabled(unittest.TestCase):
    """ZEBRA_BFD_DEST_REGISTER (IPv4, multi-hop enabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x02'  # dst_family
        b'\xc0\xa8\x01\x01'  # dst_prefix
        b'\x00\x00\x00\x10'  # min_rx_timer
        b'\x00\x00\x00\x20'  # min_tx_timer
        b'\x01'  # detect_mult
        b'\x01'  # multi_hop
        b'\x00\x02'  # src_family
        b'\xc0\xa8\x01\x02'  # src_prefix
        b'\x05'  # multi_hop_count
    )
    pid = 1
    dst_family = socket.AF_INET
    dst_prefix = '192.168.1.1'
    min_rx_timer = 0x10
    min_tx_timer = 0x20
    detect_mult = 1
    multi_hop = 1
    src_family = socket.AF_INET
    src_prefix = '192.168.1.2'
    multi_hop_count = 5
    ifname = None

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationRegister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'min_rx_timer',
                     'min_tx_timer', 'detect_mult', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationRegisterMultiHopDisabled(unittest.TestCase):
    """ZEBRA_BFD_DEST_REGISTER (IPv4, multi-hop disabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x02'  # dst_family
        b'\xc0\xa8\x01\x01'  # dst_prefix
        b'\x00\x00\x00\x10'  # min_rx_timer
        b'\x00\x00\x00\x20'  # min_tx_timer
        b'\x01'  # detect_mult
        b'\x00'  # multi_hop
        b'\x00\x02'  # src_family
        b'\xc0\xa8\x01\x02'  # src_prefix
        b'\x04'  # ifname_len
        b'eth0'  # ifname
    )
    pid = 1
    dst_family = socket.AF_INET
    dst_prefix = '192.168.1.1'
    min_rx_timer = 0x10
    min_tx_timer = 0x20
    detect_mult = 1
    multi_hop = 0
    src_family = socket.AF_INET
    src_prefix = '192.168.1.2'
    multi_hop_count = None
    ifname = 'eth0'

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationRegister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'min_rx_timer',
                     'min_tx_timer', 'detect_mult', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationRegisterMultiHopEnabledIPv6(unittest.TestCase):
    """ZEBRA_BFD_DEST_REGISTER (IPv6, multi-hop enabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x0a'  # dst_family
        b'\x20\x01\x0d\xb8'  # dst_prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x01'
        b'\x00\x00\x00\x10'  # min_rx_timer
        b'\x00\x00\x00\x20'  # min_tx_timer
        b'\x01'  # detect_mult
        b'\x01'  # multi_hop
        b'\x00\x0a'  # src_family
        b'\x20\x01\x0d\xb8'  # src_prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x02'
        b'\x05'  # multi_hop_count
    )
    pid = 1
    dst_family = socket.AF_INET6
    dst_prefix = '2001:db8::1'
    min_rx_timer = 0x10
    min_tx_timer = 0x20
    detect_mult = 1
    multi_hop = 1
    src_family = socket.AF_INET6
    src_prefix = '2001:db8::2'
    multi_hop_count = 5
    ifname = None

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationRegister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'min_rx_timer',
                     'min_tx_timer', 'detect_mult', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationDeregisterMultiHopEnabled(unittest.TestCase):
    """ZEBRA_BFD_DEST_DEREGISTER (IPv4, multi-hop enabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x02'  # dst_family
        b'\xc0\xa8\x01\x01'  # dst_prefix
        b'\x01'  # multi_hop
        b'\x00\x02'  # src_family
        b'\xc0\xa8\x01\x02'  # src_prefix
        b'\x05'  # multi_hop_count
    )
    pid = 1
    dst_family = socket.AF_INET
    dst_prefix = '192.168.1.1'
    multi_hop = 1
    src_family = socket.AF_INET
    src_prefix = '192.168.1.2'
    multi_hop_count = 5
    ifname = None

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationDeregister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationDeregisterMultiHopDisabled(unittest.TestCase):
    """ZEBRA_BFD_DEST_DEREGISTER (IPv4, multi-hop disabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x02'  # dst_family
        b'\xc0\xa8\x01\x01'  # dst_prefix
        b'\x00'  # multi_hop
        b'\x00\x02'  # src_family
        b'\xc0\xa8\x01\x02'  # src_prefix
        b'\x04'  # ifname_len
        b'eth0'  # ifname
    )
    pid = 1
    dst_family = socket.AF_INET
    dst_prefix = '192.168.1.1'
    multi_hop = 0
    src_family = socket.AF_INET
    src_prefix = '192.168.1.2'
    multi_hop_count = None
    ifname = 'eth0'

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationDeregister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraBfdDestinationDeregisterMultiHopEnabledIPv6(unittest.TestCase):
    """ZEBRA_BFD_DEST_DEREGISTER (IPv6, multi-hop enabled) round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # pid
        b'\x00\x0a'  # dst_family
        b'\x20\x01\x0d\xb8'  # dst_prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x01'
        b'\x01'  # multi_hop
        b'\x00\x0a'  # src_family
        b'\x20\x01\x0d\xb8'  # src_prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x02'
        b'\x05'  # multi_hop_count
    )
    pid = 1
    dst_family = socket.AF_INET6
    dst_prefix = '2001:db8::1'
    multi_hop = 1
    src_family = socket.AF_INET6
    src_prefix = '2001:db8::2'
    multi_hop_count = 5
    ifname = None

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraBfdDestinationDeregister.parse(self.buf)
        for name in ('pid', 'dst_family', 'dst_prefix', 'multi_hop',
                     'src_family', 'src_prefix', 'multi_hop_count', 'ifname'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraVrfAdd(unittest.TestCase):
    """ZEBRA_VRF_ADD round-trip (name padded to fixed width)."""

    buf = (
        b'VRF1'  # vrf_name
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
    )
    vrf_name = 'VRF1'

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraVrfAdd.parse(self.buf)
        eq_(self.vrf_name, parsed.vrf_name)
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraInterfaceVrfUpdate(unittest.TestCase):
    """ZEBRA_INTERFACE_VRF_UPDATE round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # ifindex
        b'\x00\x02'  # vrf_id
    )
    ifindex = 1
    vrf_id = 2

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraInterfaceVrfUpdate.parse(self.buf)
        for name in ('ifindex', 'vrf_id'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraInterfaceEnableRadv(unittest.TestCase):
    """ZEBRA_INTERFACE_ENABLE_RADV round-trip."""

    buf = (
        b'\x00\x00\x00\x01'  # ifindex
        b'\x00\x00\x01\x00'  # interval
    )
    ifindex = 1
    interval = 0x100

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraInterfaceEnableRadv.parse(self.buf)
        for name in ('ifindex', 'interval'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraMplsLabelsAddIPv4(unittest.TestCase):
    """ZEBRA_MPLS_LABELS_ADD (IPv4) round-trip."""

    buf = (
        b'\x09'  # route_type
        b'\x00\x00\x00\x02'  # family
        b'\xc0\xa8\x01\x00'  # prefix
        b'\x18'  # prefix_len
        b'\xc0\xa8\x01\x01'  # gate_addr
        b'\x10'  # distance
        b'\x00\x00\x00\x64'  # in_label
        b'\x00\x00\x00\x03'  # out_label
    )
    route_type = zebra.ZEBRA_ROUTE_BGP
    family = socket.AF_INET
    prefix = '192.168.1.0/24'
    gate_addr = '192.168.1.1'
    distance = 0x10
    in_label = 100
    out_label = zebra.MPLS_IMP_NULL_LABEL

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraMplsLabelsAdd.parse(self.buf)
        for name in ('route_type', 'family', 'prefix', 'gate_addr',
                     'distance', 'in_label', 'out_label'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
class TestZebraMplsLabelsAddIPv6(unittest.TestCase):
    """ZEBRA_MPLS_LABELS_ADD (IPv6) round-trip."""

    buf = (
        b'\x09'  # route_type
        b'\x00\x00\x00\x0a'  # family
        b'\x20\x01\x0d\xb8'  # prefix
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x40'  # prefix_len
        b'\x20\x01\x0d\xb8'  # gate_addr
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x00'
        b'\x00\x00\x00\x01'
        b'\x10'  # distance
        b'\x00\x00\x00\x64'  # in_label
        b'\x00\x00\x00\x03'  # out_label
    )
    route_type = zebra.ZEBRA_ROUTE_BGP
    family = socket.AF_INET6
    prefix = '2001:db8::/64'
    gate_addr = '2001:db8::1'
    distance = 0x10
    in_label = 100
    out_label = zebra.MPLS_IMP_NULL_LABEL

    @_patch_frr_v2
    def test_parser(self):
        parsed = zebra.ZebraMplsLabelsAdd.parse(self.buf)
        for name in ('route_type', 'family', 'prefix', 'gate_addr',
                     'distance', 'in_label', 'out_label'):
            eq_(getattr(self, name), getattr(parsed, name))
        eq_(binary_str(self.buf), binary_str(parsed.serialize()))
|
iwaseyusuke/ryu
|
ryu/tests/unit/packet/test_zebra.py
|
Python
|
apache-2.0
| 21,409
|
def impkw():
    """Goto-definition fixture keyword; announces itself on stdout."""
    message = 'impkw1'
    print(message)
def my_third_keyword():
    """Goto-definition fixture keyword; announces itself on stdout."""
    message = 'my_third_keyword1'
    print(message)
|
gliviu/hyperclick-robot-framework
|
fixtures/gotodef/approximate-resource-imports/gotodeflib1.py
|
Python
|
mit
| 88
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingResult(Model):
    """Troubleshooting information gained from specified resource.

    :param start_time: The start time of the troubleshooting.
    :type start_time: datetime
    :param end_time: The end time of the troubleshooting.
    :type end_time: datetime
    :param code: The result code of the troubleshooting.
    :type code: str
    :param results: Information from troubleshooting.
    :type results:
     list[~azure.mgmt.network.v2017_08_01.models.TroubleshootingDetails]
    """

    # msrest serialization map: attribute name -> wire key and type
    # ('iso-8601' makes the timestamps (de)serialize as ISO strings).
    _attribute_map = {
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'code': {'key': 'code', 'type': 'str'},
        'results': {'key': 'results', 'type': '[TroubleshootingDetails]'},
    }

    def __init__(self, *, start_time=None, end_time=None, code: str=None, results=None, **kwargs) -> None:
        super(TroubleshootingResult, self).__init__(**kwargs)
        self.start_time = start_time
        self.end_time = end_time
        self.code = code
        self.results = results
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/troubleshooting_result_py3.py
|
Python
|
mit
| 1,603
|
from melkman.green import green_init
green_init()
from datetime import datetime, timedelta
from eventlet.green import socket
from eventlet.support.greenlets import GreenletExit
from eventlet.wsgi import server as wsgi_server
import os
import time
from urlparse import urlsplit
from urllib import quote_plus
from webob import Request, Response
from melk.util.dibject import Dibject, dibjectify
from melk.util.hash import melk_id
from melk.util.nonce import nonce_str
from melkman.context import Context
__all__ = ['make_db', 'fresh_context', 'data_path', 'test_yaml_file', 'random_id', 'rfc3339_date', 'melk_ids_in', 'random_atom_feed',
'make_atom_feed', 'dummy_atom_entries', 'make_atom_entry', 'dummy_news_item', 'epeq_datetime',
'append_param', 'no_micro', 'TestHTTPServer', 'FileServer', 'contextual']
def data_path():
    """Return the absolute path of the ``data`` directory beside this module."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def test_yaml_file():
    """Return the absolute path of ``test.yaml`` beside this module."""
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test.yaml')
def make_db():
    """Return the database of a freshly bootstrapped test context."""
    return fresh_context().db
def fresh_context():
    """Build a Context from the test YAML config, purge and bootstrap it."""
    ctx = Context.from_yaml(test_yaml_file())
    # purge=True wipes any state left behind by earlier test runs
    ctx.bootstrap(purge=True)
    return ctx
def random_id():
    """Return a random melk id derived from a fresh nonce string."""
    return melk_id(nonce_str())
def rfc3339_date(timestamp):
    """
    accepts datetime
    returns RFC 3339 date

    The value is formatted as-is with a literal ``Z`` suffix; no timezone
    conversion is performed (callers pass naive UTC datetimes).
    """
    return timestamp.strftime('%Y-%m-%dT%H:%M:%SZ')
def melk_ids_in(content, url):
    """Parse feed *content* (fetched from *url*) and return its entries' melk ids."""
    from melkman.parse import parse_feed
    fp = parse_feed(content, url)
    return [x.melk_id for x in fp.entries]
def random_atom_feed(feed_id, nentries, base_timestamp=None, **kw):
    """Build a dummy atom feed containing *nentries* generated entries.

    The feed's own updated timestamp is set just after the newest entry
    (base + nentries seconds).  Extra keyword args pass to make_atom_feed.
    """
    if base_timestamp is None:
        base_timestamp = datetime.utcnow()
    entries = dummy_atom_entries(nentries, base_timestamp)
    return make_atom_feed(feed_id, entries, timestamp=base_timestamp + timedelta(seconds=nentries), **kw)
def make_atom_feed(feed_id, entries,
                   title='Some Dummy Feed',
                   timestamp=None,
                   link='http://example.org/feed',
                   author='Jane Dough',
                   hub_urls=None):
    """Assemble an atom feed document from pre-rendered entry strings.

    *entries* are raw XML fragments (see make_atom_entry); *hub_urls*,
    when given, are emitted as rel="hub" links (PubSubHubbub discovery).
    Returns the feed as a string.
    """
    if timestamp is None:
        timestamp = datetime.utcnow()
    updated_str = rfc3339_date(timestamp)
    doc = """<?xml version="1.0" encoding="utf-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<id>%s</id>
<title>%s</title>
<link rel="self" href="%s"/>
<updated>%s</updated>
<author>
<name>%s</name>
</author>
""" % (feed_id, title, link, updated_str, author)
    if hub_urls is not None:
        for hub_url in hub_urls:
            doc += '<link rel="hub" href="%s" />' % hub_url
    for entry in entries:
        doc += entry
    doc += "</feed>"
    return doc
def dummy_atom_entries(n, base_timestamp=None):
    """Build *n* dummy atom entries, newest first.

    Entry i gets timestamp base + i seconds; the list is returned in
    reverse-chronological order, as feeds conventionally present entries.
    """
    if base_timestamp is None:
        base_timestamp = datetime.utcnow()
    entries = [
        make_atom_entry(random_id(),
                        timestamp=base_timestamp + timedelta(seconds=offset))
        for offset in range(n)
    ]
    entries.reverse()
    return entries
def make_atom_entry(id, title='This is the title',
                    author='Jane Dough',
                    link='http://example.com/link',
                    timestamp=None,
                    summary='Some Text.'):
    """Render a single atom <entry> XML fragment as a string."""
    if timestamp is None:
        timestamp = datetime.utcnow()
    updated_str = rfc3339_date(timestamp)
    return """<entry>
<id>%s</id>
<title>%s</title>
<link rel="alternate" href="%s"/>
<author><name>%s</name></author>
<updated>%s</updated>
<summary>%s</summary>
</entry>
""" % (id, title, link, author, updated_str, summary)
class DummyItem(Dibject):
    """Dibject stand-in for a news item; load_full_item is a no-op."""

    def load_full_item(self, db):
        # test double: the dummy item is already "full"
        return self
def dummy_news_item(d):
    """Return a DummyItem built from dict *d*, with sensible test defaults
    filled in for any missing fields."""
    di = DummyItem(dibjectify(d))
    di.setdefault('author', 'Whoever T. Merriweather')
    di.setdefault('item_id', random_id())
    di.setdefault('timestamp', datetime.utcnow())
    di.setdefault('title', 'The News Title')
    di.setdefault('link', 'http://example.org/blagosphere?id=12')
    di.setdefault('source_title', 'The Blags')
    di.setdefault('source_url', 'http://example.org/blagosphere')
    di.setdefault('summary', 'abaraljsrs sjrkja rsj klrjewori ew rwa riojweroiwer iowr wre')
    di.setdefault('details', Dibject())
    return di
class TestHTTPServer(object):
    """Minimal green-thread HTTP server answering 404 to every request.

    Subclasses override __call__ (a WSGI application) to serve content.
    """

    def __init__(self, port=9291):
        self.port = port

    def run(self):
        """Serve on 127.0.0.1:port until the running greenlet is killed."""
        try:
            server = socket.socket()
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(('127.0.0.1', self.port))
            server.listen(50)
            wsgi_server(server, self)
        except GreenletExit:
            # normal shutdown path: the test kills the serving greenlet
            pass

    def __call__(self, environ, start_response):
        res = Response()
        res.status = 404
        return res(environ, start_response)
class FileServer(TestHTTPServer):
    """
    little file server for testing
    """

    def __init__(self, www_dir, port=9292):
        TestHTTPServer.__init__(self, port)
        # count of requests handled (hits and misses alike)
        self.requests = 0
        self.www_dir = os.path.abspath(www_dir)

    def url_for(self, path):
        """Return the URL at which *path* is served by this instance."""
        return 'http://localhost:%d/%s' % (self.port, path)

    def __call__(self, environ, start_response):
        self.requests += 1
        req = Request(environ)
        res = Response()
        filename = req.path_info.lstrip('/')
        filename = os.path.abspath(os.path.join(self.www_dir, filename))
        # NOTE(review): prefix check is best-effort -- a sibling path such
        # as '<www_dir>x' would also pass startswith(); fine for tests only.
        if filename.startswith(self.www_dir) and os.path.isfile(filename):
            res.status = 200
            # NOTE(review): the file handle is never explicitly closed;
            # acceptable in a test helper, would leak in production code.
            res.body = open(filename).read()
        else:
            res.status = 404
        return res(environ, start_response)
def epeq_datetime(t1, t2):
    """Return True when *t1* and *t2* differ by less than one second."""
    delta = t1 - t2
    if delta < timedelta(0):
        delta = -delta
    return delta < timedelta(seconds=1)
def no_micro(dt):
    """Return a copy of *dt* with the microsecond field zeroed."""
    truncated = dt.replace(microsecond=0)
    return truncated
def append_param(url, k, v):
    """Append the URL-encoded query parameter ``k=v`` to *url*.

    Joins with '?' when *url* has no query string yet, '&' otherwise.
    """
    separator = '&' if urlsplit(url)[3] else '?'
    return '%s%s%s=%s' % (url, separator, quote_plus(k), quote_plus(v))
def contextual(t):
    """Decorator: run test *t* inside a fresh context and assert no leaks.

    The wrapped test receives a freshly bootstrapped Context.  After the
    test body runs, the wrapper verifies that no greenlet-local storages,
    broker connections, or AMQP connections leaked.
    """
    from eventlet import sleep
    from greenamqp.client_0_8 import connection
    # have the amqp client track connection counts so leaks are visible
    connection.DEBUG_LEAKS = True

    def inner():
        start_connections = connection.connection_count
        ctx = fresh_context()
        with ctx:
            rc = t(ctx)
        # yield once so pending greenlets can finish before the leak checks
        sleep(0)
        assert len(ctx._locals_by_greenlet) == 0, 'Leaked %d greenlet storages' % len(ctx._locals_by_greenlet)
        assert ctx._broker is None, 'Broker connection was not closed.'
        end_connections = connection.connection_count
        assert start_connections == end_connections, 'Leaked %d amqp connections (%d leaked in total)' % (end_connections - start_connections, end_connections)
        return rc
    # preserve the test's name for the runner (poor man's functools.wraps)
    inner.__name__ = t.__name__
    return inner
|
ltucker/melkman
|
tests/helpers.py
|
Python
|
gpl-2.0
| 6,931
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import TestCase
from kmip.core.errors import ErrorStrings
from kmip.core import utils
class TestUtils(TestCase):
    """Tests for kmip.core.utils.count_bytes over boundary values."""

    def setUp(self):
        super(TestUtils, self).setUp()

    def tearDown(self):
        super(TestUtils, self).tearDown()

    def test_count_bytes(self):
        # 65535 is the largest value representable in two bytes
        num = 65535
        bytes_exp = 2
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} byte(s)'.format(num, bytes_exp,
                                                       bytes_obs))

    def test_count_bytes_overflow(self):
        # 65536 spills into a third byte
        num = 65536
        bytes_exp = 3
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} bytes'.format(num, bytes_exp,
                                                     bytes_obs))

    def test_count_bytes_zero(self):
        # zero still occupies one byte
        num = 0
        bytes_exp = 1
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} byte(s)'.format(num, bytes_exp,
                                                       bytes_obs))
class TestBytearrayStream(TestCase):
    """Tests for kmip.core.utils.BytearrayStream.

    Most methods are still unimplemented stubs (see the TODOs); only
    construction is exercised so far.
    """

    def setUp(self):
        super(TestBytearrayStream, self).setUp()
        self.stream = utils.BytearrayStream()
        # Reusable failure-message templates for type/length/value checks
        self.bad_type = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                         'type', '{1}', '{2}')
        self.bad_len = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                        'length', '{1}', '{2}')
        self.bad_val = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                        'value', '{1}', '{2}')

    def tearDown(self):
        super(TestBytearrayStream, self).tearDown()

    def test_init(self):
        value = b'\x00'
        b = utils.BytearrayStream(value)
        buf_type = type(b.buffer)
        msg = self.bad_type.format('buffer', type(b''), buf_type)
        self.assertIsInstance(b.buffer, type(b''),
                              msg.format(type(b''), type(b.buffer)))
        length = len(b.buffer)
        msg = self.bad_len.format('buffer', 1, length)
        self.assertEqual(1, length, msg)
        content = b.buffer
        msg = self.bad_val.format('buffer', value, content)
        self.assertEqual(value, content, msg)

    def test_init_unset(self):
        b = utils.BytearrayStream()
        buf_type = type(b.buffer)
        msg = self.bad_type.format('buffer', type(b''), buf_type)
        self.assertIsInstance(b.buffer, type(b''),
                              msg.format(type(b''), type(b.buffer)))
        length = len(b.buffer)
        msg = self.bad_len.format('buffer', 0, length)
        self.assertEqual(0, length, msg)

    def test_read(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')

    def test_write(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')

    def test_peek(self):
        # TODO (peter-hamilton) Finish implementation.
        # NOTE(review): unlike the sibling stubs this one neither skips nor
        # asserts -- the assignments below are dead code and the test
        # silently passes without testing peek().  Should be completed or
        # replaced with self.skip('') like the others.
        value = (b'\x00\x01\x02\x03')
        expected = value
        b = expected
        expected = b
        b = utils.BytearrayStream(value)

    def test_peek_overflow(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')

    def test_peek_empty(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')

    def test_peek_none(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')

    def test_length(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
|
callidus/PyKMIP
|
kmip/tests/unit/core/test_utils.py
|
Python
|
apache-2.0
| 4,594
|
import genmsg.msgs
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
# Mapping from ROS builtin message field types to their DDS IDL
# counterparts.  NOTE: classic IDL has no 8-bit integer type, so
# uint8/int8 are widened to (unsigned) short -- see the inline TODOs.
MSG_TYPE_TO_IDL = {
    'byte': 'octet',
    'char': 'char',
    'bool': 'boolean',
    'uint8': 'unsigned short',  # TODO reconsider mapping
    'int8': 'short',  # TODO reconsider mapping
    'uint16': 'unsigned short',
    'int16': 'short',
    'uint32': 'unsigned long',
    'int32': 'long',
    'uint64': 'unsigned long long',
    'int64': 'long long',
    'float32': 'float',
    'float64': 'double',
    'string': 'string',
    'time': 'DDS::Time_t',
    'duration': 'DDS::Duration_t'
}
#used
def msg_type_to_idl(type):
    """
    Converts a message type (e.g. uint32, std_msgs/String, etc.) into its
    DDS IDL declaration.  (The original docstring said "C++ declaration" --
    this module generates IDL, not C++.)
    @param type: The message type
    @type type: str
    @return: a 3-element list [typedef_prefix, typedef_suffix, idl_type].
        Fixed-size arrays need an IDL typedef, carried in the first two
        elements; otherwise those are empty strings and only the third
        element (the IDL type name) is meaningful.
    @rtype: list(str)
    """
    (base_type, is_array, array_len) = genmsg.msgs.parse_type(type)
    idl_type = None
    if (genmsg.msgs.is_builtin(base_type)):
        idl_type = MSG_TYPE_TO_IDL[base_type]
    elif (len(base_type.split('/')) == 1):
        if (genmsg.msgs.is_header_type(base_type)):
            idl_type = 'std_msgs::dds_impl::Header_'
        else:
            # same-package message: refer to it unqualified
            idl_type = '%s' % base_type
    else:
        pkg = base_type.split('/')[0]
        msg = base_type.split('/')[1]
        idl_type = '%s::dds_impl::%s_' % (pkg, msg)
    if (is_array):
        if (array_len is None):
            # unbounded array maps to an IDL sequence; no typedef needed
            return ['', '', 'sequence<%s>' % idl_type]
        else:
            typename = '%s_array_%s' % (idl_type.replace(' ', '_'), array_len)
            return ['typedef %s' % idl_type, '%s[%s];' % (typename, array_len), '%s' % typename]
    else:
        return ['', '', idl_type]
def _escape_string(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return s
def escape_message_definition(definition):
    """Escape a message definition for embedding in a C string literal.

    Each line is backslash-escaped and terminated with a literal ``\\n``
    plus a line-continuation backslash.  An empty definition still yields
    one escaped (empty) line.
    """
    lines = definition.splitlines() or ['']
    return ''.join('%s\\n\\\n' % _escape_string(line) for line in lines)
#used2
def idl_message_declarations(name_prefix, msg):
    """
    Returns the different possible IDL declarations for a message given the
    message itself.
    @param name_prefix: The prefix to be prepended to the name, e.g. "std_msgs::"
    @type name_prefix: str
    @param msg: The message type
    @type msg: str
    @return: A tuple of 3 different names.
        NOTE(review): the docstring's original "<ContainerAllocator>" example
        looks inherited from the C++ generator; the actual second element is
        '<idl_name>_<ContainerAllocator> ' as coded below -- confirm intent.
    @rtype: str
    """
    pkg, basetype = genmsg.names.package_resource_name(msg)
    # leading ' ::' forces an absolutely-qualified IDL scope
    idl_name = ' ::%s%s'%(name_prefix, msg)
    if (pkg):
        idl_name = ' ::%s::%s'%(pkg, basetype)
    return ('%s_'%(idl_name), '%s_<ContainerAllocator> '%(idl_name), '%s'%(idl_name))
#todo
def is_fixed_length(spec, msg_context, includepath):
    """
    Returns whether or not the message is fixed-length.

    Variable-length arrays and strings make a message variable-length;
    non-builtin field types are loaded and checked recursively.
    @param spec: The message spec
    @type spec: genmsg.msgs.MsgSpec
    @param msg_context: context used to resolve and load referenced specs
    @param includepath: message search paths (must be a dict)
    """
    types = []
    for field in spec.parsed_fields():
        if (field.is_array and field.array_len is None):
            return False
        if (field.base_type == 'string'):
            return False
        if (not field.is_builtin):
            types.append(field.base_type)
    # deduplicate so each referenced message type is checked only once
    types = set(types)
    for t in types:
        t = genmsg.msgs.resolve_type(t, spec.package)
        assert isinstance(includepath, dict)
        new_spec = genmsg.msg_loader.load_msg_by_type(msg_context, t, includepath)
        if (not is_fixed_length(new_spec, msg_context, includepath)):
            return False
    return True
#used2
def default_value(type):
    """
    Returns the value to initialize a message member with. 0 for integer types, 0.0 for floating point, false for bool,
    empty string for everything else
    @param type: The type
    @type type: str
    """
    integer_types = ('byte', 'int8', 'int16', 'int32', 'int64',
                     'char', 'uint8', 'uint16', 'uint32', 'uint64')
    if type in integer_types:
        return '0'
    if type in ('float32', 'float64'):
        return '0.0'
    if type == 'bool':
        return 'false'
    return ""
#used2
def takes_allocator(type):
    """
    Returns whether or not a type can take an allocator in its constructor.
    False for all builtin types except string.  True for all others
    (string and compound message types).
    @param type: The type
    @type: str
    """
    # ``type not in`` is the idiomatic (and equivalent) spelling of the
    # original ``not type in [...]`` test.
    return type not in ('byte', 'int8', 'int16', 'int32', 'int64',
                        'char', 'uint8', 'uint16', 'uint32', 'uint64',
                        'float32', 'float64', 'bool', 'time', 'duration')
def escape_string(str):
    """Backslash-escape backslashes and double quotes in *str*.

    Public twin of _escape_string; kept separate to preserve the module's
    existing API.
    """
    return str.replace('\\', '\\\\').replace('"', '\\"')
#used
def generate_fixed_length_assigns(spec, container_gets_allocator, idl_name_prefix):
    """
    Yields assignment lines initializing any fixed-length arrays.
    (Generator: the original docstring described a stream parameter that
    this function no longer takes.)
    @param spec: The message spec
    @type spec: genmsg.msgs.MsgSpec
    @param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
    should have the allocator passed to its constructor.  Assumes the allocator is named _alloc.
    @type container_gets_allocator: bool
    @param idl_name_prefix: The prefix to use when referring to the message, e.g. "std_msgs::"
    @type idl_name_prefix: str
    """
    # Assign all fixed-length arrays their default values
    for field in spec.parsed_fields():
        # only fixed-length arrays need explicit initialization here
        if (not field.is_array or field.array_len is None):
            continue
        val = default_value(field.base_type)
        if (container_gets_allocator and takes_allocator(field.base_type)):
            # String is a special case, as it is the only builtin type that takes an allocator
            if (field.base_type == "string"):
                string_idl = msg_type_to_idl("string")
                yield '    %s.assign(%s(_alloc));\n'%(field.name, string_idl)
            else:
                (idl_msg_unqualified, idl_msg_with_alloc, _) = idl_message_declarations(idl_name_prefix, field.base_type)
                yield '    %s.assign(%s(_alloc));\n'%(field.name, idl_msg_with_alloc)
        elif (len(val) > 0):
            yield '    %s.assign(%s);\n'%(field.name, val)
#used
def generate_initializer_list(spec, container_gets_allocator):
    """
    Yields the lines of a constructor's initializer list, ':' before the
    first entry and ',' before each subsequent one.
    (Generator: the original docstring described a stream parameter that
    this function no longer takes.)
    @param spec: The message spec
    @type spec: genmsg.msgs.MsgSpec
    @param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
    should have the allocator passed to its constructor.  Assumes the allocator is named _alloc.
    @type container_gets_allocator: bool
    """
    op = ':'
    for field in spec.parsed_fields():
        val = default_value(field.base_type)
        use_alloc = takes_allocator(field.base_type)
        if (field.is_array):
            # only variable-length arrays accept the allocator
            if (field.array_len is None and container_gets_allocator):
                yield '  %s %s(_alloc)'%(op, field.name)
            else:
                yield '  %s %s()'%(op, field.name)
        else:
            if (container_gets_allocator and use_alloc):
                yield '  %s %s(_alloc)'%(op, field.name)
            else:
                yield '  %s %s(%s)'%(op, field.name, val)
        # first entry gets ':', the rest get ','
        op = ','
|
osrf/ros_dds
|
prototype/src/genidl/src/genidl/__init__.py
|
Python
|
apache-2.0
| 7,660
|
import logging
import bernhard
from logging import Handler
from bernhard import Client
from bernhard import TransportError
# Re-export the bernhard transports so callers need not import bernhard.
TCPTransport = bernhard.TCPTransport
UDPTransport = bernhard.UDPTransport
# Silence bernhard's logger unless the application configures handlers.
bernhard.log.addHandler(logging.NullHandler())
class ConnectionError(Exception):
    """Raised when the Riemann server cannot be reached.

    NOTE(review): this shadows the builtin ``ConnectionError`` (Python 3.3+)
    within this module; renaming would change the public API, so it is only
    flagged here.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class RiemannHandler(Handler):
    """Logging handler that forwards log records to a Riemann server.

    Records are expected to carry the Riemann event on ``record.event``.
    """

    def __init__(self, host='127.0.0.1', port=5555, transport=TCPTransport):
        Handler.__init__(self)
        self.client = Client(host, port, transport)
        # Probe connectivity once up front so misconfiguration fails fast;
        # the probe connection is separate from self.client's.
        try:
            conn = transport(host, port)
            conn.close()
        except TransportError:
            raise ConnectionError("Could not connect to Riemann server.")

    def emit(self, record):
        # NOTE(review): raising from emit() violates the logging.Handler
        # contract (handlers normally route failures to self.handleError);
        # kept as-is since callers may rely on the exception.
        try:
            self.client.send(record.event)
        except TransportError:
            raise ConnectionError("Connection to Riemann server broken.")
|
my-zhang/elogging
|
elogging/handlers/riemann.py
|
Python
|
apache-2.0
| 951
|
"""
Returns the X and Y (presumably longitude,latitude)
coordinates from the points in the indicated shapefile.
The missing value separates coordinates between shapes.
"""
from __future__ import print_function
import numpy
import pyferret
import shapefile
def ferret_init(efid):
    """
    Initialization for the shapefile_readxy PyEF

    Declares a 2-D float result (abstract point axis x abstract X/Y axis)
    computed from a shapefile name and a maximum point count.
    """
    retdict = { "numargs": 2,
                "descript": "Returns X,Y coordinates of shapes from shapefile. "
                            "Missing value separates shapes.",
                "restype": pyferret.FLOAT_ARRAY,
                "axes": ( pyferret.AXIS_ABSTRACT,
                          pyferret.AXIS_ABSTRACT,
                          pyferret.AXIS_DOES_NOT_EXIST,
                          pyferret.AXIS_DOES_NOT_EXIST,
                          pyferret.AXIS_DOES_NOT_EXIST,
                          pyferret.AXIS_DOES_NOT_EXIST, ),
                "argnames": ( "SHAPEFILE", "MAXPTS", ),
                "argdescripts": ( "Shapefile name (any extension given is ignored)",
                                  "Max. number of points to return (-1 for all, but reads shapefile twice)", ),
                "argtypes": ( pyferret.STRING_ONEVAL,
                              pyferret.FLOAT_ONEVAL, ),
                "influences": ( (False, False, False, False, False, False),
                                (False, False, False, False, False, False), ),
              }
    return retdict
def ferret_result_limits(efid):
    """
    Abstract axis limits for the shapefile_readxy PyEF

    When MAXPTS is -1, the shapefile is read here just to count points
    (hence the "reads shapefile twice" warning in the arg description).
    """
    maxpts = pyferret.get_arg_one_val(efid, pyferret.ARG2)
    maxpts = int(maxpts)
    if maxpts == -1:
        shapefile_name = pyferret.get_arg_one_val(efid, pyferret.ARG1)
        sf = shapefile.Reader(shapefile_name)
        maxpts = 0
        for shp in sf.shapes():
            # +1 leaves room for the missing-value separator after each shape
            maxpts += len(shp.points) + 1
    elif maxpts < 1:
        raise ValueError("MAXPTS must be a positive integer or -1")
    return ( (1, maxpts), (1, 2), None, None, None, None, )
def ferret_compute(efid, result, resbdf, inputs, inpbdfs):
    """
    Read the shapefile named in inputs[0] and assign result[:,0,0,0]
    with the X coordinates, and result[:,1,0,0] with the Y coordinates
    of the shapes contained in the shapefile.  The missing value,
    resbdf, is assigned as the coordinates of a point separating
    different shapes.
    """
    # pre-fill everything with the missing value; the separator points
    # between shapes are then already in place
    result[:,:,:,:,:,:] = resbdf
    sf = shapefile.Reader(inputs[0])
    try:
        pt_index = 0
        for shp in sf.shapes():
            for pt in shp.points:
                # column 0 gets X, column 1 gets Y (pt may carry extra values)
                result[pt_index,:2,0,0,0,0] = pt[:2]
                pt_index += 1
            # missing value coordinates (already assigned) separating shapes
            pt_index += 1
    except IndexError:
        # hit the maximum number of points
        pass
#
# The rest of this is for testing from the command line
#
if __name__ == "__main__":
    # Command-line self-test: run ferret_compute on a US-county shapefile
    # and sanity-check the coordinate ranges and shape separators.
    import time
    # make sure ferret_init does not cause problems
    info = ferret_init(0)
    resbdf = numpy.array([-9999.0], dtype=numpy.float64)
    inpbdfs = numpy.array([-8888.0, -7777.0], dtype=numpy.float64)
    maxpts = 3200 * 2400
    result = -6666.0 * numpy.ones((maxpts,2,1,1,1,1), dtype=numpy.float64, order='F')
    print("ferret_compute start: time = %s" % time.asctime())
    ferret_compute(0, result, resbdf, ("tl_2010_us_county10", maxpts, ), inpbdfs)
    print("ferret_compute done; time = %s" % time.asctime())
    # valid longitudes: continental US / Alaska ranges (incl. across dateline)
    good_x = numpy.logical_and((-180.0 <= result[:,0,0,0,0,0]), (result[:,0,0,0,0,0] <= -65.0))
    good_x = numpy.logical_or(good_x,
             numpy.logical_and((172.0 <= result[:,0,0,0,0,0]), (result[:,0,0,0,0,0] <= 180.0)))
    good_y = numpy.logical_and((17.0 <= result[:,1,0,0,0,0]), (result[:,1,0,0,0,0] <= 72.0))
    if numpy.logical_xor(good_x, good_y).any():
        raise ValueError("good_x != good_y")
    # every non-valid coordinate must be exactly the missing value
    missing_x = ( result[:,0,0,0,0,0] == resbdf )
    if numpy.logical_xor(good_x, numpy.logical_not(missing_x)).any():
        raise ValueError("good_x != not missing_x")
    missing_y = ( result[:,1,0,0,0,0] == resbdf )
    if numpy.logical_xor(good_y, numpy.logical_not(missing_y)).any():
        raise ValueError("good_y != not missing_y")
    # walk the point list and verify exactly one separator between shapes
    count = 0
    at_end = False
    shape_num = 0
    total = 0
    for k in range(result.shape[0]):
        if missing_x[k]:
            if count == 0:
                at_end = True
            else:
                # print "Count[%d] = %d" % (shape_num, count)
                shape_num += 1
                total += count + 1
                count = 0
        elif at_end:
            raise ValueError("More than one missing value between shapes")
        else:
            count += 1
    total += count
    print("total (including missing-value separators) = %d" % total)
    print("out of a maximum of %d" % result.shape[0])
    print("number of shapes = %d" % shape_num)
    print("shapefile_readxy: SUCCESS")
|
NOAA-PMEL/PyFerret
|
pyfermod/fershp/shapefile_readxy.py
|
Python
|
unlicense
| 4,929
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.