text stringlengths 6 947k | repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1
value | license stringclasses 15
values | size int64 6 947k | score float64 0 0.34 |
|---|---|---|---|---|---|---|
"""Constant definitions for testing purposes."""
from bddbot.config import TEST_COMMAND
BANK_PATH_1 = "banks/first.bank"
BANK_PATH_2 = "banks/second.bank"
FEATURE_PATH_1 = BANK_PATH_1.replace("bank", "feature")
FEATURE_PATH_2 = BANK_PATH_2.replace("bank", "feature")
(HOST, PORT) = ("bank_server", 0xBDD)
CLIENT = "client"
DEFAULT_TEST_COMMANDS = [TEST_COMMAND, ]
| nivbend/bdd_bot | bddbot/test/constants.py | Python | mit | 367 | 0 |
"""
The `ModelSerializer` and `HyperlinkedModelSerializer` classes are essentially
shortcuts for automatically creating serializers based on a given model class.
These tests deal with ensuring that we correctly map the model fields onto
an appropriate set of serializer fields for each case.
"""
from __future__ import unicode_literals
import decimal
from collections import OrderedDict
import pytest
from django.core.exceptions import ImproperlyConfigured
from django.core.validators import (
MaxValueValidator, MinLengthValidator, MinValueValidator
)
from django.db import models
from django.db.models import DurationField as ModelDurationField
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
from rest_framework.compat import set_many, unicode_repr
def dedent(blocktext):
    """Turn an indented triple-quoted block into plain text.

    Drops the first and last lines (the empty line after the opening quotes
    and the closing-quote indentation) and strips the fixed 12-space indent
    from every remaining line.
    """
    inner = blocktext.splitlines()[1:-1]
    return '\n'.join(line[12:] for line in inner)
# Tests for regular field mappings.
# ---------------------------------
class CustomField(models.Field):
    """
    A custom model field simply for testing purposes.
    """
    # Intentionally empty: used to check that an unrecognised model field
    # maps to a serializer `ModelField` in the generated repr.
    pass
class OneFieldModel(models.Model):
    # Minimal model with a single concrete field; reused by several tests.
    char_field = models.CharField(max_length=100)
class RegularFieldsModel(models.Model):
    """
    A model class for testing regular flat fields.
    """
    # Field declaration order matters: the repr-comparison tests expect the
    # generated serializer fields in exactly this order.
    auto_field = models.AutoField(primary_key=True)
    big_integer_field = models.BigIntegerField()
    boolean_field = models.BooleanField(default=False)
    char_field = models.CharField(max_length=100)
    # NOTE(review): CommaSeparatedIntegerField is deprecated in newer Django
    # releases -- presumably this file targets an older Django; confirm.
    comma_separated_integer_field = models.CommaSeparatedIntegerField(max_length=100)
    date_field = models.DateField()
    datetime_field = models.DateTimeField()
    decimal_field = models.DecimalField(max_digits=3, decimal_places=1)
    email_field = models.EmailField(max_length=100)
    float_field = models.FloatField()
    integer_field = models.IntegerField()
    null_boolean_field = models.NullBooleanField()
    positive_integer_field = models.PositiveIntegerField()
    positive_small_integer_field = models.PositiveSmallIntegerField()
    slug_field = models.SlugField(max_length=100)
    small_integer_field = models.SmallIntegerField()
    text_field = models.TextField(max_length=100)
    file_field = models.FileField(max_length=100)
    time_field = models.TimeField()
    url_field = models.URLField(max_length=100)
    custom_field = CustomField()
    file_path_field = models.FilePathField(path='/tmp/')
    def method(self):
        # Plain method; tests verify it maps to `ReadOnlyField` when it is
        # listed in `Meta.fields`.
        return 'method'
# Choice tuples shared by the models below.
COLOR_CHOICES = (('red', 'Red'), ('blue', 'Blue'), ('green', 'Green'))
# "Nonstandard" choices: the second element of each pair is a Decimal
# rather than a plain display string.
DECIMAL_CHOICES = (('low', decimal.Decimal('0.1')), ('medium', decimal.Decimal('0.5')), ('high', decimal.Decimal('0.9')))
class FieldOptionsModel(models.Model):
    # One field per model-field option whose mapping onto serializer-field
    # keyword arguments is asserted by `test_field_options`.
    value_limit_field = models.IntegerField(validators=[MinValueValidator(1), MaxValueValidator(10)])
    length_limit_field = models.CharField(validators=[MinLengthValidator(3)], max_length=12)
    blank_field = models.CharField(blank=True, max_length=10)
    null_field = models.IntegerField(null=True)
    default_field = models.IntegerField(default=0)
    descriptive_field = models.IntegerField(help_text='Some help text', verbose_name='A label')
    choices_field = models.CharField(max_length=100, choices=COLOR_CHOICES)
class ChoicesModel(models.Model):
    # Exercises choices containing Decimal objects plus a verbose_name.
    choices_field_with_nonstandard_args = models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES, verbose_name='A label')
class Issue3674ParentModel(models.Model):
    # Parent side of the regression models for issue #3674.
    title = models.CharField(max_length=64)
class Issue3674ChildModel(models.Model):
    # Child model whose primary key is a CharField rather than an `id`
    # AutoField -- the condition exercised by issue #3674.
    parent = models.ForeignKey(Issue3674ParentModel, related_name='children', on_delete=models.CASCADE)
    value = models.CharField(primary_key=True, max_length=64)
class UniqueChoiceModel(models.Model):
    # NOTE(review): both labels read 'choice 1' -- looks like a typo, though
    # the display labels are irrelevant to uniqueness behaviour; confirm.
    CHOICES = (
        ('choice1', 'choice 1'),
        ('choice2', 'choice 1'),
    )
    name = models.CharField(max_length=254, unique=True, choices=CHOICES)
class TestModelSerializer(TestCase):
    """Construction and save edge-cases for `ModelSerializer`."""
    def test_create_method(self):
        # A declared field with no model counterpart gets forwarded to
        # `objects.create()`, which should surface a descriptive TypeError.
        class TestSerializer(serializers.ModelSerializer):
            non_model_field = serializers.CharField()
            class Meta:
                model = OneFieldModel
                fields = ('char_field', 'non_model_field')
        serializer = TestSerializer(data={
            'char_field': 'foo',
            'non_model_field': 'bar',
        })
        serializer.is_valid()
        with self.assertRaises(TypeError) as excinfo:
            serializer.save()
        msginitial = 'Got a `TypeError` when calling `OneFieldModel.objects.create()`.'
        assert str(excinfo.exception).startswith(msginitial)
    def test_abstract_model(self):
        """
        Test that trying to use ModelSerializer with Abstract Models
        throws a ValueError exception.
        """
        class AbstractModel(models.Model):
            afield = models.CharField(max_length=255)
            class Meta:
                abstract = True
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = AbstractModel
                fields = ('afield',)
        serializer = TestSerializer(data={
            'afield': 'foo',
        })
        with self.assertRaises(ValueError) as excinfo:
            serializer.is_valid()
        msginitial = 'Cannot use ModelSerializer with Abstract Models.'
        assert str(excinfo.exception).startswith(msginitial)
class TestRegularFieldMappings(TestCase):
    """Verify the serializer fields generated for plain (flat) model fields,
    by comparing the serializer repr against an expected literal."""
    def test_regular_fields(self):
        """
        Model fields should map to their equivalent serializer fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                big_integer_field = IntegerField()
                boolean_field = BooleanField(required=False)
                char_field = CharField(max_length=100)
                comma_separated_integer_field = CharField(max_length=100, validators=[<django.core.validators.RegexValidator object>])
                date_field = DateField()
                datetime_field = DateTimeField()
                decimal_field = DecimalField(decimal_places=1, max_digits=3)
                email_field = EmailField(max_length=100)
                float_field = FloatField()
                integer_field = IntegerField()
                null_boolean_field = NullBooleanField(required=False)
                positive_integer_field = IntegerField()
                positive_small_integer_field = IntegerField()
                slug_field = SlugField(max_length=100)
                small_integer_field = IntegerField()
                text_field = CharField(max_length=100, style={'base_template': 'textarea.html'})
                file_field = FileField(max_length=100)
                time_field = TimeField()
                url_field = URLField(max_length=100)
                custom_field = ModelField(model_field=<tests.test_model_serializer.CustomField: custom_field>)
                file_path_field = FilePathField(path='/tmp/')
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_field_options(self):
        # Model-field options (validators, blank, null, default, help_text,
        # verbose_name, choices) should translate into serializer kwargs.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = FieldOptionsModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                value_limit_field = IntegerField(max_value=10, min_value=1)
                length_limit_field = CharField(max_length=12, min_length=3)
                blank_field = CharField(allow_blank=True, max_length=10, required=False)
                null_field = IntegerField(allow_null=True, required=False)
                default_field = IntegerField(required=False)
                descriptive_field = IntegerField(help_text='Some help text', label='A label')
                choices_field = ChoiceField(choices=(('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')))
        """)
        if six.PY2:
            # This particular case is too awkward to resolve fully across
            # both py2 and py3.
            expected = expected.replace(
                "('red', 'Red'), ('blue', 'Blue'), ('green', 'Green')",
                "(u'red', u'Red'), (u'blue', u'Blue'), (u'green', u'Green')"
            )
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_method_field(self):
        """
        Properties and methods on the model should be allowed as `Meta.fields`
        values, and should map to `ReadOnlyField`.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'method')
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                method = ReadOnlyField()
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_pk_fields(self):
        """
        Both `pk` and the actual primary key name are valid in `Meta.fields`.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('pk', 'auto_field')
        expected = dedent("""
            TestSerializer():
                pk = IntegerField(label='Auto field', read_only=True)
                auto_field = IntegerField(read_only=True)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_extra_field_kwargs(self):
        """
        Ensure `extra_kwargs` are passed to generated fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'char_field')
                extra_kwargs = {'char_field': {'default': 'extra'}}
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=True)
                char_field = CharField(default='extra', max_length=100)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_extra_field_kwargs_required(self):
        """
        Ensure `extra_kwargs` are passed to generated fields.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'char_field')
                extra_kwargs = {'auto_field': {'required': False, 'read_only': False}}
        expected = dedent("""
            TestSerializer():
                auto_field = IntegerField(read_only=False, required=False)
                char_field = CharField(max_length=100)
        """)
        self.assertEqual(repr(TestSerializer()), expected)
    def test_invalid_field(self):
        """
        Field names that do not map to a model field or relationship should
        raise a configuration error.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field', 'invalid')
        with self.assertRaises(ImproperlyConfigured) as excinfo:
            TestSerializer().fields
        expected = 'Field name `invalid` is not valid for model `RegularFieldsModel`.'
        assert str(excinfo.exception) == expected
    def test_missing_field(self):
        """
        Fields that have been declared on the serializer class must be included
        in the `Meta.fields` if it exists.
        """
        class TestSerializer(serializers.ModelSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field',)
        with self.assertRaises(AssertionError) as excinfo:
            TestSerializer().fields
        expected = (
            "The field 'missing' was declared on serializer TestSerializer, "
            "but has not been included in the 'fields' option."
        )
        assert str(excinfo.exception) == expected
    def test_missing_superclass_field(self):
        """
        Fields that have been declared on a parent of the serializer class may
        be excluded from the `Meta.fields` option.
        """
        class TestSerializer(serializers.ModelSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        class ChildSerializer(TestSerializer):
            missing = serializers.ReadOnlyField()
            class Meta:
                model = RegularFieldsModel
                fields = ('auto_field',)
        # Accessing `.fields` would raise if the inherited declaration were
        # (incorrectly) required to appear in `Meta.fields`.
        ChildSerializer().fields
    def test_choices_with_nonstandard_args(self):
        # Construction alone should succeed despite Decimal choice members.
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = ChoicesModel
                fields = '__all__'
        ExampleSerializer()
    def test_fields_and_exclude_behavior(self):
        # NOTE(review): both serializers are configured identically with
        # `fields = '__all__'`; presumably the "explicit" variant once listed
        # every field name individually -- confirm the intended contrast.
        class ImplicitFieldsSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        class ExplicitFieldsSerializer(serializers.ModelSerializer):
            class Meta:
                model = RegularFieldsModel
                fields = '__all__'
        implicit = ImplicitFieldsSerializer()
        explicit = ExplicitFieldsSerializer()
        assert implicit.data == explicit.data
class TestDurationFieldMapping(TestCase):
    """A model `DurationField` should map to the serializer `DurationField`."""
    def test_duration_field(self):
        class DurationFieldModel(models.Model):
            """
            A model that defines DurationField.
            """
            duration_field = ModelDurationField()
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DurationFieldModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                duration_field = DurationField()
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
class TestGenericIPAddressFieldValidation(TestCase):
    """An invalid IP address must yield exactly one validation error."""
    def test_ip_address_validation(self):
        class IPAddressFieldModel(models.Model):
            address = models.GenericIPAddressField()
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = IPAddressFieldModel
                fields = '__all__'
        s = TestSerializer(data={'address': 'not an ip address'})
        self.assertFalse(s.is_valid())
        # Exactly one error: the model validator must not be duplicated.
        self.assertEqual(1, len(s.errors['address']),
                         'Unexpected number of validation errors: '
                         '{0}'.format(s.errors))
# Tests for relational field mappings.
# ------------------------------------
class ForeignKeyTargetModel(models.Model):
    # Target of `RelationalModel.foreign_key`.
    name = models.CharField(max_length=100)
class ManyToManyTargetModel(models.Model):
    # Target of `RelationalModel.many_to_many`.
    name = models.CharField(max_length=100)
class OneToOneTargetModel(models.Model):
    # Target of `RelationalModel.one_to_one`.
    name = models.CharField(max_length=100)
class ThroughTargetModel(models.Model):
    # Target of the `through`-mediated many-to-many on `RelationalModel`.
    name = models.CharField(max_length=100)
class Supplementary(models.Model):
    # Intermediary ("through") model linking RelationalModel to
    # ThroughTargetModel, carrying an extra payload field.
    extra = models.IntegerField()
    forwards = models.ForeignKey('ThroughTargetModel', on_delete=models.CASCADE)
    backwards = models.ForeignKey('RelationalModel', on_delete=models.CASCADE)
class RelationalModel(models.Model):
    # One of each relational field kind, each with a reverse accessor so the
    # reverse-relation mappings can be tested from the target side too.
    foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='reverse_foreign_key', on_delete=models.CASCADE)
    many_to_many = models.ManyToManyField(ManyToManyTargetModel, related_name='reverse_many_to_many')
    one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='reverse_one_to_one', on_delete=models.CASCADE)
    through = models.ManyToManyField(ThroughTargetModel, through=Supplementary, related_name='reverse_through')
class UniqueTogetherModel(models.Model):
    # Relational fields combined in a `unique_together` constraint.
    foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='unique_foreign_key', on_delete=models.CASCADE)
    one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='unique_one_to_one', on_delete=models.CASCADE)
    class Meta:
        unique_together = ("foreign_key", "one_to_one")
class TestRelationalFieldMappings(TestCase):
    """Verify the serializer fields generated for forward and reverse
    relations, for both PK-based and hyperlinked serializers."""
    def test_pk_relations(self):
        # Forward relations map to PrimaryKeyRelatedField; the through
        # relation is read-only.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                foreign_key = PrimaryKeyRelatedField(queryset=ForeignKeyTargetModel.objects.all())
                one_to_one = PrimaryKeyRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>])
                many_to_many = PrimaryKeyRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all())
                through = PrimaryKeyRelatedField(many=True, read_only=True)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_relations(self):
        # With `depth = 1`, relations expand into read-only NestedSerializers.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                foreign_key = NestedSerializer(read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                many_to_many = NestedSerializer(many=True, read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
                through = NestedSerializer(many=True, read_only=True):
                    id = IntegerField(label='ID', read_only=True)
                    name = CharField(max_length=100)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_hyperlinked_relations(self):
        # HyperlinkedModelSerializer swaps `id` for a `url` identity field
        # and relations for HyperlinkedRelatedField.
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
                foreign_key = HyperlinkedRelatedField(queryset=ForeignKeyTargetModel.objects.all(), view_name='foreignkeytargetmodel-detail')
                one_to_one = HyperlinkedRelatedField(queryset=OneToOneTargetModel.objects.all(), validators=[<UniqueValidator(queryset=RelationalModel.objects.all())>], view_name='onetoonetargetmodel-detail')
                many_to_many = HyperlinkedRelatedField(allow_empty=False, many=True, queryset=ManyToManyTargetModel.objects.all(), view_name='manytomanytargetmodel-detail')
                through = HyperlinkedRelatedField(many=True, read_only=True, view_name='throughtargetmodel-detail')
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_hyperlinked_relations(self):
        # Depth plus hyperlinks: nested serializers carry their own `url`.
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = RelationalModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='relationalmodel-detail')
                foreign_key = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
                    name = CharField(max_length=100)
                many_to_many = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
                    name = CharField(max_length=100)
                through = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
                    name = CharField(max_length=100)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_hyperlinked_relations_starred_source(self):
        # `extra_kwargs` with `source='*'` must appear in the repr of the
        # top-level url field only.
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = RelationalModel
                depth = 1
                fields = '__all__'
                extra_kwargs = {
                    'url': {
                        'source': '*',
                    }}
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(source='*', view_name='relationalmodel-detail')
                foreign_key = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
                    name = CharField(max_length=100)
                many_to_many = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='manytomanytargetmodel-detail')
                    name = CharField(max_length=100)
                through = NestedSerializer(many=True, read_only=True):
                    url = HyperlinkedIdentityField(view_name='throughtargetmodel-detail')
                    name = CharField(max_length=100)
        """)
        self.maxDiff = None
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_nested_unique_together_relations(self):
        # unique_together relations rendered at depth=1.
        class TestSerializer(serializers.HyperlinkedModelSerializer):
            class Meta:
                model = UniqueTogetherModel
                depth = 1
                fields = '__all__'
        expected = dedent("""
            TestSerializer():
                url = HyperlinkedIdentityField(view_name='uniquetogethermodel-detail')
                foreign_key = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='foreignkeytargetmodel-detail')
                    name = CharField(max_length=100)
                one_to_one = NestedSerializer(read_only=True):
                    url = HyperlinkedIdentityField(view_name='onetoonetargetmodel-detail')
                    name = CharField(max_length=100)
        """)
        if six.PY2:
            # This case is also too awkward to resolve fully across both py2
            # and py3. (See above)
            expected = expected.replace(
                "('foreign_key', 'one_to_one')",
                "(u'foreign_key', u'one_to_one')"
            )
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_foreign_key(self):
        # Reverse FK maps to a writable many=True PrimaryKeyRelatedField.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ForeignKeyTargetModel
                fields = ('id', 'name', 'reverse_foreign_key')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_foreign_key = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_one_to_one(self):
        # Reverse one-to-one maps to a single PrimaryKeyRelatedField.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneTargetModel
                fields = ('id', 'name', 'reverse_one_to_one')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_one_to_one = PrimaryKeyRelatedField(queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_many_to_many(self):
        # Reverse M2M maps to a writable many=True PrimaryKeyRelatedField.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ManyToManyTargetModel
                fields = ('id', 'name', 'reverse_many_to_many')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_many_to_many = PrimaryKeyRelatedField(many=True, queryset=RelationalModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
    def test_pk_reverse_through(self):
        # Reverse through-relations are read-only.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = ThroughTargetModel
                fields = ('id', 'name', 'reverse_through')
        expected = dedent("""
            TestSerializer():
                id = IntegerField(label='ID', read_only=True)
                name = CharField(max_length=100)
                reverse_through = PrimaryKeyRelatedField(many=True, read_only=True)
        """)
        self.assertEqual(unicode_repr(TestSerializer()), expected)
class DisplayValueTargetModel(models.Model):
    name = models.CharField(max_length=100)
    # NOTE(review): only `__str__` is defined although the file supports
    # py2 via `six` -- presumably fine because the names are ASCII; confirm.
    def __str__(self):
        return '%s Color' % (self.name)
class DisplayValueModel(models.Model):
    # FK whose choice labels come from the target's `__str__`.
    color = models.ForeignKey(DisplayValueTargetModel, on_delete=models.CASCADE)
class TestRelationalFieldDisplayValue(TestCase):
    """Choice labels for related fields should come from the target model's
    string representation, and be overridable via `display_value()`."""
    def setUp(self):
        DisplayValueTargetModel.objects.bulk_create([
            DisplayValueTargetModel(name='Red'),
            DisplayValueTargetModel(name='Yellow'),
            DisplayValueTargetModel(name='Green'),
        ])
    def test_default_display_value(self):
        # Default labels use DisplayValueTargetModel.__str__ ('<name> Color').
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DisplayValueModel
                fields = '__all__'
        serializer = TestSerializer()
        expected = OrderedDict([(1, 'Red Color'), (2, 'Yellow Color'), (3, 'Green Color')])
        self.assertEqual(serializer.fields['color'].choices, expected)
    def test_custom_display_value(self):
        # Overriding `display_value()` replaces the default labels.
        class TestField(serializers.PrimaryKeyRelatedField):
            def display_value(self, instance):
                return 'My %s Color' % (instance.name)
        class TestSerializer(serializers.ModelSerializer):
            color = TestField(queryset=DisplayValueTargetModel.objects.all())
            class Meta:
                model = DisplayValueModel
                fields = '__all__'
        serializer = TestSerializer()
        expected = OrderedDict([(1, 'My Red Color'), (2, 'My Yellow Color'), (3, 'My Green Color')])
        self.assertEqual(serializer.fields['color'].choices, expected)
class TestIntegration(TestCase):
    """End-to-end retrieve/create/update round-trips through a
    `ModelSerializer` over `RelationalModel` and its targets."""
    def setUp(self):
        self.foreign_key_target = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        self.one_to_one_target = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        self.many_to_many_targets = [
            ManyToManyTargetModel.objects.create(
                name='many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        self.instance = RelationalModel.objects.create(
            foreign_key=self.foreign_key_target,
            one_to_one=self.one_to_one_target,
        )
        # `set_many` is a compat shim covering the Django 2.0 change from
        # direct assignment to the `.set()` API.
        set_many(self.instance, 'many_to_many', self.many_to_many_targets)
        self.instance.save()
    def test_pk_retrival(self):
        # Serializing an existing instance yields PKs for every relation.
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        serializer = TestSerializer(self.instance)
        expected = {
            'id': self.instance.pk,
            'foreign_key': self.foreign_key_target.pk,
            'one_to_one': self.one_to_one_target.pk,
            'many_to_many': [item.pk for item in self.many_to_many_targets],
            'through': []
        }
        self.assertEqual(serializer.data, expected)
    def test_pk_create(self):
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        new_foreign_key = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        new_one_to_one = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        new_many_to_many = [
            ManyToManyTargetModel.objects.create(
                name='new many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        data = {
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
        }
        # Serializer should validate okay.
        serializer = TestSerializer(data=data)
        assert serializer.is_valid()
        # Creating the instance, relationship attributes should be set.
        instance = serializer.save()
        assert instance.foreign_key.pk == new_foreign_key.pk
        assert instance.one_to_one.pk == new_one_to_one.pk
        assert [
            item.pk for item in instance.many_to_many.all()
        ] == [
            item.pk for item in new_many_to_many
        ]
        assert list(instance.through.all()) == []
        # Representation should be correct.
        expected = {
            'id': instance.pk,
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
            'through': []
        }
        self.assertEqual(serializer.data, expected)
    def test_pk_update(self):
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = RelationalModel
                fields = '__all__'
        new_foreign_key = ForeignKeyTargetModel.objects.create(
            name='foreign_key'
        )
        new_one_to_one = OneToOneTargetModel.objects.create(
            name='one_to_one'
        )
        new_many_to_many = [
            ManyToManyTargetModel.objects.create(
                name='new many_to_many (%d)' % idx
            ) for idx in range(3)
        ]
        data = {
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
        }
        # Serializer should validate okay.
        serializer = TestSerializer(self.instance, data=data)
        assert serializer.is_valid()
        # Updating the instance, relationship attributes should be set.
        instance = serializer.save()
        assert instance.foreign_key.pk == new_foreign_key.pk
        assert instance.one_to_one.pk == new_one_to_one.pk
        assert [
            item.pk for item in instance.many_to_many.all()
        ] == [
            item.pk for item in new_many_to_many
        ]
        assert list(instance.through.all()) == []
        # Representation should be correct.
        expected = {
            'id': self.instance.pk,
            'foreign_key': new_foreign_key.pk,
            'one_to_one': new_one_to_one.pk,
            'many_to_many': [item.pk for item in new_many_to_many],
            'through': []
        }
        self.assertEqual(serializer.data, expected)
# Tests for bulk create using `ListSerializer`.
class BulkCreateModel(models.Model):
    # Minimal model for the ListSerializer bulk-create test.
    name = models.CharField(max_length=10)
class TestBulkCreate(TestCase):
    """Bulk creation through a `ListSerializer` wrapping a `ModelSerializer`."""
    def test_bulk_create(self):
        class BasicModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = BulkCreateModel
                fields = ('name',)
        class BulkCreateSerializer(serializers.ListSerializer):
            child = BasicModelSerializer()
        data = [{'name': 'a'}, {'name': 'b'}, {'name': 'c'}]
        serializer = BulkCreateSerializer(data=data)
        assert serializer.is_valid()
        # Objects are returned by save().
        instances = serializer.save()
        assert len(instances) == 3
        assert [item.name for item in instances] == ['a', 'b', 'c']
        # Objects have been created in the database.
        assert BulkCreateModel.objects.count() == 3
        assert list(BulkCreateModel.objects.values_list('name', flat=True)) == ['a', 'b', 'c']
        # Serializer returns correct data.
        assert serializer.data == data
class MetaClassTestModel(models.Model):
    # Minimal model for the Meta-option validation tests below.
    text = models.CharField(max_length=100)
class TestSerializerMetaClass(TestCase):
    """Invalid `Meta.fields` / `Meta.exclude` configuration must raise with
    descriptive messages when the fields are first accessed."""
    def test_meta_class_fields_option(self):
        # A bare string for `fields` (instead of a list/tuple) is rejected.
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                fields = 'text'
        with self.assertRaises(TypeError) as result:
            ExampleSerializer().fields
        exception = result.exception
        assert str(exception).startswith(
            "The `fields` option must be a list or tuple"
        )
    def test_meta_class_exclude_option(self):
        # A bare string for `exclude` is likewise rejected.
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                exclude = 'text'
        with self.assertRaises(TypeError) as result:
            ExampleSerializer().fields
        exception = result.exception
        assert str(exception).startswith(
            "The `exclude` option must be a list or tuple"
        )
    def test_meta_class_fields_and_exclude_options(self):
        # Supplying both `fields` and `exclude` is an error.
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = MetaClassTestModel
                fields = ('text',)
                exclude = ('text',)
        with self.assertRaises(AssertionError) as result:
            ExampleSerializer().fields
        exception = result.exception
        self.assertEqual(
            str(exception),
            "Cannot set both 'fields' and 'exclude' options on serializer ExampleSerializer."
        )
class Issue2704TestCase(TestCase):
    """Regression test for #2704: attributes set on queryset members should
    be serializable via extra declared fields."""
    def test_queryset_all(self):
        class TestSerializer(serializers.ModelSerializer):
            additional_attr = serializers.CharField()
            class Meta:
                model = OneFieldModel
                fields = ('char_field', 'additional_attr')
        OneFieldModel.objects.create(char_field='abc')
        qs = OneFieldModel.objects.all()
        # Attach a non-model attribute to each fetched instance.
        for o in qs:
            o.additional_attr = '123'
        serializer = TestSerializer(instance=qs, many=True)
        expected = [{
            'char_field': 'abc',
            'additional_attr': '123',
        }]
        assert serializer.data == expected
class DecimalFieldModel(models.Model):
    # DecimalField carrying explicit Min/Max validators; the tests below
    # check how these translate onto the serializer field.
    decimal_field = models.DecimalField(
        max_digits=3,
        decimal_places=1,
        validators=[MinValueValidator(1), MaxValueValidator(3)]
    )
class TestDecimalFieldMappings(TestCase):
    """Mapping of model `DecimalField` validators onto serializer kwargs."""
    def test_decimal_field_has_decimal_validator(self):
        """
        Test that a `DecimalField` has no `DecimalValidator`.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        # Only the two validators declared on the model should be copied over.
        assert len(serializer.fields['decimal_field'].validators) == 2
    def test_min_value_is_passed(self):
        """
        Test that the `MinValueValidator` is converted to the `min_value`
        argument for the field.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        assert serializer.fields['decimal_field'].min_value == 1
    def test_max_value_is_passed(self):
        """
        Test that the `MaxValueValidator` is converted to the `max_value`
        argument for the field.
        """
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = DecimalFieldModel
                fields = '__all__'
        serializer = TestSerializer()
        assert serializer.fields['decimal_field'].max_value == 3
class TestMetaInheritance(TestCase):
    """Building one serializer must not mutate Meta state shared with a
    subclass (e.g. via `extra_kwargs` / `read_only_fields`)."""
    def test_extra_kwargs_not_altered(self):
        class TestSerializer(serializers.ModelSerializer):
            non_model_field = serializers.CharField()
            class Meta:
                model = OneFieldModel
                read_only_fields = ('char_field', 'non_model_field')
                fields = read_only_fields
                extra_kwargs = {}
        class ChildSerializer(TestSerializer):
            class Meta(TestSerializer.Meta):
                read_only_fields = ()
        test_expected = dedent("""
            TestSerializer():
                char_field = CharField(read_only=True)
                non_model_field = CharField()
        """)
        child_expected = dedent("""
            ChildSerializer():
                char_field = CharField(max_length=100)
                non_model_field = CharField()
        """)
        # Check the child both before and after instantiating the parent:
        # building TestSerializer must leave the inherited Meta untouched.
        self.assertEqual(unicode_repr(ChildSerializer()), child_expected)
        self.assertEqual(unicode_repr(TestSerializer()), test_expected)
        self.assertEqual(unicode_repr(ChildSerializer()), child_expected)
class OneToOneTargetTestModel(models.Model):
    # Target of the primary-key one-to-one relation below.
    text = models.CharField(max_length=100)
class OneToOneSourceTestModel(models.Model):
    # One-to-one used as the primary key of the source model.
    target = models.OneToOneField(OneToOneTargetTestModel, primary_key=True, on_delete=models.CASCADE)
class TestModelFieldValues(TestCase):
    """A pk one-to-one relation should serialize to the target's pk value,
    even on unsaved instances."""
    def test_model_field(self):
        class ExampleSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneToOneSourceTestModel
                fields = ('target',)
        # Instances are deliberately not saved to the database.
        target = OneToOneTargetTestModel(id=1, text='abc')
        source = OneToOneSourceTestModel(target=target)
        serializer = ExampleSerializer(source)
        self.assertEqual(serializer.data, {'target': 1})
class TestUniquenessOverride(TestCase):
    """`extra_kwargs` must be able to override the `required` flag implied
    by a `unique_together` constraint."""
    def test_required_not_overwritten(self):
        class TestModel(models.Model):
            field_1 = models.IntegerField(null=True)
            field_2 = models.IntegerField()
            class Meta:
                unique_together = (('field_1', 'field_2'),)
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = TestModel
                fields = '__all__'
                extra_kwargs = {'field_1': {'required': False}}
        fields = TestSerializer().fields
        # The explicit override wins for field_1; field_2 stays required.
        self.assertFalse(fields['field_1'].required)
        self.assertTrue(fields['field_2'].required)
class Issue3674Test(TestCase):
    """Regression tests for #3674: models whose primary key is a non-`id`
    field (a CharField here) must still serialize relations correctly."""
    def test_nonPK_foreignkey_model_serializer(self):
        class TestParentModel(models.Model):
            title = models.CharField(max_length=64)
        class TestChildModel(models.Model):
            parent = models.ForeignKey(TestParentModel, related_name='children', on_delete=models.CASCADE)
            value = models.CharField(primary_key=True, max_length=64)
        class TestChildModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = TestChildModel
                fields = ('value', 'parent')
        class TestParentModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = TestParentModel
                fields = ('id', 'title', 'children')
        parent_expected = dedent("""
            TestParentModelSerializer():
                id = IntegerField(label='ID', read_only=True)
                title = CharField(max_length=64)
                children = PrimaryKeyRelatedField(many=True, queryset=TestChildModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestParentModelSerializer()), parent_expected)
        child_expected = dedent("""
            TestChildModelSerializer():
                value = CharField(max_length=64, validators=[<UniqueValidator(queryset=TestChildModel.objects.all())>])
                parent = PrimaryKeyRelatedField(queryset=TestParentModel.objects.all())
        """)
        self.assertEqual(unicode_repr(TestChildModelSerializer()), child_expected)
    def test_nonID_PK_foreignkey_model_serializer(self):
        # Round-trip with actual instances: the child's CharField pk must
        # appear in the parent's reverse relation.
        class TestChildModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = Issue3674ChildModel
                fields = ('value', 'parent')
        class TestParentModelSerializer(serializers.ModelSerializer):
            class Meta:
                model = Issue3674ParentModel
                fields = ('id', 'title', 'children')
        parent = Issue3674ParentModel.objects.create(title='abc')
        child = Issue3674ChildModel.objects.create(value='def', parent=parent)
        parent_serializer = TestParentModelSerializer(parent)
        child_serializer = TestChildModelSerializer(child)
        parent_expected = {'children': ['def'], 'id': 1, 'title': 'abc'}
        self.assertEqual(parent_serializer.data, parent_expected)
        child_expected = {'parent': 1, 'value': 'def'}
        self.assertEqual(child_serializer.data, child_expected)
class Issue4897TestCase(TestCase):
    """Regression test for issue #4897."""
    def test_should_assert_if_writing_readonly_fields(self):
        class TestSerializer(serializers.ModelSerializer):
            class Meta:
                model = OneFieldModel
                fields = ('char_field',)
                # Deliberately misspelled: the supported option is
                # `read_only_fields`.  Building `.fields` is expected to
                # fail with an AssertionError that names the bad option.
                readonly_fields = fields
        obj = OneFieldModel.objects.create(char_field='abc')
        with pytest.raises(AssertionError) as cm:
            TestSerializer(obj).fields
        cm.match(r'readonly_fields')
class Test5004UniqueChoiceField(TestCase):
    """Regression test for issue #5004: uniqueness validation on a choice field."""
    def test_unique_choice_field(self):
        # UniqueChoiceModel is defined at module level elsewhere in this
        # file; the error message below derives from its verbose name.
        class TestUniqueChoiceSerializer(serializers.ModelSerializer):
            class Meta:
                model = UniqueChoiceModel
                fields = '__all__'
        UniqueChoiceModel.objects.create(name='choice1')
        serializer = TestUniqueChoiceSerializer(data={'name': 'choice1'})
        assert not serializer.is_valid()
        assert serializer.errors == {'name': ['unique choice model with this name already exists.']}
| atombrella/django-rest-framework | tests/test_model_serializer.py | Python | bsd-2-clause | 43,267 | 0.00141 |
# Copyright (C) 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Auto-generated file for BH1750FVI v0.1.0.
# Generated from peripherals/BH1750FVI.yaml using Cyanobyte Codegen v0.1.0
"""
Class for BH1750FVI
"""
from adafruit_bus_device.i2c_device import I2CDevice
I2C_ADDRESS_35 = 35
I2C_ADDRESS_92 = 92
class BH1750FVI:
    """
    Rohm BH1750FVI ambient light sensor (I2C).

    The device is controlled by writing single-byte command opcodes and
    read by fetching a big-endian 16-bit measurement counter.
    """

    # Single-byte command opcodes (one per measurement/power mode).
    REGISTER_CONTINUOUSHRES2MODE = 17
    REGISTER_CONTINUOUSHRESMODE = 16
    REGISTER_CONTINUOUSLYLRESMODE = 19
    REGISTER_ONCEHRES2MODE = 33
    REGISTER_ONCEHRESMODE = 32
    REGISTER_ONCELRESMODE = 35
    REGISTER_POWERDOWN = 0
    REGISTER_POWERON = 1
    REGISTER_RESET = 7

    def __init__(self, i2c, address):
        """Initialize the connection to the peripheral and power it on.

        Args:
            i2c: An initialized I2C bus object.
            address: Device I2C address (I2C_ADDRESS_35 or I2C_ADDRESS_92).
        """
        # Initialize connection to peripheral
        self.i2c_device = I2CDevice(i2c, address)
        self.device_address = address
        self._lifecycle_begin()

    def _write_command(self, opcode):
        """Send a single-byte command opcode to the device.

        Fix over the generated code: the original wrote an empty
        bytearray, so the opcode never reached the peripheral.
        """
        buffer = bytearray(1)
        buffer[0] = opcode
        with self.i2c_device as i2c:
            i2c.write(buffer)

    def set_continuoushres2mode(self):
        """
        Start measurement at 0.5lx resolution. Typically 120ms.
        """
        self._write_command(self.REGISTER_CONTINUOUSHRES2MODE)

    def set_continuoushresmode(self):
        """
        Start measurement at 1lx resolution. Typically 120ms.
        """
        self._write_command(self.REGISTER_CONTINUOUSHRESMODE)

    def set_continuouslylresmode(self):
        """
        Start measurement at 4lx resolution. Typically 16ms.
        """
        self._write_command(self.REGISTER_CONTINUOUSLYLRESMODE)

    def set_oncehres2mode(self):
        """
        Start measurement at 0.5lx resolution. Typically 120ms.
        Power Down after measurement.
        """
        self._write_command(self.REGISTER_ONCEHRES2MODE)

    def set_oncehresmode(self):
        """
        Start measurement at 1lx resolution. Typically 120ms.
        Power Down after measurement.
        """
        self._write_command(self.REGISTER_ONCEHRESMODE)

    def set_oncelresmode(self):
        """
        Start measurement at 4lx resolution. Typically 16ms.
        Power Down after measurement.
        """
        self._write_command(self.REGISTER_ONCELRESMODE)

    def set_powerdown(self):
        """
        No active state
        """
        self._write_command(self.REGISTER_POWERDOWN)

    def set_poweron(self):
        """
        Waiting for measurement command
        """
        self._write_command(self.REGISTER_POWERON)

    def set_reset(self):
        """
        Reset data register value. Not accepted in Power Down mode.
        """
        self._write_command(self.REGISTER_RESET)

    def _lifecycle_begin(self):
        """
        Sends a POWER ON cmd to device
        """
        self.set_poweron()

    def command_powerdown(self):
        """
        Things you can do to device
        """
        self.set_powerdown()

    def command_reset(self):
        """
        Things you can do to device

        Reset is only accepted while powered on, so power on first.
        """
        self.set_poweron()
        self.set_reset()

    def read_lightintensity(self):
        """
        Read light intensity from device

        Returns:
            The raw 16-bit big-endian measurement counter.
        """
        intensity = None  # Variable declaration
        with self.i2c_device as i2c:
            _byte_list = bytearray(2)
            i2c.readinto(_byte_list)
            # Assemble big-endian 16-bit value: MSB first, then LSB.
            intensity = 0
            intensity = intensity << 8 | _byte_list[0]
            intensity = intensity << 8 | _byte_list[1]
        return intensity
| google/cyanobyte | test/sampleData/circuitpython/BH1750FVI.py | Python | apache-2.0 | 4,207 | 0.001426 |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from vod import user_views
from vod.alias_id_views import AliasIdListView, AliasIdCreateView, AliasIdUpdateView, AliasIdRetireView
from vod.datatype_views import DataTypeListView, DataTypeCreateView, DataTypeUpdateView, DataTypeRetireView
from vod.institution_views import InstitutionListView, InstitutionCreateView, InstitutionUpdateView, \
InstitutionRetireView
from vod.patient_views import PatientListView, PatientCreateView, PatientUpdateView, PatientRetireView, \
PatientIdentifiersDetailView, PatientAliasCreateView, PatientTransplantCreateView
from vod.transplant_views import TransplantListView, TransplantCreateView, TransplantUpdateView, TransplantRetireView
from vod.data_views import RawDataListView, RawDataProcessingView, DataAnalysisDetailView
from vod.cleansing_views import DataCleansingTemplatesListView, DataCleansingTemplateCreateView, DataCleansingTemplateFieldsUpdateView
from vod.upload_views import UploadListView
from vod.user_views import UserListView, UserCreateView, UserUpdateView, UserRetireView, LoginView
from vod import helper_views
from django.contrib.auth.decorators import login_required
urlpatterns = [
    # Authentication endpoints.
    # NOTE(review): these two patterns are unanchored (no '^' / '$'), so
    # they match any path containing the substring -- confirm intended.
    url(r'login', LoginView.as_view(), name='vod-login'),
    url(r'logout', user_views.logout, name='vod-logout'),
    # url routes for superuser (admin) related views
    url(r'^user/list/$', login_required(UserListView.as_view()), name='user-list'),
    url(r'^user/create/$', login_required(UserCreateView.as_view()), name='user-create'),
    url(r'^user/update/(?P<id>\d+)/$', login_required(UserUpdateView.as_view()), name='user-update'),
    url(r'^user/delete/(?P<id>\d+)/$', login_required(UserRetireView.as_view()), name='user-retire'),
    # Institution CRUD (retire rather than hard-delete).
    url(r'^institution/list/$', login_required(InstitutionListView.as_view()), name='institution-list'),
    url(r'^institution/create/$', login_required(InstitutionCreateView.as_view()), name='institution-create'),
    url(r'^institution/update/(?P<id>\d+)/$', login_required(InstitutionUpdateView.as_view()), name='institution-update'),
    url(r'^institution/delete/(?P<id>\d+)/$', login_required(InstitutionRetireView.as_view()), name='institution-retire'),
    # Alias-id CRUD.
    url(r'^aliasid/list/$', login_required(AliasIdListView.as_view()), name='alias-id-list'),
    url(r'^aliasid/create/$', login_required(AliasIdCreateView.as_view()), name='alias-id-create'),
    url(r'^aliasid/update/(?P<id>\d+)/$', login_required(AliasIdUpdateView.as_view()), name='alias-id-update'),
    url(r'^aliasid/delete/(?P<id>\d+)/$', login_required(AliasIdRetireView.as_view()), name='alias-id-retire'),
    # Data-type CRUD.
    url(r'^datatype/list/$', login_required(DataTypeListView.as_view()), name='datatype-list'),
    url(r'^datatype/create/$', login_required(DataTypeCreateView.as_view()), name='datatype-create'),
    url(r'^datatype/update/(?P<id>\d+)/$', login_required(DataTypeUpdateView.as_view()), name='datatype-update'),
    url(r'^datatype/delete/(?P<id>\d+)/$', login_required(DataTypeRetireView.as_view()), name='datatype-retire'),
    # Transplant CRUD.
    url(r'^transplant/list/$', login_required(TransplantListView.as_view()), name='transplant-list'),
    url(r'^transplant/create/$', login_required(TransplantCreateView.as_view()), name='transplant-create'),
    url(r'^transplant/update/(?P<id>\d+)/$', login_required(TransplantUpdateView.as_view()), name='transplant-update'),
    url(r'^transplant/delete/(?P<id>\d+)/$', login_required(TransplantRetireView.as_view()), name='transplant-retire'),
    # url routes for staff (normal user) related views
    url(r'^upload/list/$', login_required(UploadListView.as_view()), name='upload-list'),
    url(r'^patient/list/$', login_required(PatientListView.as_view()), name='patient-list'),
    url(r'^patient/create/$', login_required(PatientCreateView.as_view()), name='patient-create'),
    url(r'^patient/update/(?P<id>\d+)/$', login_required(PatientUpdateView.as_view()), name='patient-update'),
    url(r'^patient/delete/(?P<id>\d+)/$', login_required(PatientRetireView.as_view()), name='patient-retire'),
    url(r'^patient/create-alias/(?P<id>\d+)/$', login_required(PatientAliasCreateView.as_view()), name='patient-create-alias'),
    url(r'^patient/create-transplant/(?P<id>\d+)/$', login_required(PatientTransplantCreateView.as_view()), name='patient-create-transplant'),
    url(r'^patient/detail/(?P<id>\d+)/$', login_required(PatientIdentifiersDetailView.as_view()), name='patient-detail'),
    # url routes to view data
    url(r'^data/uploaded-raw/$', login_required(RawDataListView.as_view()), name='raw-data-list'),
    # url(r'^data/uploaded-raw/complete/(?P<id>\d+)/$', login_required(RawDataProcessingView.as_view()), name='data-complete'),
    # url(r'^data/uploaded-raw/valid/(?P<id>\d+)/$', login_required(RawDataProcessingView.as_view()), name='data-valid'),
    url(r'^data/detail/(?P<id>\d+)/(?P<tid>\d+)/$', login_required(DataAnalysisDetailView.as_view()), name='data-analysis-detail'),
    url(r'^data/cleansing-profile/$', login_required(DataCleansingTemplatesListView.as_view()), name='cleansing-profile-list'),
    # url(r'^data/cleansing-profile/create/$', login_required(DataCleansingTemplateCreateView.as_view()), name='cleansing-profile-create'),
    # url(r'^data/cleansing-profile/detail/(?P<id>\d+)/$', login_required(DataCleansingTemplateFieldsListView.as_view()), name='cleansing-profile-detail'),
    url(r'^data/cleansing-profile/detail/update/(?P<id>\d+)/$', login_required(DataCleansingTemplateFieldsUpdateView.as_view()), name='cleansing-template-field-update'),
    # route to helper views
    url(r'^ajax/validate_username/$', helper_views.validate_username, name='validate_username'),
    url(r'^ajax/cleansing-profile-detail/$', helper_views.dataCleansingTemplateFields_asJSON, name='ajax-cleansing-profile-detail'),
    url(r'^ajax/models/$', helper_views.modelsInApp, name='app-models'),
]
# Serve uploaded media through Django only in DEBUG mode; in production a
# web server should serve MEDIA_ROOT instead.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM snapshot operations.
"""
import os
from oslo.config import cfg
from nova.compute import task_states
from nova.image import glance
from nova.openstack.common import log as logging
from nova.virt.hyperv import pathutils
from nova.virt.hyperv import vhdutils
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class SnapshotOps(object):
    """Management class for Hyper-V VM snapshot (image capture) operations."""

    def __init__(self):
        self._pathutils = pathutils.PathUtils()
        self._vmutils = vmutils.VMUtils()
        self._vhdutils = vhdutils.VHDUtils()

    def _save_glance_image(self, context, name, image_vhd_path):
        """Upload the VHD at image_vhd_path to the Glance image named `name`."""
        (glance_image_service,
         image_id) = glance.get_remote_image_service(context, name)
        image_metadata = {"is_public": False,
                          "disk_format": "vhd",
                          "container_format": "bare",
                          "properties": {}}
        with self._pathutils.open(image_vhd_path, 'rb') as f:
            glance_image_service.update(context, image_id, image_metadata, f)

    def snapshot(self, context, instance, name, update_task_state):
        """Create snapshot from a running VM instance.

        Takes a VM snapshot, exports a copy of the instance's VHD (merging
        it with its parent/base disk if one exists), uploads the result to
        Glance, then removes the snapshot and the temporary export dir.

        :param context: request context.
        :param instance: instance mapping; only instance["name"] is used.
        :param name: name of the Glance image to update.
        :param update_task_state: callback used to report upload progress.
        """
        instance_name = instance["name"]
        LOG.debug(_("Creating snapshot for instance %s"), instance_name)
        snapshot_path = self._vmutils.take_vm_snapshot(instance_name)
        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)

        export_dir = None
        try:
            src_vhd_path = self._pathutils.get_vhd_path(instance_name)
            LOG.debug(_("Getting info for VHD %s"), src_vhd_path)
            src_base_disk_path = self._vhdutils.get_vhd_parent_path(
                src_vhd_path)

            export_dir = self._pathutils.get_export_dir(instance_name)

            dest_vhd_path = os.path.join(export_dir, os.path.basename(
                src_vhd_path))
            # Use explicit mappings instead of locals(): locals() silently
            # breaks when a referenced name is not defined in this scope.
            LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'),
                      {'src_vhd_path': src_vhd_path,
                       'dest_vhd_path': dest_vhd_path})
            self._pathutils.copyfile(src_vhd_path, dest_vhd_path)

            image_vhd_path = None
            if not src_base_disk_path:
                # The disk has no parent: upload the copied VHD directly.
                image_vhd_path = dest_vhd_path
            else:
                basename = os.path.basename(src_base_disk_path)
                dest_base_disk_path = os.path.join(export_dir, basename)
                # Fixed message: the base disk being copied is
                # src_base_disk_path, not src_vhd_path.
                LOG.debug(_('Copying base disk %(src_base_disk_path)s to '
                            '%(dest_base_disk_path)s'),
                          {'src_base_disk_path': src_base_disk_path,
                           'dest_base_disk_path': dest_base_disk_path})
                self._pathutils.copyfile(src_base_disk_path,
                                         dest_base_disk_path)

                LOG.debug(_("Reconnecting copied base VHD "
                            "%(dest_base_disk_path)s and diff "
                            "VHD %(dest_vhd_path)s"),
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.reconnect_parent_vhd(dest_vhd_path,
                                                    dest_base_disk_path)

                LOG.debug(_("Merging base disk %(dest_base_disk_path)s and "
                            "diff disk %(dest_vhd_path)s"),
                          {'dest_base_disk_path': dest_base_disk_path,
                           'dest_vhd_path': dest_vhd_path})
                self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path)
                image_vhd_path = dest_base_disk_path

            # Fixed message: there is no `image_id` in this scope; the
            # Glance image is identified by `name` here.
            LOG.debug(_("Updating Glance image %(name)s with content from "
                        "merged disk %(image_vhd_path)s"),
                      {'name': name, 'image_vhd_path': image_vhd_path})
            update_task_state(task_state=task_states.IMAGE_UPLOADING,
                              expected_state=task_states.IMAGE_PENDING_UPLOAD)

            self._save_glance_image(context, name, image_vhd_path)

            LOG.debug(_("Snapshot image %(name)s updated for VM "
                        "%(instance_name)s"),
                      {'name': name, 'instance_name': instance_name})
        finally:
            # Best-effort cleanup: never mask an exception from the body.
            try:
                LOG.debug(_("Removing snapshot %s"), name)
                self._vmutils.remove_vm_snapshot(snapshot_path)
            except Exception as ex:
                LOG.exception(ex)
                LOG.warning(_('Failed to remove snapshot for VM %s'),
                            instance_name)
            if export_dir:
                LOG.debug(_('Removing directory: %s'), export_dir)
                self._pathutils.rmtree(export_dir)
| zestrada/nova-cs498cc | nova/virt/hyperv/snapshotops.py | Python | apache-2.0 | 4,875 | 0 |
import Queue
import atexit
import logging
import threading
import traceback
class WorkerPool(object):
    """ Pool of worker threads; grows as necessary. """

    _lock = threading.Lock()
    _pool = None  # Singleton.

    def __init__(self):
        self._idle = []     # Queues of idle workers.
        self._workers = {}  # Maps queue to worker.
        atexit.register(self.cleanup)

    @staticmethod
    def get_instance():
        """ Return singleton instance. """
        with WorkerPool._lock:
            if WorkerPool._pool is None:
                WorkerPool._pool = WorkerPool()
            return WorkerPool._pool

    @staticmethod
    def cleanup():
        """ Cleanup resources (worker threads). """
        WorkerPool.get_instance()._cleanup()

    def _cleanup(self):
        """ Cleanup resources (worker threads). """
        with self._lock:
            for queue in self._workers:
                # A None callable is the shutdown sentinel.
                queue.put((None, None, None, None))
                self._workers[queue].join(1)
                if self._workers[queue].is_alive():
                    logging.debug('WorkerPool: worker join timed-out.')
                try:
                    self._idle.remove(queue)
                except ValueError:
                    pass  # Never released due to some other issue...
            self._idle = []
            self._workers = {}

    @staticmethod
    def get(one_shot=False):
        """
        Get a worker queue from the pool. Work requests should be of the form:

        ``(callable, *args, **kwargs, reply_queue)``

        Work replies are of the form:

        ``(queue, retval, exc, traceback)``

        one_shot: bool
            If True, the worker will self-release after processing one request.
        """
        return WorkerPool.get_instance()._get(one_shot)

    def _get(self, one_shot):
        """ Get a worker queue from the pool. """
        with self._lock:
            try:
                # NOTE(review): a reused idle worker keeps the one_shot mode
                # it was created with, regardless of this call's argument.
                return self._idle.pop()
            except IndexError:
                queue = Queue.Queue()
                worker = threading.Thread(target=self._service_loop,
                                          args=(queue, one_shot))
                worker.daemon = True
                worker.start()
                self._workers[queue] = worker
                return queue

    @staticmethod
    def release(queue):
        """
        Release a worker queue back to the pool.

        queue: Queue
            Worker queue previously obtained from :meth:`get`.
        """
        return WorkerPool.get_instance()._release(queue)

    def _release(self, queue):
        """ Release a worker queue back to the pool. """
        with self._lock:
            self._idle.append(queue)

    def _service_loop(self, request_q, one_shot):
        """ Get (callable, args, kwargs, reply_q) from request_q and queue result. """
        while True:
            func, args, kwargs, reply_q = request_q.get()
            if func is None:
                request_q.task_done()
                return  # Shutdown.

            # Bind the caught exception to a separate name: the name bound
            # by `except ... as exc` is unbound once the except block ends
            # under Python 3, so it cannot be referenced afterwards.
            error = None
            trace = None
            retval = None
            try:
                retval = func(*args, **kwargs)
            except Exception as exc:
                error = exc
                # Sometimes we have issues at shutdown.
                try:
                    trace = traceback.format_exc()
                except Exception:  # pragma no cover
                    return

            request_q.task_done()
            if reply_q is not None:
                reply_q.put((request_q, retval, error, trace))

            if one_shot:
                self._release(request_q)
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.util/src/openmdao/util/wrkpool.py | Python | mit | 3,630 | 0 |
#!/usr/bin/env python
#Copyright (c) <2015>, <Jaakko Leppakangas>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#The views and conclusions contained in the software and documentation are those
#of the authors and should not be interpreted as representing official policies,
#either expressed or implied, of the FreeBSD Project.
'''
Created on Dec 16, 2014
@author: Jaakko Leppakangas
'''
import sys
from PyQt4 import QtGui
from ui.preprocessDialog import PreprocessDialog
def main():
    """Start the Qt event loop with the preprocessing dialog as the main window."""
    app = QtGui.QApplication(sys.argv)
    dialog = PreprocessDialog()
    dialog.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| jaeilepp/eggie | eggie.py | Python | bsd-2-clause | 1,903 | 0.017867 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate the wide baseline stereo image dataset from the Matterport3D.
We generate the data by randomly sample different perspective views from
panoramic images in Matterport3D to create a large scale dataset with a large
varieties of motion. The dataset contains a pair of perspective images labeled
with the relative rotation from camera 2 to camera 1, and the relative
translation direction in the frame of camera 1.
Matterport3D: https://niessner.github.io/Matterport/
https://arxiv.org/pdf/1709.06158.pdf
"""
import collections
import math
import numpy as np
from pano_utils import math_utils
from pano_utils import transformation
import tensorflow.compat.v1 as tf
def world_to_image_projection(p_world, intrinsics, pose_w2c):
  """Project points in the world frame to the image plane.

  Args:
    p_world: [HEIGHT, WIDTH, 3] points in the world's coordinate frame.
    intrinsics: [3, 3] camera's intrinsic matrix.
    pose_w2c: [3, 4] camera pose matrix (world to camera).

  Returns:
    [HEIGHT, WIDTH, 2] points in the image coordinate.
    [HEIGHT, WIDTH, 1] the z depth.
  """
  shape = p_world.shape.as_list()
  height, width = shape[0], shape[1]
  # Homogeneous world coordinates, then apply the [3, 4] extrinsics per pixel.
  p_world_homogeneous = tf.concat([p_world, tf.ones([height, width, 1])], -1)
  p_camera = tf.squeeze(
      tf.matmul(pose_w2c[tf.newaxis, tf.newaxis, :],
                tf.expand_dims(p_world_homogeneous, -1)), -1)
  # Flip the z axis; this mirrors the "convert to OpenGL coordinate system"
  # flip applied in image_to_world_projection below.
  p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
  p_image = tf.squeeze(tf.matmul(intrinsics[tf.newaxis, tf.newaxis, :],
                                 tf.expand_dims(p_camera, -1)), -1)
  # Perspective divide; divide_no_nan yields 0 where the depth is 0.
  z = p_image[:, :, -1:]
  return tf.math.divide_no_nan(p_image[:, :, :2], z), z
def image_to_world_projection(depth, intrinsics, pose_c2w):
  """Project points on the image to the world frame.

  Args:
    depth: [HEIGHT, WIDTH, 1] the depth map contains the radial distance from
      the camera eye to each point corresponding to each pixel.
    intrinsics: [3, 3] camera's intrinsic matrix.
    pose_c2w: [3, 4] camera pose matrix (camera to world).

  Returns:
    [HEIGHT, WIDTH, 3] points in the world's coordinate frame.
  """
  shape = depth.shape.as_list()
  height, width = shape[0], shape[1]
  # Pixel-center grid in homogeneous image coordinates.
  xx, yy = tf.meshgrid(tf.lin_space(0., width-1., width),
                       tf.lin_space(0., height-1., height))
  p_pixel_homogeneous = tf.concat([tf.stack([xx, yy], axis=-1),
                                   tf.ones([height, width, 1])], -1)
  # Back-project pixels to rays via the inverse intrinsics.
  p_image = tf.squeeze(tf.matmul(
      tf.matrix_inverse(intrinsics[tf.newaxis, tf.newaxis, :]),
      tf.expand_dims(p_pixel_homogeneous, -1)), -1)
  # The input depth is radial (along the viewing ray, per the docstring);
  # dotting the normalized ray with the camera forward axis [0, 0, 1]
  # converts it to planar z-depth before scaling the ray.
  z = depth*tf.reduce_sum(
      tf.math.l2_normalize(p_image, axis=-1)*tf.constant([[[0., 0., 1.]]]),
      axis=-1,
      keepdims=True)
  p_camera = z*p_image
  # convert to OpenGL coordinate system.
  p_camera = p_camera*tf.constant([1., 1., -1.], shape=[1, 1, 3])
  p_camera_homogeneous = tf.concat(
      [p_camera, tf.ones(shape=[height, width, 1])], -1)
  # Convert camera coordinates to world coordinates.
  p_world = tf.squeeze(
      tf.matmul(pose_c2w[tf.newaxis, tf.newaxis, :],
                tf.expand_dims(p_camera_homogeneous, -1)), -1)
  return p_world
def overlap_mask(depth1,
                 pose1_c2w,
                 depth2,
                 pose2_c2w,
                 intrinsics):
  """Compute the overlap masks of two views using triangulation.

  The masks have the same shape of the input images. A pixel value is true if it
  can be seen by both cameras.

  Args:
    depth1: [HEIGHT, WIDTH, 1] the depth map of the first view.
    pose1_c2w: [3, 4] camera pose matrix (camera to world) of the first view.
      pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
    depth2: [HEIGHT, WIDTH, 1] the depth map of the second view.
    pose2_c2w: [3, 4] camera pose matrix (camera to world) of the second view.
      pose1_c2w[:, :3] is the rotation and pose1_c2w[:, -1] is the translation.
    intrinsics: [3, 3] camera's intrinsic matrix.

  Returns:
    [HEIGHT, WIDTH] two overlap masks of the two inputs respectively.
  """
  # World-to-camera extrinsics: invert the homogeneous camera-to-world
  # matrices and drop the last row.
  pose1_w2c = tf.matrix_inverse(
      tf.concat([pose1_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
  pose2_w2c = tf.matrix_inverse(
      tf.concat([pose2_c2w, tf.constant([[0., 0., 0., 1.]])], 0))[:3]
  # Lift each view's pixels to world space and reproject into the other view.
  p_world1 = image_to_world_projection(depth1, intrinsics, pose1_c2w)
  p_image1_in_2, z1_c2 = world_to_image_projection(
      p_world1, intrinsics, pose2_w2c)
  p_world2 = image_to_world_projection(depth2, intrinsics, pose2_c2w)
  p_image2_in_1, z2_c1 = world_to_image_projection(
      p_world2, intrinsics, pose1_w2c)
  shape = depth1.shape.as_list()
  height, width = shape[0], shape[1]
  height = tf.cast(height, tf.float32)
  width = tf.cast(width, tf.float32)
  # Error tolerance.
  eps = 1e-4
  # check the object seen by camera 2 is also projected to camera 1's image
  # plane and in front of the camera 1.
  mask_h2_in_1 = tf.logical_and(
      tf.less_equal(p_image2_in_1[:, :, 1], height+eps),
      tf.greater_equal(p_image2_in_1[:, :, 1], 0.-eps))
  mask_w2_in_1 = tf.logical_and(
      tf.less_equal(p_image2_in_1[:, :, 0], width+eps),
      tf.greater_equal(p_image2_in_1[:, :, 0], 0.-eps))
  # check the projected points are within the image boundaries and in front of
  # the camera.
  mask2_in_1 = tf.logical_and(
      tf.logical_and(mask_h2_in_1, mask_w2_in_1), tf.squeeze(z2_c1, -1) > 0)
  # check the object seen by camera 1 is also projected to camera 2's image
  # plane and in front of the camera 2.
  mask_h1_in_2 = tf.logical_and(
      tf.less_equal(p_image1_in_2[:, :, 1], height+eps),
      tf.greater_equal(p_image1_in_2[:, :, 1], 0.-eps))
  mask_w1_in_2 = tf.logical_and(
      tf.less_equal(p_image1_in_2[:, :, 0], width+eps),
      tf.greater_equal(p_image1_in_2[:, :, 0], 0.-eps))
  # check the projected points are within the image boundaries and in front of
  # the camera.
  mask1_in_2 = tf.logical_and(
      tf.logical_and(mask_h1_in_2, mask_w1_in_2), tf.squeeze(z1_c2, -1) > 0)
  return mask1_in_2, mask2_in_1
def overlap_ratio(mask1, mask2):
  """Compute the overlap ratio of two views from their overlap masks.

  The overlap ratio is the minimum, over the two views, of the fraction of
  image pixels marked as visible in both cameras.

  Args:
    mask1: [HEIGHT, WIDTH] overlapping mask of view 1 (e.g. from
      overlap_mask).
    mask2: [HEIGHT, WIDTH] overlapping mask of view 2.

  Returns:
    A scalar tf.float32 tensor in [0, 1].
  """
  shape = mask1.shape.as_list()
  height, width = shape[0], shape[1]
  total = float(height * width)
  ratio1 = tf.reduce_sum(tf.cast(mask1, tf.float32)) / total
  ratio2 = tf.reduce_sum(tf.cast(mask2, tf.float32)) / total
  # `tf.min` does not exist; the elementwise minimum of two tensors is
  # `tf.minimum` (the original call raised AttributeError).
  return tf.minimum(ratio1, ratio2)
# This is written for Matterport3D's directory structure.
def generate_from_meta(meta_data_path,
                       pano_data_dir,
                       pano_height=1024,
                       pano_width=2048,
                       output_height=512,
                       output_width=512):
  """Generate the stereo image dataset from Matterport3D using the meta data.

  Example call:
  ds = generate_from_meta(
      meta_data_path='matterport3d/saved_meta/R90_fov90/test_meta/',
      pano_data_dir='matterport3d/pano/')

  Args:
    meta_data_path: (string) the path to the meta data files.
    pano_data_dir: (string) the path to the panorama images of the Matterport3D.
    pano_height: (int) the height dimension of the panorama images.
    pano_width: (int) the width dimension of the panorama images.
    output_height: (int) the height dimension of the output perspective images.
    output_width: (int) the width dimension of the output perspective images.

  Returns:
    Tensorflow Dataset.
  """
  def load_text(file_path, n_lines=200):
    """Load text data from a file."""
    return tf.data.Dataset.from_tensor_slices(
        tf.data.experimental.get_single_element(
            tf.data.TextLineDataset(file_path).batch(n_lines)))

  def load_single_image(filename):
    """Load a single image given the filename."""
    image = tf.image.decode_jpeg(tf.read_file(filename), 3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image.set_shape([pano_height, pano_width, 3])
    return image

  def string_to_matrix(s, shape):
    """Decode strings to matrices tensor."""
    m = tf.reshape(
        tf.stack([tf.decode_csv(s, [0.0] * np.prod(shape))], 0), shape)
    m.set_shape(shape)
    return m

  def decode_line(line):
    """Decode text lines."""
    DataPair = collections.namedtuple(
        'DataPair', ['src_img', 'trt_img', 'fov', 'rotation', 'translation'])
    # Each meta line holds 10 space-separated fields; judging by their use
    # below: scan id, two pano names, fov, then serialized r1/t1/r2/t2 and
    # two sampled rotations.  NOTE(review): confirm against the meta writer.
    splitted = tf.decode_csv(line, ['']*10, field_delim=' ')
    img1 = load_single_image(pano_data_dir+splitted[0]+'/'+splitted[1]+'.jpeg')
    img2 = load_single_image(pano_data_dir+splitted[0]+'/'+splitted[2]+'.jpeg')
    fov = string_to_matrix(splitted[3], [1])
    r1 = string_to_matrix(splitted[4], [3, 3])
    t1 = string_to_matrix(splitted[5], [3])
    r2 = string_to_matrix(splitted[6], [3, 3])
    t2 = string_to_matrix(splitted[7], [3])
    sampled_r1 = string_to_matrix(splitted[8], [3, 3])
    sampled_r2 = string_to_matrix(splitted[9], [3, 3])
    # Relative rotation from camera 2 to camera 1.
    r_c2_to_c1 = tf.matmul(sampled_r1, sampled_r2, transpose_a=True)
    # Unit translation direction expressed in camera 1's frame.
    t_c1 = tf.squeeze(tf.matmul(sampled_r1,
                                tf.expand_dims(tf.nn.l2_normalize(t2-t1), -1),
                                transpose_a=True))
    sampled_rotation = tf.matmul(tf.stack([sampled_r1, sampled_r2], 0),
                                 tf.stack([r1, r2], 0), transpose_a=True)
    # Render perspective crops of both panoramas at the sampled rotations.
    sampled_views = transformation.rectilinear_projection(
        tf.stack([img1, img2], 0),
        [output_height, output_width],
        fov,
        tf.matrix_transpose(sampled_rotation))
    src_img, trt_img = sampled_views[0], sampled_views[1]
    return DataPair(src_img, trt_img, fov, r_c2_to_c1, t_c1)

  # meta_data_path has slash '/' at the end.
  ds = tf.data.Dataset.list_files(meta_data_path+'*')
  ds = ds.flat_map(load_text)
  ds = ds.map(decode_line)
  return ds
def generate_random_views(pano1_rgb,
                          pano2_rgb,
                          r1, t1, r2, t2,
                          max_rotation=90.,
                          max_tilt=5.,
                          output_fov=90.,
                          output_height=512,
                          output_width=512,
                          pano1_depth=None,
                          pano2_depth=None):
  """Generate stereo image pairs by randomly sampling the panoramic images.
  We randomly sample camera lookat directions and project the panorama to
  perspective images. We also compute the overlaping area between the pair given
  the depth map if depthmaps are provided. The overlap is measured by the
  minimum of the ratio between the area seen by both cameras and the image size.
  This function returns a ViewPair object containing the perspective images,
  the masks that shows the common area seen by both cameras, the camera's field
  of view (FoV), the relative rotation from camera 2 to camera 1, and the
  relative translation direction in the frame of camera 1.
  Args:
    pano1_rgb: [HEIGHT, WIDTH, 3] the input RGB panoramic image.
    pano2_rgb: [HEIGHT, WIDTH, 3] the input RGB panoramic image.
    r1: [3, 3] the camera to world rotation of camera 1.
    t1: [3] the world location of camera 1.
    r2: [3, 3] the camera to world rotation of camera 2.
    t2: [3] the world location of camera 2.
    max_rotation: (float) maximum relative rotation between the output image
      pair in degrees.
    max_tilt: (float) maximum tilt angle of the up vector in degrees.
    output_fov: (float) output images' horizontal field of view in degrees.
    output_height: (int) the height dimension of the output perspective images.
    output_width: (int) the width dimension of the output perspective images.
    pano1_depth: [HEIGHT, WIDTH, 1] the panoramic depth map of pano1_rgb.
    pano2_depth: [HEIGHT, WIDTH, 1] the panoramic depth map of pano2_rgb.
  Returns:
    ViewPair
  """
  ViewPair = collections.namedtuple(
      'ViewPair', ['img1', 'img2', 'mask1', 'mask2', 'fov', 'r', 't'])
  # Axis permutation (x, y, z) -> (x, z, -y); presumably converts the
  # sampler's convention to z-up -- TODO confirm.
  swap_yz = tf.constant([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]],
                        shape=[1, 3, 3])
  # First lookat direction: random on the sphere, elevation limited to
  # +/-60 degrees (the sin(pi/3) bounds below).
  lookat_direction1 = math_utils.random_vector_on_sphere(
      1, [[-math.sin(math.pi/3), math.sin(math.pi/3)], [0., 2*math.pi]])
  lookat_direction1 = tf.squeeze(
      tf.matmul(swap_yz, tf.expand_dims(lookat_direction1, -1)), -1)
  # Second lookat direction lies within max_rotation degrees of the first.
  lookat_direction2 = math_utils.uniform_sampled_vector_within_cone(
      lookat_direction1, math_utils.degrees_to_radians(max_rotation))
  lookat_directions = tf.concat([lookat_direction1, lookat_direction2], 0)
  # Independently tilt each camera's up vector by at most max_tilt degrees.
  up1 = math_utils.uniform_sampled_vector_within_cone(
      tf.constant([[0., 0., 1.]]), math_utils.degrees_to_radians(max_tilt))
  up2 = math_utils.uniform_sampled_vector_within_cone(
      tf.constant([[0., 0., 1.]]), math_utils.degrees_to_radians(max_tilt))
  lookat_rotations = math_utils.lookat_matrix(
      tf.concat([up1, up2], 0), lookat_directions)
  # Sampled view orientation expressed relative to each panorama's pose.
  sample_rotations = tf.matmul(
      tf.concat([r1, r2], 0), lookat_rotations, transpose_a=True)
  sampled_views = transformation.rectilinear_projection(
      tf.stack([pano1_rgb, pano2_rgb], 0),
      [output_height, output_width],
      output_fov,
      sample_rotations)
  # Relative rotation taking camera-2 coordinates to camera-1 coordinates.
  r_c2_to_c1 = tf.matmul(
      lookat_rotations[0], lookat_rotations[1], transpose_a=True)
  # Unit direction from camera 1 towards camera 2 in camera 1's frame.
  t_c1 = tf.squeeze(tf.matmul(lookat_rotations[0],
                              tf.expand_dims(tf.nn.l2_normalize(t2-t1), -1),
                              transpose_a=True))
  if pano1_depth is not None and pano2_depth is not None:
    # Project the depth panoramas with the same rotations so the overlap
    # masks line up with the RGB crops.
    sampled_depth = transformation.rectilinear_projection(
        tf.stack([pano1_depth, pano2_depth], 0),
        [output_height, output_width],
        output_fov,
        sample_rotations)
    # Pinhole focal length in pixels derived from the horizontal FoV.
    fx = output_width*0.5/math.tan(math_utils.degrees_to_radians(output_fov)/2)
    # NOTE(review): the negative fy flips the vertical image axis -- confirm
    # this matches overlap_mask's expected convention.
    intrinsics = tf.constant([[fx, 0., output_width*0.5],
                              [0., -fx, output_height*0.5],
                              [0., 0., 1.]])
    pose1_c2w = tf.concat([lookat_rotations[0], tf.expand_dims(t1, -1)], 1)
    pose2_c2w = tf.concat([lookat_rotations[1], tf.expand_dims(t2, -1)], 1)
    mask1, mask2 = overlap_mask(sampled_depth[0],
                                pose1_c2w,
                                sampled_depth[1],
                                pose2_c2w,
                                intrinsics)
  else:
    mask1 = None
    mask2 = None
  return ViewPair(sampled_views[0],
                  sampled_views[1],
                  mask1,
                  mask2,
                  output_fov,
                  r_c2_to_c1,
                  t_c1)
| google-research/google-research | direction_net/dataset.py | Python | apache-2.0 | 15,616 | 0.005635 |
# Copyright (c) 2014 Rackspace US, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common classes for local filesystem certificate handling
"""
import os
from oslo_config import cfg
from octavia.certificates.common import cert
# Environment-overridable defaults for local certificate generation and
# storage. Each falls back to a development-friendly (insecure) value.
TLS_CERT_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_CA_CERT', '/etc/ssl/certs/ssl-cert-snakeoil.pem'
)
TLS_KEY_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_CA_KEY', '/etc/ssl/private/ssl-cert-snakeoil.key'
)
TLS_PKP_DEFAULT = os.environ.get('OS_OCTAVIA_CA_KEY_PASS')
TLS_PASS_AMPS_DEFAULT = os.environ.get('TLS_PASS_AMPS_DEFAULT',
                                       'insecure-key-do-not-use-this-key')
TLS_DIGEST_DEFAULT = os.environ.get('OS_OCTAVIA_CA_SIGNING_DIGEST', 'sha256')
TLS_STORAGE_DEFAULT = os.environ.get(
    'OS_OCTAVIA_TLS_STORAGE', '/var/lib/octavia/certificates/'
)
# oslo.config options consumed by the local certificate *generator*.
certgen_opts = [
    cfg.StrOpt('ca_certificate',
               default=TLS_CERT_DEFAULT,
               help='Absolute path to the CA Certificate for signing. Defaults'
                    ' to env[OS_OCTAVIA_TLS_CA_CERT].'),
    cfg.StrOpt('ca_private_key',
               default=TLS_KEY_DEFAULT,
               help='Absolute path to the Private Key for signing. Defaults'
                    ' to env[OS_OCTAVIA_TLS_CA_KEY].'),
    cfg.StrOpt('ca_private_key_passphrase',
               default=TLS_PKP_DEFAULT,
               help='Passphrase for the Private Key. Defaults'
                    ' to env[OS_OCTAVIA_CA_KEY_PASS] or None.'),
    cfg.StrOpt('server_certs_key_passphrase',
               default=TLS_PASS_AMPS_DEFAULT,
               help='Passphrase for encrypting Amphora Certificates and '
                    'Private Keys. Must be 32, base64(url) compatible, '
                    'characters long. Defaults to env[TLS_PASS_AMPS_DEFAULT] '
                    'or insecure-key-do-not-use-this-key',
               regex=r'^[A-Za-z0-9\-_=]{32}$',
               required=True),
    cfg.StrOpt('signing_digest',
               default=TLS_DIGEST_DEFAULT,
               help='Certificate signing digest. Defaults'
                    ' to env[OS_OCTAVIA_CA_SIGNING_DIGEST] or "sha256".'),
    cfg.IntOpt('cert_validity_time',
               default=30 * 24 * 60 * 60,
               help="The validity time for the Amphora Certificates "
                    "(in seconds)."),
]
# oslo.config options consumed by the local certificate *manager*.
certmgr_opts = [
    cfg.StrOpt('storage_path',
               default=TLS_STORAGE_DEFAULT,
               help='Absolute path to the certificate storage directory. '
                    'Defaults to env[OS_OCTAVIA_TLS_STORAGE].')
]
class LocalCert(cert.Cert):
    """In-memory certificate bundle used by the local filesystem backend."""

    def __init__(self, certificate, private_key, intermediates=None,
                 private_key_passphrase=None):
        self.certificate = certificate
        self.private_key = private_key
        self.intermediates = intermediates
        self.private_key_passphrase = private_key_passphrase

    def get_certificate(self):
        """Return the certificate data."""
        return self.certificate

    def get_intermediates(self):
        """Return the intermediate chain, or None if there is none."""
        return self.intermediates

    def get_private_key(self):
        """Return the private key data."""
        return self.private_key

    def get_private_key_passphrase(self):
        """Return the passphrase protecting the private key, or None."""
        return self.private_key_passphrase
| openstack/octavia | octavia/certificates/common/local.py | Python | apache-2.0 | 3,779 | 0 |
'''
Basic processing procedures for analog signals (e.g., performing a z-score of a signal, or filtering a signal).
:copyright: Copyright 2014-2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
'''
from __future__ import division, print_function
import numpy as np
import scipy.signal
import quantities as pq
import neo
def zscore(signal, inplace=True):
    '''
    Apply a z-score operation to one or several AnalogSignalArray objects.

    The z-score operation subtracts the mean :math:`\\mu` of the signal, and
    divides by its standard deviation :math:`\\sigma`:

    .. math::
         Z(x(t))= \\frac{x(t)-\\mu}{\\sigma}

    If an AnalogSignalArray containing multiple signals is provided, the
    z-transform is always calculated for each signal individually.

    If a list of AnalogSignalArray objects is supplied, the mean and standard
    deviation are calculated across all objects of the list. Thus, all list
    elements are z-transformed by the same values of :math:`\\mu` and
    :math:`\\sigma`. For AnalogSignalArrays, each signal of the array is
    treated separately across list elements. Therefore, the number of signals
    must be identical for each AnalogSignalArray of the list.

    Parameters
    ----------
    signal : neo.AnalogSignalArray or list of neo.AnalogSignalArray
        Signals for which to calculate the z-score.
    inplace : bool
        If True, the contents of the input signal(s) is replaced by the
        z-transformed signal. Otherwise, a copy of the original
        AnalogSignalArray(s) is returned. Default: True

    Returns
    -------
    neo.AnalogSignalArray or list of neo.AnalogSignalArray
        The output format matches the input format: for each supplied
        AnalogSignalArray object a corresponding object is returned containing
        the z-transformed signal with the unit dimensionless.

    Use Case
    --------
    You may supply a list of AnalogSignalArray objects, where each object in
    the list contains the data of one trial of the experiment, and each signal
    of the AnalogSignalArray corresponds to the recordings from one specific
    electrode in a particular trial. In this scenario, you will z-transform the
    signal of each electrode separately, but transform all trials of a given
    electrode in the same way.

    Examples
    --------
    >>> a = neo.AnalogSignalArray(
    ...       np.array([1, 2, 3, 4, 5, 6]).reshape(-1,1)*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> b = neo.AnalogSignalArray(
    ...       np.transpose([[1, 2, 3, 4, 5, 6], [11, 12, 13, 14, 15, 16]])*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> c = neo.AnalogSignalArray(
    ...       np.transpose([[21, 22, 23, 24, 25, 26], [31, 32, 33, 34, 35, 36]])*mV,
    ...       t_start=0*s, sampling_rate=1000*Hz)

    >>> print zscore(a)
    [[-1.46385011]
     [-0.87831007]
     [-0.29277002]
     [ 0.29277002]
     [ 0.87831007]
     [ 1.46385011]] dimensionless

    >>> print zscore(b)
    [[-1.46385011 -1.46385011]
     [-0.87831007 -0.87831007]
     [-0.29277002 -0.29277002]
     [ 0.29277002  0.29277002]
     [ 0.87831007  0.87831007]
     [ 1.46385011  1.46385011]] dimensionless

    >>> print zscore([b,c]) # doctest: +NORMALIZE_WHITESPACE
    [<AnalogSignalArray(array([[-1.11669108, -1.08361877],
       [-1.0672076 , -1.04878252],
       [-1.01772411, -1.01394628],
       [-0.96824063, -0.97911003],
       [-0.91875714, -0.94427378],
       [-0.86927366, -0.90943753]]) * dimensionless, [0.0 s, 0.006 s],
       sampling rate: 1000.0 Hz)>,
       <AnalogSignalArray(array([[ 0.78170952,  0.84779261],
       [ 0.86621866,  0.90728682],
       [ 0.9507278 ,  0.96678104],
       [ 1.03523694,  1.02627526],
       [ 1.11974608,  1.08576948],
       [ 1.20425521,  1.1452637 ]]) * dimensionless, [0.0 s, 0.006 s],
       sampling rate: 1000.0 Hz)>]
    '''
    # Transform input to a list
    if not isinstance(signal, list):
        signal = [signal]

    # Pool the list elements once and derive the shared mean and standard
    # deviation from the pooled data (the original concatenated twice).
    pooled = np.concatenate(signal)
    m = np.mean(pooled, axis=0, keepdims=True)
    s = np.std(pooled, axis=0, keepdims=True)

    if not inplace:
        # Create a new z-transformed signal instance for each input element.
        result = [sig.duplicate_with_new_array(
            (sig.magnitude - m.magnitude) / s.magnitude) for sig in signal]
        # Divide by the signal's own units to make the result dimensionless.
        for sig in result:
            sig /= sig.units
    else:
        # Overwrite each input signal with its z-transform.
        for sig in signal:
            sig[:] = pq.Quantity(
                (sig.magnitude - m.magnitude) / s.magnitude,
                units=sig.units)
            sig /= sig.units
        result = signal

    # Return single object, or list of objects, matching the input shape.
    if len(result) == 1:
        return result[0]
    else:
        return result
def butter(signal, highpass_freq=None, lowpass_freq=None, order=4,
           filter_function='filtfilt', fs=1.0, axis=-1):
    """
    Butterworth filtering function for neo.AnalogSignalArray. Filter type is
    determined according to how values of `highpass_freq` and `lowpass_freq`
    are given (see Parameters section for details).

    Parameters
    ----------
    signal : AnalogSignalArray or Quantity array or NumPy ndarray
        Time series data to be filtered. When given as Quantity array or NumPy
        ndarray, the sampling frequency should be given through the keyword
        argument `fs`.
    highpass_freq, lowpass_freq : Quantity or float
        High-pass and low-pass cut-off frequencies, respectively. When given as
        float, the given value is taken as frequency in Hz.
        Filter type is determined depending on values of these arguments:
            * highpass_freq only (lowpass_freq = None): highpass filter
            * lowpass_freq only (highpass_freq = None): lowpass filter
            * highpass_freq < lowpass_freq: bandpass filter
            * highpass_freq > lowpass_freq: bandstop filter
    order : int
        Order of Butterworth filter. Default is 4.
    filter_function : string
        Filtering function to be used. Either 'filtfilt'
        (`scipy.signal.filtfilt()`) or 'lfilter' (`scipy.signal.lfilter()`). In
        most applications 'filtfilt' should be used, because it doesn't bring
        about phase shift due to filtering. Default is 'filtfilt'.
    fs : Quantity or float
        The sampling frequency of the input time series. When given as float,
        its value is taken as frequency in Hz. When the input is given as neo
        AnalogSignalArray, its attribute is used to specify the sampling
        frequency and this parameter is ignored. Default is 1.0.
    axis : int
        Axis along which filter is applied. Default is -1.

    Returns
    -------
    filtered_signal : AnalogSignalArray or Quantity array or NumPy ndarray
        Filtered input data. The shape and type is identical to those of the
        input.
    """

    def _design_butterworth_filter(Fs, hpfreq=None, lpfreq=None, order=4):
        # set parameters for filter design
        Fn = Fs / 2.
        # - filter type is determined according to the values of cut-off
        #   frequencies
        if lpfreq and hpfreq:
            if hpfreq < lpfreq:
                Wn = (hpfreq / Fn, lpfreq / Fn)
                btype = 'bandpass'
            else:
                Wn = (lpfreq / Fn, hpfreq / Fn)
                btype = 'bandstop'
        elif lpfreq:
            Wn = lpfreq / Fn
            btype = 'lowpass'
        elif hpfreq:
            Wn = hpfreq / Fn
            btype = 'highpass'
        else:
            raise ValueError(
                "Either highpass_freq or lowpass_freq must be given"
            )
        # return filter coefficients
        return scipy.signal.butter(order, Wn, btype=btype)

    # design filter; Quantity inputs are rescaled to plain Hz magnitudes
    Fs = signal.sampling_rate.rescale(pq.Hz).magnitude \
        if hasattr(signal, 'sampling_rate') else fs
    Fh = highpass_freq.rescale(pq.Hz).magnitude \
        if isinstance(highpass_freq, pq.quantity.Quantity) else highpass_freq
    Fl = lowpass_freq.rescale(pq.Hz).magnitude \
        if isinstance(lowpass_freq, pq.quantity.Quantity) else lowpass_freq
    b, a = _design_butterworth_filter(Fs, Fh, Fl, order)

    # When the input is AnalogSignalArray, the axis for time index (i.e. the
    # first axis) needs to be rolled to the last
    data = np.asarray(signal)
    if isinstance(signal, neo.AnalogSignalArray):
        data = np.rollaxis(data, 0, len(data.shape))

    # apply filter
    # BUG FIX: string values must be compared with `==`, not `is`. Identity
    # comparison only worked through CPython string interning and raises a
    # SyntaxWarning on modern interpreters.
    if filter_function == 'lfilter':
        filtered_data = scipy.signal.lfilter(b, a, data, axis=axis)
    elif filter_function == 'filtfilt':
        filtered_data = scipy.signal.filtfilt(b, a, data, axis=axis)
    else:
        raise ValueError(
            "filter_function must be either 'filtfilt' or 'lfilter'"
        )

    if isinstance(signal, neo.AnalogSignalArray):
        return signal.duplicate_with_new_array(filtered_data.T)
    elif isinstance(signal, pq.quantity.Quantity):
        return filtered_data * signal.units
    else:
        return filtered_data
| sonjagruen/elephant | elephant/signal_processing.py | Python | bsd-3-clause | 9,104 | 0.00022 |
import requests
from PIL import Image, ImageEnhance, ImageChops, ImageFilter
from io import BytesIO, StringIO
import time
import sys, os
import codecs
# Target pathwar level base URL and its captcha endpoint.
url = 'http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net'
imgurl = url + '/captcha.php'
# Request headers mimicking a desktop Firefox session. The Cookie and
# Authorization values were redacted ('erased') before committing.
# NOTE(review): the Referer value contains a doubled scheme
# ('http://http://') -- confirm whether that is intentional.
headers = { 'Host' : 'd1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net',
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip, deflate',
'DNT' : '1',
'Referer' : 'http://http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net/',
'Cookie' : 'PHPSESSID=',#erased
'Authorization' : 'Basic ',#erased
# 'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded' }
def recognize(img, bounds):
    """Guess the captcha word by template-matching each bounding box.

    Loads the training samples written by _train() (one "<letter>|<hex BMP>"
    record per line of ads.dat), then for every bounding box picks the sample
    whose pixels agree the most with the cropped region. Underscores (the
    "skip" label) are stripped from the final word.
    """
    # read dataset of images for each letter
    imgs = {}
    with open("ads.dat", "rt") as datfile:
        for line in datfile:
            line = line.rstrip("\n")
            if not line:
                continue
            key = line[0]
            # BUG FIX: the original Python 2 idiom
            # StringIO.StringIO(line[2:-1].decode("hex")) fails on Python 3;
            # decode the hex payload into bytes and wrap it in BytesIO.
            imgs.setdefault(key, []).append(
                Image.open(BytesIO(bytes.fromhex(line[2:]))))
    # calculate difference with dataset for each boundbox
    word = ""
    for bound in bounds:
        guess = []
        # Crop once per bound instead of repeatedly inside the loops.
        cropped = img.crop(bound)
        total = cropped.size[0]*cropped.size[1]*1.0
        for key in imgs:
            for pattern in imgs[key]:
                diff = ImageChops.difference(
                    cropped, pattern.resize(cropped.size, Image.NEAREST))
                pixels = list(diff.getdata())
                samePixCnt = sum(i == 0 for i in pixels)
                guess.append([samePixCnt, key])
        # Highest matching-pixel count first; its letter is the guess.
        guess.sort(reverse=True)
        word = word+guess[0][1]
        print(total, guess[0:3], guess[0][0]/total, guess[1][0]/total, guess[2][0]/total)
    print(word)
    return word.replace("_", "")
def separate(img):
    """Estimate per-letter bounding boxes from ink histograms.

    Counts black pixels per column, boosts "trough" columns so touching
    letters are not merged, then emits [left, top, right, bottom] boxes.
    The magic thresholds (4, 10, 20, 8) appear hand-tuned for this captcha's
    geometry -- confirm before reusing elsewhere.
    """
    # count number of pixels for each column
    colPixCnts = []
    for col in range(img.size[0]):
        pixels = list(img.crop([col, 0, col+1, img.size[1]]).getdata())
        colPixCnts.append(sum(i==0 for i in pixels))
    print (colPixCnts)
    print("\n")
    # average out pixel counts for trough column
    for i in range(3, len(colPixCnts)-3, 2):
        if colPixCnts[i-3]>4 and colPixCnts[i+3]>4:
            colPixCnts[i-2:i+3] = [j+10 for j in colPixCnts[i-2:i+3]]
    print(colPixCnts)
    print("\n")
    # calculate all bounding boxes of all letters
    bounds = []
    left = 0
    right = 0
    for col in range(img.size[0]): # slice all letters per column
        if left==0 and colPixCnts[col]>20: # if (begin not set) and (col has letter)
            left = col # then letter begin
        if left!=0 and colPixCnts[col]<=20: # if (begin is set) and (col no letter)
            right = col # then letter end
            if right-left>8: # if (the letter is wide enough)
                ##############################################
                # Vertical trim: find the largest run of empty rows; the
                # letter sits between that run and the next empty region.
                print((right-left))
                top = -1
                bottom = -1
                prev = -1
                curr = -1
                for row in range(img.size[1]): # slice single letter per row
                    pixels = list(img.crop([left, row, right, row+1]).getdata())
                    rowPixCnt = sum(i==255 for i in pixels)
                    if rowPixCnt==(right-left): # if (row no letter)
                        curr = row
                        if (curr-prev)>(bottom-top): # if (the letter is tall enough)
                            top = prev
                            bottom = curr
                        prev = curr
                if (img.size[1]-prev)>(bottom-top): # if (the letter align to bottom)
                    top = prev
                    bottom = img.size[1]
                ##############################################
                bounds.append([left, top+1, right, bottom]) # top row should has letter
            left = 0
            right = 0
    print(bounds)
    return bounds
def prepare(im):
    """Return an 8x-enlarged paletted copy where palette value 1 turns black.

    Every pixel equal to 1 in the input (the digits of interest) becomes 0
    (black) on a white background; the result is scaled up bilinearly.
    """
    binarised = Image.new("P", im.size, 255)
    for row in range(im.size[1]):
        for col in range(im.size[0]):
            if im.getpixel((col, row)) == 1:  # these are the numbers to get
                binarised.putpixel((col, row), 0)
    return binarised.resize(
        (binarised.size[0] * 8, binarised.size[1] * 8), Image.BILINEAR)
def _train(img, bounds):
    """Interactively label cropped letters and append new samples to ads.dat.

    Shows each cropped bounding box, asks the user for its letter, and stores
    the crop as "<letter>|<hex-encoded BMP>" -- the format recognize() reads.
    Empty answers and duplicate records are not written.
    """
    with open("ads.dat", "rt") as datfile:
        lines = datfile.readlines()
    with open("ads.dat", "at") as datfile:
        for bound in bounds:
            img.crop(bound).show()
            letter = input("Type in the letters you see in the image above (ENTER to skip): ")
            bmpfile = BytesIO()
            img.crop(bound).save(bmpfile, format='BMP')
            # bytes.hex() replaces the old codecs.encode/decode round-trip;
            # both produce the same lowercase hex string.
            line = letter + "|" + bmpfile.getvalue().hex() + "\n"
            if (letter != "") and (line not in lines):  # if (not skipped) and (not duplicated)
                datfile.write(line)
            print(line)
            bmpfile.close()
def vertical_cut(im):
    """Find letters by scanning columns; return their bounding boxes.

    Builds a binarised copy of *im* (palette value 1 -> black, everything
    else -> white), then sweeps left to right and records a
    [left, 0, right, height] box for every run of columns containing ink.
    """
    im = im.convert("P")
    im2 = Image.new("P", im.size, 255)
    for x in range(im.size[1]):
        for y in range(im.size[0]):
            # Removed the unused `temp` pixel dict and the duplicated
            # convert("P") call from the original.
            if im.getpixel((y, x)) == 1:  # these are the numbers to get
                im2.putpixel((y, x), 0)
    # Sweep across columns, tracking runs of columns that contain ink.
    inletter = False
    foundletter = False
    start = 0
    end = 0
    letters = []
    for y in range(im2.size[0]):  # slice across
        for x in range(im2.size[1]):  # slice down
            if im2.getpixel((y, x)) != 255:
                inletter = True
        if foundletter == False and inletter == True:
            foundletter = True
            start = y
        if foundletter == True and inletter == False:
            foundletter = False
            end = y
            letters.append((start, end))
        # BUG FIX: reset the per-column flag after *every* column. The reset
        # previously sat inside the end-of-letter branch above, which can
        # only run when inletter is already False -- so after the first
        # inked column inletter stayed True forever and no letter end was
        # ever recorded.
        inletter = False
    bounds = []
    for letter in letters:
        bounds.append([letter[0], 0, letter[1], im2.size[1]])
    print(bounds)
    return bounds
if __name__=="__main__":
    # if len(sys.argv) < 2:
    #     print(("usage: %s image" % (sys.argv[0])))
    #     sys.exit(2)
    # file_name = sys.argv[1]
    # img = Image.open(file_name).convert('P')
    # Fetch three fresh captchas from the server and interactively label
    # their letters to grow the ads.dat training set.
    i = 0
    while i < 3 :
        response = requests.get(imgurl, headers = headers)
        the_page = response.content
        file = BytesIO(the_page)
        img = Image.open(file)
        # img = prepare(img)
        # Enlarge 4x so the letter crops shown to the user are readable.
        img = img.resize((img.size[0]*4, img.size[1]*4), Image.BILINEAR)
        img.show()
        # bounds = separate(img)
        bounds = vertical_cut(img)
        _train(img, bounds)
        i = i + 1
| KKfo/captcha_solver | experiment.py | Python | gpl-3.0 | 7,350 | 0.011837 |
#!/usr/bin/env python3
# Copyright (c) 2008-9 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
"""Provides the Item example classes.
"""
class Item(object):
    """A work of art identified by artist, title and an optional year."""

    def __init__(self, artist, title, year=None):
        self.__artist = artist
        self.__title = title
        self.__year = year

    def artist(self):
        """Return the artist's name."""
        return self.__artist

    def setArtist(self, artist):
        """Replace the artist's name."""
        self.__artist = artist

    def title(self):
        """Return the work's title."""
        return self.__title

    def setTitle(self, title):
        """Replace the work's title."""
        self.__title = title

    def year(self):
        """Return the creation year, or None if unknown."""
        return self.__year

    def setYear(self, year):
        """Replace the creation year."""
        self.__year = year

    def __str__(self):
        when = "" if self.__year is None else " in {0}".format(self.__year)
        return "{0} by {1}{2}".format(self.__title, self.__artist, when)
class Painting(Item):
    """A painted work; behaves exactly like a generic Item."""

    def __init__(self, artist, title, year=None):
        super().__init__(artist, title, year)
class Sculpture(Item):
    """An Item that additionally records the material it is made from."""

    def __init__(self, artist, title, year=None, material=None):
        super().__init__(artist, title, year)
        self.__material = material

    def material(self):
        """Return the material, or None if unknown."""
        return self.__material

    def setMaterial(self, material):
        """Replace the material."""
        self.__material = material

    def __str__(self):
        base = super().__str__()
        if self.__material is None:
            return base
        return "{0} ({1})".format(base, self.__material)
class Dimension(object):
    """Physical dimensions (width, height, optional depth) of an object.

    area() and volume() are abstract: concrete shapes must override them.
    """

    def __init__(self, width, height, depth=None):
        self.__width = width
        self.__height = height
        self.__depth = depth

    def width(self):
        """Return the width."""
        return self.__width

    def setWidth(self, width):
        """Replace the width."""
        self.__width = width

    def height(self):
        """Return the height."""
        return self.__height

    def setHeight(self, height):
        """Replace the height."""
        self.__height = height

    def depth(self):
        """Return the depth, or None for flat objects."""
        return self.__depth

    def setDepth(self, depth):
        """Replace the depth."""
        self.__depth = depth

    def area(self):
        # BUG FIX: `raise NotImplemented` raised a TypeError at call time
        # (NotImplemented is a sentinel value, not an exception class);
        # raise the proper exception type instead.
        raise NotImplementedError

    def volume(self):
        raise NotImplementedError
if __name__ == "__main__":
    # Demo: build a small mixed gallery, print each piece, then count how
    # many distinct materials the sculptures use.
    items = []
    items.append(Painting("Cecil Collins", "The Poet", 1941))
    items.append(Painting("Cecil Collins", "The Sleeping Fool", 1943))
    items.append(Painting("Edvard Munch", "The Scream", 1893))
    items.append(Painting("Edvard Munch", "The Sick Child", 1896))
    items.append(Painting("Edvard Munch", "The Dance of Life", 1900))
    items.append(Sculpture("Auguste Rodin", "Eternal Springtime", 1917,
                           "plaster"))
    items.append(Sculpture("Auguste Rodin", "Naked Balzac", 1917,
                           "plaster"))
    items.append(Sculpture("Auguste Rodin", "The Secret", 1925,
                           "bronze"))
    uniquematerials = set()
    for item in items:
        print(item)
        # Only Sculpture defines material(); duck-type via hasattr.
        if hasattr(item, "material"):
            uniquematerials.add(item.material())
    print("Sculptures use {0} unique materials".format(
          len(uniquematerials)))
| paradiseOffice/Bash_and_Cplus-plus | CPP/full_examples/pyqt/chap03/item.py | Python | gpl-2.0 | 3,660 | 0.005738 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le masque <coords2d>."""
import re
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
# Constantes
# Accepts "x.y" where x and y are (possibly negative) integers.
RE_COORDS = re.compile(r"^-?[0-9]+\.-?[0-9]+$")
class Coordonnees2D(Masque):

    """Mask <coords2d>.

    Expects 2D coordinates as a single "x.y" token on the command line.
    """

    nom = "coords2d"
    nom_complet = "coordonnées 2D"

    def init(self):
        """Reset the parsed coordinates."""
        self.coords = (None, None)

    def repartir(self, personnage, masques, commande):
        """Consume the coordinates token from the command line."""
        token = liste_vers_chaine(commande).lstrip()
        token = token.split(" ")[0]
        if not token:
            raise ErreurValidation(
                "Précisez des coordonnées.", False)
        if not RE_COORDS.search(token):
            raise ErreurValidation(
                "Ceci ne sont pas des coordonnées valides.", False)

        self.a_interpreter = token
        commande[:] = commande[len(token):]
        masques.append(self)
        return True

    def valider(self, personnage, dic_masques):
        """Convert the stored "x.y" string into an integer tuple."""
        Masque.valider(self, personnage, dic_masques)
        # RE_COORDS guarantees exactly two integer components.
        x, y = (int(part) for part in self.a_interpreter.split("."))
        self.coords = (x, y)
        return True
| stormi/tsunami | src/primaires/salle/masques/coordonnees2d/__init__.py | Python | bsd-3-clause | 3,189 | 0.0044 |
import json
from mflow_nodes.processors.base import BaseProcessor
from mflow_nodes.stream_node import get_processor_function, get_receiver_function
from mflow_nodes.node_manager import NodeManager
def setup_file_writing_receiver(connect_address, output_filename):
    """
    Setup a node that writes the message headers into an output file for later
    inspection.
    :param connect_address: Address the node connects to.
    :param output_filename: Output file.
    :return: Instance of ExternalProcessWrapper.
    """
    # Start the capture file as an empty JSON list.
    with open(output_filename, 'w') as output_file:
        output_file.write("[]")

    def process_message(message):
        # Re-read the file, append the new header, and rewrite the whole list.
        with open(output_filename, 'r') as input_file:
            recorded_headers = json.load(input_file)
        recorded_headers.append(message.get_header())
        with open(output_filename, 'w') as output:
            output.write(json.dumps(recorded_headers, indent=4))

    processor = BaseProcessor()
    processor.process_message = process_message
    node_manager = NodeManager(
        processor_function=get_processor_function(
            processor=processor, connection_address=connect_address),
        receiver_function=get_receiver_function(
            connection_address=connect_address),
        processor_instance=processor)
    return node_manager
| datastreaming/mflow_nodes | tests/helpers.py | Python | gpl-3.0 | 1,389 | 0.0036 |
'''
Implements the RTS ALUA Target Port Group class.
This file is part of RTSLib.
Copyright (c) 2016 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from .node import CFSNode
from .utils import RTSLibError, RTSLibALUANotSupported, fread, fwrite
import six
# Attribute files under an ALUA target port group's configfs directory that
# may be both read and written.
alua_rw_params = ['alua_access_state', 'alua_access_status',
                  'alua_write_metadata', 'alua_access_type', 'preferred',
                  'nonop_delay_msecs', 'trans_delay_msecs',
                  'implicit_trans_secs', 'alua_support_offline',
                  'alua_support_standby', 'alua_support_transitioning',
                  'alua_support_active_nonoptimized',
                  'alua_support_unavailable', 'alua_support_active_optimized']
# Read-only attribute files.
alua_ro_params = ['tg_pt_gp_id', 'members', 'alua_support_lba_dependent']
# List position == numeric value used for alua_access_type (see .index()
# lookups in the accessors below).
alua_types = ['None', 'Implicit', 'Explicit', 'Implicit and Explicit']
# List position == numeric value reported for alua_access_status.
alua_statuses = ['None', 'Altered by Explicit STPG', 'Altered by Implicit ALUA']
class ALUATargetPortGroup(CFSNode):
"""
ALUA Target Port Group interface
"""
def __repr__(self):
return "<ALUA TPG %s>" % self.name
    def __init__(self, storage_object, name, tag=None):
        """Create or look up an ALUA target port group in configfs.

        @param storage_object: backstore storage object to create ALUA group
            for; must report alua_supported.
        @param name: name of ALUA group
        @param tag: target port group id. If not passed in, try to look
            up an existing ALUA TPG with the same name instead of creating
            a new group.
        """
        if storage_object.alua_supported is False:
            raise RTSLibALUANotSupported("Backend does not support ALUA setup")
        # default_tg_pt_gp takes tag 1
        if tag is not None and (tag > 65535 or tag < 1):
            raise RTSLibError("The TPG Tag must be between 1 and 65535")
        super(ALUATargetPortGroup, self).__init__()
        self.name = name
        self.storage_object = storage_object
        self._path = "%s/alua/%s" % (storage_object.path, name)
        if tag is not None:
            # Create mode: make the configfs directory, then assign the tag.
            try:
                self._create_in_cfs_ine('create')
            except OSError as msg:
                raise RTSLibError(msg)
            try:
                fwrite("%s/tg_pt_gp_id" % self._path, tag)
            except IOError as msg:
                # Tag assignment failed; remove the half-created group so no
                # orphan directory is left behind.
                self.delete()
                raise RTSLibError("Cannot set id to %d: %s" % (tag, str(msg)))
        else:
            # Lookup mode: attach to an already-existing group of this name.
            try:
                self._create_in_cfs_ine('lookup')
            except OSError as msg:
                raise RTSLibError(msg)
# Public
def delete(self):
"""
Delete ALUA TPG and unmap from LUNs
"""
self._check_self()
# default_tg_pt_gp created by the kernel and cannot be deleted
if self.name == "default_tg_pt_gp":
raise RTSLibError("Can not delete default_tg_pt_gp")
# This will reset the ALUA tpg to default_tg_pt_gp
super(ALUATargetPortGroup, self).delete()
def _get_alua_access_state(self):
self._check_self()
path = "%s/alua_access_state" % self.path
return int(fread(path))
def _set_alua_access_state(self, newstate):
self._check_self()
path = "%s/alua_access_state" % self.path
try:
fwrite(path, str(int(newstate)))
except IOError as e:
raise RTSLibError("Cannot change ALUA state: %s" % e)
def _get_alua_access_status(self):
self._check_self()
path = "%s/alua_access_status" % self.path
status = fread(path)
return alua_statuses.index(status)
def _set_alua_access_status(self, newstatus):
self._check_self()
path = "%s/alua_access_status" % self.path
try:
fwrite(path, str(int(newstatus)))
except IOError as e:
raise RTSLibError("Cannot change ALUA status: %s" % e)
def _get_alua_access_type(self):
self._check_self()
path = "%s/alua_access_type" % self.path
alua_type = fread(path)
return alua_types.index(alua_type)
def _set_alua_access_type(self, access_type):
self._check_self()
path = "%s/alua_access_type" % self.path
try:
fwrite(path, str(int(access_type)))
except IOError as e:
raise RTSLibError("Cannot change ALUA access type: %s" % e)
def _get_preferred(self):
self._check_self()
path = "%s/preferred" % self.path
return int(fread(path))
def _set_preferred(self, pref):
self._check_self()
path = "%s/preferred" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set preferred: %s" % e)
def _get_alua_write_metadata(self):
self._check_self()
path = "%s/alua_write_metadata" % self.path
return int(fread(path))
def _set_alua_write_metadata(self, pref):
self._check_self()
path = "%s/alua_write_metadata" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set alua_write_metadata: %s" % e)
def _get_alua_support_active_nonoptimized(self):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
return int(fread(path))
def _set_alua_support_active_nonoptimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_nonoptimized: %s" % e)
def _get_alua_support_active_optimized(self):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
return int(fread(path))
def _set_alua_support_active_optimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_optimized: %s" % e)
def _get_alua_support_offline(self):
self._check_self()
path = "%s/alua_support_offline" % self.path
return int(fread(path))
def _set_alua_support_offline(self, enabled):
self._check_self()
path = "%s/alua_support_offline" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_offline: %s" % e)
def _get_alua_support_unavailable(self):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
return int(fread(path))
def _set_alua_support_unavailable(self, enabled):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_unavailable: %s" % e)
def _get_alua_support_standby(self):
self._check_self()
path = "%s/alua_support_standby" % self.path
return int(fread(path))
def _set_alua_support_standby(self, enabled):
self._check_self()
path = "%s/alua_support_standby" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_standby: %s" % e)
def _get_alua_support_transitioning(self):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
return int(fread(path))
def _set_alua_support_transitioning(self, enabled):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_transitioning: %s" % e)
def _get_alua_support_lba_dependent(self):
self._check_self()
path = "%s/alua_support_lba_dependent" % self.path
return int(fread(path))
def _get_members(self):
self._check_self()
path = "%s/members" % self.path
member_list = []
for member in fread(path).splitlines():
lun_path = member.split("/")
if len(lun_path) != 4:
continue
member_list.append({ 'driver': lun_path[0], 'target': lun_path[1],
'tpgt': int(lun_path[2].split("_", 1)[1]),
'lun': int(lun_path[3].split("_", 1)[1]) })
return member_list
def _get_tg_pt_gp_id(self):
self._check_self()
path = "%s/tg_pt_gp_id" % self.path
return int(fread(path))
def _get_trans_delay_msecs(self):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
return int(fread(path))
def _set_trans_delay_msecs(self, secs):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set trans_delay_msecs: %s" % e)
def _get_implicit_trans_secs(self):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
return int(fread(path))
def _set_implicit_trans_secs(self, secs):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set implicit_trans_secs: %s" % e)
def _get_nonop_delay_msecs(self):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
return int(fread(path))
def _set_nonop_delay_msecs(self, delay):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
try:
fwrite(path, str(int(delay)))
except IOError as e:
raise RTSLibError("Cannot set nonop_delay_msecs: %s" % e)
def dump(self):
d = super(ALUATargetPortGroup, self).dump()
d['name'] = self.name
d['tg_pt_gp_id'] = self.tg_pt_gp_id
for param in alua_rw_params:
d[param] = getattr(self, param, None)
return d
alua_access_state = property(_get_alua_access_state, _set_alua_access_state,
doc="Get or set ALUA state. "
"0 = Active/optimized, "
"1 = Active/non-optimized, "
"2 = Standby, "
"3 = Unavailable, "
"4 = LBA Dependent, "
"14 = Offline, "
"15 = Transitioning")
alua_access_type = property(_get_alua_access_type, _set_alua_access_type,
doc="Get or set ALUA access type. "
"1 = Implicit, 2 = Explicit, 3 = Both")
alua_access_status = property(_get_alua_access_status,
_set_alua_access_status,
doc="Get or set ALUA access status. "
"0 = None, "
"1 = Altered by Explicit STPG, "
"2 = Altered by Implicit ALUA")
preferred = property(_get_preferred, _set_preferred,
doc="Get or set preferred bit. 1 = Pref, 0 Not-Pre")
alua_write_metadata = property(_get_alua_write_metadata,
_set_alua_write_metadata,
doc="Get or set alua_write_metadata flag. "
"enable (1) or disable (0)")
tg_pt_gp_id = property(_get_tg_pt_gp_id, doc="Get ALUA Target Port Group ID")
members = property(_get_members, doc="Get LUNs in Target Port Group")
alua_support_active_nonoptimized = property(_get_alua_support_active_nonoptimized,
_set_alua_support_active_nonoptimized,
doc="Enable (1) or disable (0) "
"Active/non-optimized support")
alua_support_active_optimized = property(_get_alua_support_active_optimized,
_set_alua_support_active_optimized,
doc="Enable (1) or disable (0) "
"Active/optimized support")
alua_support_offline = property(_get_alua_support_offline,
_set_alua_support_offline,
doc="Enable (1) or disable (0) "
"offline support")
alua_support_unavailable = property(_get_alua_support_unavailable,
_set_alua_support_unavailable,
doc="enable (1) or disable (0) "
"unavailable support")
alua_support_standby = property(_get_alua_support_standby,
_set_alua_support_standby,
doc="enable (1) or disable (0) "
"standby support")
alua_support_lba_dependent = property(_get_alua_support_lba_dependent,
doc="show lba_dependent support "
"enabled (1) or disabled (0)")
alua_support_transitioning = property(_get_alua_support_transitioning,
_set_alua_support_transitioning,
doc="enable (1) or disable (0) "
"transitioning support")
trans_delay_msecs = property(_get_trans_delay_msecs,
_set_trans_delay_msecs,
doc="msecs to delay state transition")
implicit_trans_secs = property(_get_implicit_trans_secs,
_set_implicit_trans_secs,
doc="implicit transition time limit")
nonop_delay_msecs = property(_get_nonop_delay_msecs, _set_nonop_delay_msecs,
doc="msecs to delay IO when non-optimized")
@classmethod
def setup(cls, storage_obj, alua_tpg, err_func):
name = alua_tpg['name']
if name == 'default_tg_pt_gp':
return
alua_tpg_obj = cls(storage_obj, name, alua_tpg['tg_pt_gp_id'])
for param, value in six.iteritems(alua_tpg):
if param != 'name' and param != 'tg_pt_gp_id':
try:
setattr(alua_tpg_obj, param, value)
except:
raise RTSLibError("Could not set attribute '%s' for alua tpg '%s'"
% (param, alua_tpg['name']))
| cvubrugier/rtslib-fb | rtslib/alua.py | Python | apache-2.0 | 15,637 | 0.001151 |
from django.core.management.base import BaseCommand, CommandError
from django.core.cache import cache
class Command(BaseCommand):
    """Management command that clears the configured Django cache."""

    help = 'Clears the cache'

    def handle(self, *args, **options):
        # Write through self.stdout instead of print() so the message goes
        # through Django's OutputWrapper (respects styling/--no-color) and
        # can be captured when invoked via call_command().
        self.stdout.write("Clearing cache!")
        cache.clear()
| classam/threepanel | threepanel/dashboard/management/commands/clear_cache.py | Python | agpl-3.0 | 258 | 0.007752 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PRIMO2 -- Probabilistic Inference Modules.
# Copyright (C) 2013-2017 Social Cognitive Systems Group,
# Faculty of Technology, Bielefeld University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
from primo2.networks import BayesianNetwork
from primo2.nodes import DiscreteNode
from primo2.io import XMLBIFParser
from primo2.inference.order import Orderer
from primo2.inference.exact import VariableElimination
from primo2.inference.exact import FactorTree
class EliminationOderTest(unittest.TestCase):
    """Tests for the elimination-order helpers in primo2.inference.order.

    NOTE(review): the class name looks like a typo for
    "EliminationOrderTest", but renaming would change test discovery, so
    it is left unchanged.
    """

    def test_min_degree_elimination_order(self):
        bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
        order = Orderer.get_min_degree_order(bn)
        # Accept any of the equivalent min-degree orders: hash randomization
        # in Python 3 means ties can be broken differently on each run.
        potentialOrders = [["slippery_road", "wet_grass", "sprinkler", "winter", "rain"],
                           ["slippery_road", "wet_grass", "sprinkler", "rain", "winter"],
                           ["slippery_road", "wet_grass", "rain", "sprinkler", "winter"],
                           ["slippery_road", "wet_grass", "rain", "winter", "sprinkler"],
                           ["slippery_road", "wet_grass", "winter", "rain", "sprinkler"],
                           ["slippery_road", "wet_grass", "winter", "sprinkler", "rain"],
                           ["slippery_road", "winter", "sprinkler", "wet_grass", "rain"],
                           ["slippery_road", "winter", "sprinkler", "rain", "wet_grass"],
                           ["slippery_road", "winter", "rain", "sprinkler", "wet_grass"],
                           ["slippery_road", "winter", "rain", "wet_grass", "sprinkler"],
                           ["slippery_road", "winter", "wet_grass", "sprinkler", "rain"],
                           ["slippery_road", "winter", "wet_grass", "rain", "sprinkler"],
                           ["slippery_road", "sprinkler", "winter", "wet_grass", "rain"],
                           ["slippery_road", "sprinkler", "winter", "rain", "wet_grass"],
                           ["slippery_road", "sprinkler", "wet_grass", "winter", "rain"],
                           ["slippery_road", "sprinkler", "wet_grass", "rain", "winter"],
                           ["slippery_road", "sprinkler", "rain", "winter", "wet_grass"],
                           ["slippery_road", "sprinkler", "rain", "wet_grass", "winter"],
                           ["slippery_road", "rain", "wet_grass", "sprinkler", "winter"],
                           ["slippery_road", "rain", "wet_grass", "winter", "sprinkler"],
                           ["slippery_road", "rain", "winter", "wet_grass", "sprinkler"],
                           ["slippery_road", "rain", "winter", "sprinkler", "wet_grass"],
                           ["slippery_road", "rain", "sprinkler", "wet_grass", "winter"],
                           ["slippery_road", "rain", "sprinkler", "winter", "wet_grass"]]
        self.assertTrue(order in potentialOrders)
        """
        TODO BETTER TEST WITH CERTAIN ORDER!
        """
        # Non-network input must be rejected with a TypeError.
        with self.assertRaises(TypeError) as cm:
            Orderer.get_min_degree_order("Not a Bayesian Network.")
        self.assertEqual(str(cm.exception), "Only Bayesian Networks are currently supported.")

    def test_random_elimination_order(self):
        bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")
        order = Orderer.get_random_order(bn)
        variables = ["slippery_road", "winter", "rain", "sprinkler", "wet_grass"]
        # A random order is only required to be a permutation of all variables.
        self.assertEqual(len(order), len(variables))
        for v in variables:
            self.assertTrue(v in order)
        # Non-network input must be rejected with a TypeError.
        with self.assertRaises(TypeError) as cm:
            Orderer.get_min_degree_order("Not a Bayesian Network.")
        self.assertEqual(str(cm.exception), "Only Bayesian Networks are currently supported.")
class VariableEliminationTest(unittest.TestCase):
    """Tests for naive and bucket-based variable elimination on the
    "slippery" example network (expected values precomputed by hand)."""

    def setUp(self):
        # Fresh network per test; resource path is relative to project root.
        self.bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")

    def test_empty_cpt(self):
        # A node without a CPT must yield an all-zero (not uniform) marginal.
        bn = BayesianNetwork()
        from primo2.nodes import DiscreteNode
        n1 = DiscreteNode("a")
        n2 = DiscreteNode("b")
        bn.add_node(n1)
        bn.add_node(n2)
        bn.add_edge(n1,n2)
        res = VariableElimination.naive_marginals(bn, ["a"])
        np.testing.assert_array_almost_equal(res.get_potential(), np.array([0.0, 0.0]))

    def test_naive_marginals(self):
        resFactor = VariableElimination.naive_marginals(self.bn, ["winter"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.6, 0.4]))

    def test_naive_marginal_evidence_trivial(self):
        # Evidence on a parent: P(rain | winter=true) is read off the CPT.
        resFactor = VariableElimination.naive_marginals(self.bn, ["rain"], {"winter": "true"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.8, 0.2]))

    def test_naive_marginal_evidence_trivial_multiple_evidence(self):
        resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"sprinkler": "true", "rain": "false"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.1, 0.9]))

    def test_naive_marginal_evidence(self):
        resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"winter": "true"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.668, 0.332]))

    def test_naive_marginal_evidence_multiple_evidence(self):
        resFactor = VariableElimination.naive_marginals(self.bn, ["wet_grass"], {"winter": "true", "rain": "false"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.02, 0.98]))

    def test_bucket_marginals(self):
        resFactor = VariableElimination.bucket_marginals(self.bn, ["winter"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.6, 0.4]))

    def test_bucket_marginal_evidence_trivial(self):
        # Diagnostic query: evidence on a descendant of the query variable.
        resFactor = VariableElimination.bucket_marginals(self.bn, ["rain"], {"wet_grass": "false"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.158858, 0.841142]))

    def test_bucket_marginal_evidence_trivial_multiple_evidence(self):
        resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"sprinkler": "true", "rain": "false"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.1, 0.9]))

    def test_bucket_marginal_evidence(self):
        resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"winter": "true"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.668, 0.332]))

    def test_bucket_marginal_evidence_multiple_evidence(self):
        resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass"], {"winter": "true", "rain": "false"})
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.02, 0.98]))

    ### TODO check multiple marginals
    # def test_bucket_multiple_marginals(self):
    #     resFactor = VariableElimination.bucket_marginals(self.bn, ["wet_grass", "rain"], {"winter": "true", "slippery_road": "false"})
class FactorEliminationTest(unittest.TestCase):
    """Tests for join-tree construction and message-passing inference
    (FactorTree) on the "slippery" example network."""

    def setUp(self):
        # Fresh network per test; resource path is relative to project root.
        self.bn = XMLBIFParser.parse("primo2/tests/slippery.xbif")

    def test_not_connected_node_without_cpt(self):
#        bn = BayesianNetwork()
        from primo2.nodes import DiscreteNode
        n = DiscreteNode("a")
        self.bn.add_node(n)
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence({"a": "False"})
        res = ft.marginals(["a"])
        # Even with evidence set, when we do not have a cpt the result should
        # remain at 0 -- if only to indicate that something might be wrong
        # with that node.
        np.testing.assert_array_almost_equal(res.get_potential(), np.array([0.0, 0.0]))

    def test_empty_cpt(self):
        bn = BayesianNetwork()
        from primo2.nodes import DiscreteNode
        n1 = DiscreteNode("a")
        n2 = DiscreteNode("b")
        bn.add_node(n1)
        bn.add_node(n2)
        bn.add_edge(n1,n2)
        ft = FactorTree.create_jointree(bn)
        res = ft.marginals(["a"])
        np.testing.assert_array_almost_equal(res.get_potential(), np.array([0.0, 0.0]))

    def test_create_jointree(self):
        order = ["slippery_road", "wet_grass", "sprinkler", "winter", "rain"]
        ft = FactorTree.create_jointree(self.bn, order=order)
        # As in the order tests, all concatenation-equivalent clique names
        # must be accepted because of Python 3 hash randomization.
        desiredCliques = ["slippery_roadrain", "wet_grasssprinklerrain",
                          "wet_grassrainsprinkler", "sprinklerwinterrain",
                          "sprinklerrainwinter", "wintersprinklerrain",
                          "winterrainsprinkler", "rainsprinklerwinter",
                          "rainwintersprinkler"]
        self.assertEqual(len(ft.tree), 3)
        for n in ft.tree.nodes(): # was nodes_iter in networkx 1.x
            self.assertTrue(n in desiredCliques)

    def test_jointree_marginals(self):
        ft = FactorTree.create_jointree(self.bn)
        resFactor = ft.marginals(["winter"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.6, 0.4]))

    def test_jointree_marginals2(self):
        ft = FactorTree.create_jointree(self.bn)
        resFactor = ft.marginals(["slippery_road"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.364, 0.636]))

    def test_jointree_marginals3(self):
        ft = FactorTree.create_jointree(self.bn)
        resFactor = ft.marginals(["sprinkler"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.42, 0.58]))

    def test_jointree_marginals_trivial_evidence(self):
        # Evidence on the queried variable itself pins its marginal.
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence({"slippery_road":"true"})
        resFactor = ft.marginals(["slippery_road"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([1.0, 0.0]))

    def test_jointree_evidence_trivial(self):
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence({"wet_grass": "false"})
        resFactor = ft.marginals(["rain"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.158858, 0.841142]))

    def test_jointree_marginal_evidence_trivial_multiple_evidence(self):
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence({"sprinkler": "true", "rain": "false"})
        resFactor = ft.marginals(["wet_grass"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.1, 0.9]))

    def test_jointree_marginal_evidence(self):
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence({"winter": "true"})
        resFactor = ft.marginals(["wet_grass"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.668, 0.332]))

    def test_jointree_marginal_evidence_multiple_evidence(self):
        ft = FactorTree.create_jointree(self.bn)
        ft.set_evidence( {"winter": "true", "rain": "false"})
        resFactor = ft.marginals(["wet_grass"])
        np.testing.assert_array_almost_equal(resFactor.get_potential(), np.array([0.02, 0.98]))

    def test_jointree_marginal_soft_evidence(self):
        # Soft (virtual) evidence: the cloth marginal must match the given
        # posterior exactly, and sold must follow from it.
        bn = BayesianNetwork()
        cloth = DiscreteNode("cloth", ["green","blue", "red"])
        sold = DiscreteNode("sold")
        bn.add_node(cloth)
        bn.add_node(sold)
        bn.add_edge("cloth", "sold")
        cloth.set_cpd(np.array([0.3,0.3,0.4]))
        sold.set_cpd(np.array([[0.4, 0.4, 0.8],
                               [0.6, 0.6, 0.2]]))
        tree = FactorTree.create_jointree(bn)
        tree.set_evidence({"cloth": np.array([0.7,0.25,0.05])}, softPosteriors=True)
        np.testing.assert_array_almost_equal(tree.marginals(["cloth"]).get_potential(), np.array([0.7,0.25,0.05]))
        np.testing.assert_array_almost_equal(tree.marginals(["sold"]).get_potential(), np.array([0.42,0.58]))
if __name__ == "__main__":
    # Workaround so that this script also finds the resource files
    # (primo2/tests/*.xbif, referenced relative to the project root) when
    # run directly from within the tests folder.
    import os
    os.chdir("../..")
    unittest.main()
| SocialCognitiveSystems/PRIMO | primo2/tests/Inference_test.py | Python | lgpl-3.0 | 13,455 | 0.012337 |
#---------------------------------
#Joseph Boyd - joseph.boyd@epfl.ch
#---------------------------------
from bs4 import BeautifulSoup
from urllib2 import urlopen
import csv
BASE_URL = 'http://www.tutiempo.net'
PAGE_1 = '/en/Climate/India/IN.html'
PAGE_2 = '/en/Climate/India/IN_2.html'
headings = ['Location', 'Year', 'Month', 'T', 'TM', 'Tm', 'SLP', 'H', 'PP', 'VV', 'V', 'VM', 'VG', 'RA', 'SN', 'TS', 'FG']
MAX_ROWS = 100000
FIRST_YEAR = 1999
def get_links(url):
    """Return the absolute location URLs listed on a tutiempo index page."""
    soup = BeautifulSoup(urlopen(url).read(), 'lxml')
    listing = soup.find('div', id='ListadosV4')
    return [BASE_URL + item.a['href'] for item in listing.findAll('li')]
def write_log(message):
    """Append `message` to log.txt in the current working directory.

    Uses a context manager so the file handle is always closed, even if
    the write raises (the original opened/closed manually and leaked the
    handle on error).
    """
    with open("log.txt", 'a') as f_log:
        f_log.write(message)
def main():
    """Scrape monthly climate tables for every listed Indian location on
    tutiempo.net (years >= FIRST_YEAR) and stream the rows into numbered
    CSV files of at most MAX_ROWS data rows each.
    """
    links = get_links(BASE_URL + PAGE_1)
    links.extend(get_links(BASE_URL + PAGE_2))
    csvfile = open('climate_data_1.csv', 'wb')
    csv_writer = csv.writer(csvfile)
    csv_writer.writerow(headings)
    num_rows = 0; num_files = 1
    for link in links:
        print ('Retrieving data from %s ...\n'%(link))
        html = urlopen(link).read()
        soup = BeautifulSoup(html, 'lxml')
        year_list = soup.find('div', id='SelectYear')
        # the location slug is the second-to-last path component of the URL
        title = link.split('/')[-2]
        print ('Location: %s\n'%(title))
        if year_list is None:
            # location page without a year selector -> no data available
            continue
        for li in year_list.findAll('li'):
            year = int(','.join(li.findAll(text=True)))
            print (str(year) + '\n')
            if year >= FIRST_YEAR:
                html = urlopen(BASE_URL + li.a['href']).read()
                soup = BeautifulSoup(html, 'lxml')
                month_list = soup.find('div', id='SelectMes')
                if month_list is None:
                    # some pages use a plain list instead of the month selector
                    month_list = soup.find('div','ListasLeft')
                if month_list is None:
                    continue
                for month in month_list.findAll('li'):
                    month_name = ','.join(month.findAll(text=True))
                    if month_name[0:10] == 'Historical':
                        # entries read "Historical <month>"; keep the month only
                        month_name = month_name.split(" ")[1]
                    print (month_name + '\n')
                    html = urlopen(BASE_URL + month.a['href']).read()
                    soup = BeautifulSoup(html, 'lxml')
                    climate_table = soup.find('table', 'TablaClima')
                    if climate_table is None:
                        continue
                    climate_rows = climate_table.findAll('tr')
                    # skip the header row and the two trailing summary rows
                    for row in climate_rows[1:-2]:
                        data = row.findAll('td')
                        print_line = [title, year, month_name]
                        for datum in data:
                            a = ','.join(datum.findAll(text=True))
                            print_line.append(a.encode('utf8'))
                        csv_writer.writerow(print_line)
                        num_rows += 1
                        if num_rows == MAX_ROWS:
                            # roll over to a new numbered output file
                            csvfile.close()
                            num_files += 1
                            csvfile = open('climate_data_%s.csv'%(num_files), 'wb')
                            csv_writer = csv.writer(csvfile)
                            csv_writer.writerow(headings)
                            num_rows = 0
    csvfile.close()
| FAB4D/humanitas | data_collection/ts/climate/get_climate_data.py | Python | bsd-3-clause | 3,498 | 0.006861 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` that wraps a [batch] matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorFullMatrix"]
@tf_export("linalg.LinearOperatorFullMatrix")
class LinearOperatorFullMatrix(linear_operator.LinearOperator):
  """`LinearOperator` that wraps a [batch] matrix.

  This operator wraps a [batch] matrix `A` (which is a `Tensor`) with shape
  `[B1,...,Bb, M, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `M x N` matrix.

  ```python
  # Create a 2 x 2 linear operator.
  matrix = [[1., 2.], [3., 4.]]
  operator = LinearOperatorFullMatrix(matrix)

  operator.to_dense()
  ==> [[1., 2.]
       [3., 4.]]

  operator.shape
  ==> [2, 2]

  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [2, 4] Tensor
  operator.matmul(x)
  ==> Shape [2, 4] Tensor

  # Create a [2, 3] batch of 4 x 4 linear operators.
  matrix = tf.random_normal(shape=[2, 3, 4, 4])
  operator = LinearOperatorFullMatrix(matrix)
  ```

  #### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [M, N], with b >= 0
  x.shape = [B1,...,Bb] + [N, R], with R >= 0.
  ```

  #### Performance

  `LinearOperatorFullMatrix` has exactly the same performance as would be
  achieved by using standard `TensorFlow` matrix ops. Intelligent choices are
  made based on the following initialization hints.

  * If `dtype` is real, and `is_self_adjoint` and `is_positive_definite`, a
    Cholesky factorization is used for the determinant and solve.

  In all cases, suppose `operator` is a `LinearOperatorFullMatrix` of shape
  `[M, N]`, and `x.shape = [N, R]`. Then

  * `operator.matmul(x)` is `O(M * N * R)`.
  * If `M=N`, `operator.solve(x)` is `O(N^3 * R)`.
  * If `M=N`, `operator.determinant()` is `O(N^3)`.

  If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               matrix,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorFullMatrix"):
    r"""Initialize a `LinearOperatorFullMatrix`.

    Args:
      matrix:  Shape `[B1,...,Bb, M, N]` with `b >= 0`, `M, N >= 0`.
        Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
        `complex128`.
      is_non_singular:  Expect that this operator is non-singular.
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite:  Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`.  Note that we do not require the operator to be
        self-adjoint to be positive-definite.  See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      TypeError:  If `diag.dtype` is not an allowed type.
    """
    # Validate dtype/rank eagerly, then register the matrix as the sole
    # graph parent of this operator.
    with ops.name_scope(name, values=[matrix]):
      self._matrix = ops.convert_to_tensor(matrix, name="matrix")
      self._check_matrix(self._matrix)

      super(LinearOperatorFullMatrix, self).__init__(
          dtype=self._matrix.dtype,
          graph_parents=[self._matrix],
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)

  def _check_matrix(self, matrix):
    """Static check of the `matrix` argument."""
    allowed_dtypes = [
        dtypes.float16,
        dtypes.float32,
        dtypes.float64,
        dtypes.complex64,
        dtypes.complex128,
    ]

    matrix = ops.convert_to_tensor(matrix, name="matrix")

    dtype = matrix.dtype
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument matrix must have dtype in %s. Found: %s"
          % (allowed_dtypes, dtype))

    # Rank check is static-only: unknown rank (ndims is None) is accepted.
    if matrix.get_shape().ndims is not None and matrix.get_shape().ndims < 2:
      raise ValueError(
          "Argument matrix must have at least 2 dimensions. Found: %s"
          % matrix)

  def _shape(self):
    """Static shape of the wrapped matrix."""
    return self._matrix.get_shape()

  def _shape_tensor(self):
    """Dynamic (graph-time) shape of the wrapped matrix."""
    return array_ops.shape(self._matrix)

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    # Broadcasting matmul handles mismatched (but compatible) batch dims.
    return linear_operator_util.matmul_with_broadcast(
        self._matrix, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)

  def _to_dense(self):
    """The dense representation is simply the wrapped matrix itself."""
    return self._matrix
| benoitsteiner/tensorflow-xsmm | tensorflow/python/ops/linalg/linear_operator_full_matrix.py | Python | apache-2.0 | 6,537 | 0.001836 |
# -*- coding: utf-8 -*-
"""
Some Python tools for reading select data from Nastran .op2 files.
Converted from the Yeti version.
Can read files in big or little endian format.
@author: Tim Widrick
"""
from __future__ import print_function
import sys
import struct
import itertools as it
import warnings
from six import PY2
import numpy as np
import pyNastran.op2.dev.n2y as n2y
# Notes on the op2 format.
#
# DATA BLOCK:
# All data blocks (including header) start with header 3 elements:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record
# - endrec = reclen
#
# DATA SET, can be multiple records:
# Next is [reclen, data, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes
# in data
# - data = reclen bytes long, variable format; may be part of
# a data set or the complete set
# - endrec = reclen
#
# Next is info about whether we're done with current data set:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes
# in key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in
# next record; if 0, done with data set
# - endrec = reclen
#
# If not done, we have [reclen, data, endrec] for part 2 (and
# so on) for the record.
#
# Once data set is complete, we have: [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record (I think ... not useful?)
# - endrec = reclen
#
# Then: [reclen, rec_type, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# rec_type (either 4 or 8)
# - rec_type = 0 if table (4 or 8 bytes)
# - endrec = reclen
#
# Then, info on whether we're done with data block:
# [reclen, key, endrec]
# - reclen = 1 32-bit integer that specifies number of bytes in
# key (either 4 or 8)
# - key = 4 or 8 byte integer specifying number of words in next
# record; if 0, done with data block
# - endrec = reclen
#
# If not done, we have [reclen, data, endrec] for record 2 and so
# on, until data block is read in.
def expand_dof(ids, pvgrids):
    """
    Expand a vector of ids into a two-column [id, dof] matrix.

    Parameters
    ----------
    ids : 1d array-like
        Vector of node ids.
    pvgrids : 1d array-like
        True/False vector the same length as `ids`.  True entries mark
        grids, which receive DOF 1-6; every other id gets a single row
        with DOF 0.

    Returns
    -------
    dof : 2d ndarray
        Two-column matrix: [id, dof].

    Examples
    --------
    >>> expand_dof([1, 2], [True, False]).tolist()
    [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [2, 0]]
    """
    ids, pvgrids = np.atleast_1d(ids, pvgrids)
    count = len(ids)
    # Per-row DOF table: grid rows get columns 1..6, others keep zeros.
    dof_table = np.zeros((count, 6), int)
    dof_table[pvgrids] = np.arange(1, 7)
    # Mask of which (row, column) cells survive into the output.
    keep = np.zeros((count, 6), bool)
    keep[:, 0] = True
    keep[pvgrids, 1:] = True
    # Replicate each id across the kept columns of its row.
    id_table = np.reshape(ids, (-1, 1)) * keep
    flat_keep = keep.ravel()
    return np.column_stack((id_table.ravel()[flat_keep],
                            dof_table.ravel()[flat_keep]))
class OP2(object):
"""Class for reading Nastran op2 files and nas2cam data files."""
    def __init__(self, filename=None):
        """Create a reader, opening `filename` immediately if given.

        Parameters
        ----------
        filename : string or None
            Path to an op2 file; if None, no file is opened yet.
        """
        # File handle and the lazily-built function-code table (see CodeFuncs).
        self._fileh = None
        self._CodeFuncs = None
        if isinstance(filename, str):
            self._op2_open(filename)
    def __del__(self):
        # Close the op2 file handle if it is still open.
        if self._fileh:
            self._fileh.close()
            self._fileh = None
    def __enter__(self):
        """Support use as a context manager; returns the reader itself."""
        return self
    def __exit__(self, type, value, traceback):
        # Close the file on context exit.  Returning False lets any
        # exception raised inside the `with` block propagate.
        if self._fileh:
            self._fileh.close()
            self._fileh = None
        return False
    @property
    def CodeFuncs(self):
        """See :func:`_check_code`.

        Lazily builds and caches a dictionary mapping Nastran "function
        codes" (1-7, plus the 'big' variant) to callables that reduce an
        item code to the value :func:`_check_code` compares against.
        """
        if self._CodeFuncs is None:
            def func1(item_code):
                if item_code // 1000 in [2, 3, 6]:
                    return 2
                return 1

            def func2(item_code):
                return item_code % 100

            def func3(item_code):
                return item_code % 1000

            def func4(item_code):
                return item_code // 10

            def func5(item_code):
                return item_code % 10

            def func6(item_code):
                # NOTE(review): behavior unverified against MSC docs,
                # hence the runtime warning.
                warnings.warn('Function code 6 method not verified',
                              RuntimeWarning)
                if item_code & 8:
                    return 0
                return 1

            def func7(item_code):
                v = item_code // 1000
                if v in [0, 2]:
                    return 0
                if v in [1, 3]:
                    return 1
                return 2

            def funcbig(func_code, item_code):
                # Function codes > 65535: mask the item code with the low
                # 16 bits of the function code.
                return item_code & (func_code & 65535)

            self._CodeFuncs = {
                1: func1, 2: func2, 3: func3, 4: func4,
                5: func5, 6: func6, 7: func7,
                'big': funcbig,
            }
        return self._CodeFuncs
def _op2_open(self, filename):
"""
Open op2 file in correct endian mode.
Sets these class variables:
_fileh : file handle
Value returned by open().
_swap : bool
True if bytes must be swapped to correct endianness.
_bit64 : True or False
True if 'key' integers are 64-bit.
_endian : string
Will be '=' if `swap` is False; otherwise, either '>' or '<'
for big-endian and little-endian, respectively.
_intstr : string
Either `endian` + 'i4' or `endian` + 'i8'.
_ibytes : integer
Either 4 or 8 (corresponds to `intstr`)
_int32str : string
`endian` + 'i4'.
_label : string
The op2 header label or, if none, None.
_date : vector
Three element date vector, or None.
_nastheader : string
Nastran header for file, or None.
_postheaderpos : integer
File position after header.
dbnames : dictionary
See :func:`directory` for description. Contains data block
names, bytes in file, file positions, and for matrices, the
matrix size.
dblist : list
See :func:`directory` for description. Contains same info
as dbnames, but in a list of ordered and formatted strings.
_Str4 : struct.Struct object
Precompiled for reading 4 byte integers (corresponds to
`int32str`).
_Str : struct.Struct object
Precompiled for reading 4 or 8 byte integers (corresponds
to `intstr`).
File is positioned after the header label (at `postheaderpos`).
"""
self._fileh = open(filename, 'rb')
self.dbnames = []
self.dblist = []
reclen = struct.unpack('i', self._fileh.read(4))[0]
self._fileh.seek(0)
reclen = np.array(reclen, dtype=np.int32)
if not np.any(reclen == [4, 8]):
self._swap = True
reclen = reclen.byteswap()
if not np.any(reclen == [4, 8]):
self._fileh.close()
self._fileh = None
raise RuntimeError('Could not decipher file. First'
'4-byte integer should be 4 or 8.')
if sys.byteorder == 'little':
self._endian = '>'
else:
self._endian = '<'
else:
self._swap = False
self._endian = '='
self._Str4 = struct.Struct(self._endian + 'i')
if reclen == 4:
self._bit64 = False
self._intstr = self._endian + 'i4'
self._intstru = self._endian + '%di'
self._ibytes = 4
self._Str = self._Str4
else:
self._bit64 = True
self._intstr = self._endian + 'i8'
self._intstru = self._endian + '%dq'
self._ibytes = 8
self._Str = struct.Struct(self._endian + 'q')
# print('bit64 = ', self._bit64)
self._rowsCutoff = 3000
self._int32str = self._endian + 'i4'
self._int32stru = self._endian + '%di'
self._read_op2_header()
self._postheaderpos = self._fileh.tell()
self.directory(verbose=False)
def _get_key(self):
"""Reads [reclen, key, endrec] triplet and returns key."""
self._fileh.read(4)
key = self._Str.unpack(self._fileh.read(self._ibytes))[0]
self._fileh.read(4)
return key
def _skip_key(self, n):
"""Skips `n` key triplets ([reclen, key, endrec])."""
self._fileh.read(n*(8+self._ibytes))
    def _read_op2_header(self):
        """
        Read the op2 file header (date, Nastran header and label).

        Sets ``self._date``, ``self._nastheader`` and ``self._label``;
        all three are set to None when the file has no header (the
        first key is not 3).  Leaves the file positioned after the
        header.  Returns None.
        """
        key = self._get_key()
        if key != 3:
            # no header present; rewind so the caller starts at the
            # first data block
            self._fileh.seek(0)
            self._date = self._nastheader = self._label = None
            return
        self._fileh.read(4)  # reclen
        frm = self._intstru % key
        bytes = self._ibytes*key
        self._date = struct.unpack(frm, self._fileh.read(bytes))
        # self._date = np.fromfile(self._fileh, self._intstr, key)
        self._fileh.read(4)  # endrec
        self._get_key()
        reclen = self._Str4.unpack(self._fileh.read(4))[0]
        self._nastheader = self._fileh.read(reclen).decode()
        self._fileh.read(4)  # endrec
        self._get_key()
        reclen = self._Str4.unpack(self._fileh.read(4))[0]
        # label: stripped and with interior spaces removed
        self._label = self._fileh.read(reclen).decode().\
            strip().replace(' ', '')
        self._fileh.read(4)  # endrec
        self._skip_key(2)
def _valid_name(self, bstr):
"""
Returns a valid variable name from the byte string `bstr`.
"""
if PY2:
return bstr.strip()
else:
return ''.join(chr(c) for c in bstr if (
47 < c < 58 or 64 < c < 91 or c == 95 or 96 < c < 123))
def _read_op2_end_of_table(self):
"""Read Nastran output2 end-of-table marker.
Returns
-------
tuple: (eot, key)
eot : integer
1 if end-of-file has been reached and 0 otherwise.
key : integer
0 of eot is 1; next key value otherwise.
"""
bstr = self._fileh.read(4) # reclen
if len(bstr) == 4:
key = self._Str.unpack(self._fileh.read(self._ibytes))[0]
self._fileh.read(4) # endrec
else:
key = 0
if key == 0:
return 1, 0
return 0, key
    def _read_op2_name_trailer(self):
        """Read Nastran output2 datablock name and trailer.

        Returns
        -------
        tuple: (name, trailer, type)
        name : string
            Name of upcoming data block (upper case).
        trailer : tuple
            Data block trailer.
        type : 0 or 1
            0 means table, 1 means matrix. I think.

        All outputs will be None for end-of-file.
        """
        eot, key = self._read_op2_end_of_table()
        if key == 0:
            # end of file reached
            return None, None, None
        reclen = self._Str4.unpack(self._fileh.read(4))[0]
        db_binary_name = self._fileh.read(reclen)
        db_name = self._valid_name(db_binary_name)
        self._fileh.read(4)  # endrec
        self._get_key()
        key = self._get_key()
        self._fileh.read(4)  # reclen
        frm = self._intstru % key
        nbytes = self._ibytes * key
        # prevents a giant read
        assert nbytes > 0, nbytes
        trailer = struct.unpack(frm, self._fileh.read(nbytes))
        # trailer = np.fromfile(self._fileh, self._intstr, key)
        self._fileh.read(4)  # endrec
        self._skip_key(4)
        reclen = self._Str4.unpack(self._fileh.read(4))[0]
        # second copy of the name; consumed but not returned
        db_name2 = self._valid_name(self._fileh.read(reclen))
        self._fileh.read(4)  # endrec
        self._skip_key(2)
        rec_type = self._get_key()
        return db_name, trailer, rec_type
    def read_op2_matrix(self, name, trailer):
        """
        Read and return Nastran op2 matrix at current file position.

        Parameters
        ----------
        name : string
            Name of the matrix (from :func:`_read_op2_name_trailer`);
            used for printing only.
        trailer : tuple
            Data block trailer; gives the matrix size:
            nrows = trailer[2], ncols = trailer[1].

        Returns
        -------
        matrix : 2d ndarray
            The matrix, stored in Fortran (column-major) order.

        It is assumed that the name has already been read in via
        :func:`_read_op2_name_trailer`.
        """
        dtype = 1
        nrows = trailer[2]
        ncols = trailer[1]
        print(' %s (%s, %s)' % (name, nrows, ncols))
        matrix = np.zeros((nrows, ncols), order='F')
        if self._bit64:
            intsize = 8
        else:
            intsize = 4
        col = 0
        frm = self._endian + '%dd'
        print('frm =', frm)
        while dtype > 0:  # read in matrix columns
            # key is number of elements in next record (row # followed
            # by key-1 real numbers)
            key = self._get_key()
            # read column
            while key > 0:
                reclen = self._Str4.unpack(self._fileh.read(4))[0]
                # first integer of the record is the 1-based start row
                r = self._Str.unpack(self._fileh.read(self._ibytes))[0]-1
                n = (reclen - intsize) // 8
                if n < self._rowsCutoff:
                    matrix[r:r+n, col] = struct.unpack(
                        frm % n, self._fileh.read(n*8))
                else:
                    matrix[r:r+n, col] = np.fromfile(
                        self._fileh, np.float64, n)
                self._fileh.read(4)  # endrec
                key = self._get_key()
            col += 1
            self._get_key()
            dtype = self._get_key()
        self._read_op2_end_of_table()
        if self._swap:
            matrix = matrix.byteswap()
        if name in ['EFMFSMS', 'EFMASSS', 'RBMASSS']:
            # NOTE(review): debug print left in place to preserve
            # the routine's observable output
            print(matrix)
        return matrix
def skip_op2_matrix(self, trailer):
"""
Skip Nastran op2 matrix at current position.
It is assumed that the name has already been read in via
:func:`_read_op2_name_trailer`.
The size of the matrix is read from trailer:
rows = trailer[2]
cols = trailer[1]
"""
dtype = 1
while dtype > 0: # read in matrix columns
# key is number of elements in next record (row # followed
# by key-1 real numbers)
key = self._get_key()
# skip column
while key > 0:
reclen = self._Str4.unpack(self._fileh.read(4))[0]
self._fileh.seek(reclen, 1)
self._fileh.read(4) # endrec
key = self._get_key()
self._get_key()
dtype = self._get_key()
self._read_op2_end_of_table()
def skip_op2_table(self):
"""Skip over Nastran output2 table."""
eot, key = self._read_op2_end_of_table()
if key == 0:
return
while key > 0:
while key > 0:
reclen = self._Str4.unpack(self._fileh.read(4))[0]
self._fileh.seek(8+reclen, 1)
key = self._Str.unpack(self._fileh.read(self._ibytes))[0]
self._fileh.read(4) # endrec
self._skip_key(2)
eot, key = self._read_op2_end_of_table()
def read_op2_matrices(self):
"""Read all matrices from Nastran output2 file.
Returns dictionary containing all matrices in the op2 file:
{'NAME1': matrix1, 'NAME2': matrix2, ...}
The keys are the names as stored (upper case).
"""
self._fileh.seek(self._postheaderpos)
mats = {}
while 1:
name, trailer, rectype = self._read_op2_name_trailer()
if name is None:
break
if rectype > 0:
print("Reading matrix {}...".format(name))
mats[name] = self.read_op2_matrix(trailer)
else:
self.skip_op2_table()
return mats
def print_data_block_directory(self):
"""
Prints op2 data block directory. See also :func:`directory`.
"""
if len(self.dblist) == 0:
self.directory(verbose=False)
for s in self.dblist:
print(s)
    def directory(self, verbose=True, redo=False):  # TODO: _read_op2_name_trailer
        """
        Return list of data block names in op2 file.

        Parameters
        ----------
        verbose : bool (or any true/false variable)
            If True, print names, sizes, and file offsets to screen.
        redo : bool
            If True, scan through file and redefine self.dbnames even
            if it is already set.

        Returns tuple: (dbnames, dblist)
        --------------------------------
        dbnames : Dictionary
            Dictionary indexed by data block name.  Each value is a
            list, one element per occurrence of the data block in the
            op2 file.  Each element is another list that has 3
            elements: [fpos, bytes, size]:
            ::

               fpos : 2-element list; file position start and stop
                      (stop value is start of next data block)
               bytes: number of bytes data block consumes in file
               size : 2-element list; for matrices, [rows, cols],
                      for tables [0, 0]

        dblist : list
            List of strings for printing.  Contains the info above
            in formatted and sorted (in file position order) strings.

        As an example of using dbnames, to get a list of all sizes of
        matrices named 'KAA':
        ::

            o2 = op2.OP2('mds.op2')
            s = [item[2] for item in o2.dbnames['KAA']]

        For another example, to read in first matrix named 'KAA':
        ::

            o2 = op2.OP2('mds.op2')
            fpos = o2.dbnames['KAA'][0][0][0]
            o2._fileh.seek(fpos)
            name, trailer, rectype = o2._read_op2_name_trailer()
            kaa = o2.read_op2_matrix(name, trailer)

        This routine also sets self.dbnames = dbnames.
        """
        if len(self.dbnames) > 0 and not redo:
            # already scanned; note: only dbnames is returned here,
            # not the (dbnames, dblist) tuple
            return self.dbnames
        dbnames = {}
        dblist = []
        self._fileh.seek(self._postheaderpos)
        pos = self._postheaderpos
        while 1:
            name, trailer, dbtype = self._read_op2_name_trailer()
            if name is None:
                break
            if dbtype > 0:
                self.skip_op2_matrix(trailer)
                size = [trailer[2], trailer[1]]
                s = 'Matrix {0:8}'.format(name)
            else:
                self.skip_op2_table()
                size = [0, 0]
                s = 'Table {0:8}'.format(name)
            cur = self._fileh.tell()
            # NOTE(review): byte count reported as cur-pos-1 — confirm
            # the -1 is intended
            s += (', bytes = {0:10} [{1:10} to {2:10}]'.
                  format(cur-pos-1, pos, cur))
            if size != [0, 0]:
                s += (', {0:6} x {1:<}'.
                      format(size[0], size[1]))
            if name not in dbnames:
                dbnames[name] = []
            dbnames[name].append([[pos, cur], cur-pos-1, size])
            dblist.append(s)
            pos = cur
        self.dbnames = dbnames
        self.dblist = dblist
        if verbose:
            self.print_data_block_directory()
        return dbnames, dblist
    def read_op2_dynamics(self):
        """
        Reads the TLOAD data from a DYNAMICS datablock.

        Returns matrix of TLOADS.  Rows = 5 or 6, Cols = number of
        TLOADs.  TLOAD ids are in first row; other data in matrix may
        not be useful.

        The row count (5 or 6) is deduced from the data length and by
        requiring the TLOAD ids (first row) to be increasing.
        """
        key = self._get_key()
        if self._ibytes == 4:
            header_Str = struct.Struct(self._endian + 'iii')
            hbytes = 12
        else:
            header_Str = struct.Struct(self._endian + 'qqq')
            hbytes = 24
        eot = 0
        # NOTE(review): debug print left in place to preserve output
        print('self._intstr = %r' % self._intstr)
        data = np.zeros(0, dtype=self._intstr)
        while not eot:
            while key > 0:
                self._fileh.read(4)  # reclen
                header = header_Str.unpack(self._fileh.read(hbytes))
                # (7107, 71, 138) identifies a TLOAD record
                if header == (7107, 71, 138):
                    if key < self._rowsCutoff:
                        bytes = (key-3)*self._ibytes
                        ndata = struct.unpack(self._intstru % (key-3),
                                              self._fileh.read(bytes))
                    else:
                        ndata = np.fromfile(self._fileh,
                                            self._intstr, key-3)
                    data = np.hstack((data, ndata))
                else:
                    # not a TLOAD record; skip its payload
                    self._fileh.seek((key-3)*self._ibytes, 1)
                self._fileh.read(4)  # endrec
                key = self._get_key()
            self._skip_key(2)
            eot, key = self._read_op2_end_of_table()
        if np.any(data):
            L = len(data)
            # decide between 5 and 6 rows: a valid layout has strictly
            # increasing TLOAD ids down the first row
            mult5 = L == 5*(L // 5)
            mult6 = L == 6*(L // 6)
            err1 = ('Could not determine if TLOADs are 5 or 6 rows! '
                    'Both work. Routine needs updating.')
            err2 = ('Could not determine if TLOADs are 5 or 6 rows! '
                    'Neither work. Routine needs updating.')
            if mult5:
                mindelta5 = np.min(np.diff(data[0::5]))
            if mult6:
                mindelta6 = np.min(np.diff(data[0::6]))
            if mult5:
                if mult6:
                    # L is multiple of both 5 and 6:
                    if mindelta5 > 0:
                        if mindelta6 > 0:
                            raise ValueError(err1)
                        rows = 5
                    else:
                        if mindelta6 > 0:
                            rows = 6
                        else:
                            raise ValueError(err2)
                else:
                    if mindelta5 > 0:
                        rows = 5
                    else:
                        raise ValueError(err2)
            elif mult6:
                if mindelta6 > 0:
                    rows = 6
                else:
                    raise ValueError(err2)
            else:
                raise ValueError(err2)
            data = np.reshape(data, (rows, -1), order='F')
        return data
def read_op2_tload(self):
"""
Returns the TLOAD data from an op2 file.
This routine scans the op2 file for the DYNAMICS datablock and
then calls :func:`read_op2_dynamics` to read the data.
"""
if len(self.dbnames) == 0:
self.directory(verbose=False)
fpos = self.dbnames['DYNAMICS'][0][0][0]
self._fileh.seek(fpos)
name, trailer, dbtype = self._read_op2_name_trailer()
return self.read_op2_dynamics()
    def read_op2_record(self, form=None, N=0):
        """
        Read Nastran output2 data record.

        Parameters
        ----------
        form : string or None
            String specifying format, or None to read in signed integers.
            One of::

               'int' (same as None)
               'uint'
               'single'
               'double'
               'bytes' -- raw bytes from file

        N : integer
            Number of elements in final data record; use 0 if unknown.

        Returns numpy 1-d vector or, if form=='bytes', a bytes string.

        This routine will read in a 'super' record if the data spans
        more than one logical record.
        """
        key = self._get_key()
        f = self._fileh
        if not form or form == 'int':
            frm = self._intstr
            frmu = self._intstru
            bytes_per = self._ibytes
        elif form == 'uint':
            frm = self._intstr.replace('i', 'u')
            frmu = self._intstru.replace('i', 'I')
            bytes_per = self._ibytes
        elif form == 'double':
            frm = self._endian + 'f8'
            frmu = self._endian + '%dd'
            bytes_per = 8
        elif form == 'single':
            frm = self._endian + 'f4'
            frmu = self._endian + '%df'
            bytes_per = 4
        elif form == 'bytes':
            # raw mode: concatenate the record payloads unparsed
            data = b''
            while key > 0:
                reclen = self._Str4.unpack(f.read(4))[0]
                data += f.read(reclen)
                f.read(4)  # endrec
                key = self._get_key()
            self._skip_key(2)
            return data
        else:
            raise ValueError("form must be one of: None, 'int', "
                             "'uint', 'double', 'single' or 'bytes'")
        if N:
            # size known up-front: fill a preallocated array
            data = np.zeros(N, dtype=frm)
            i = 0
            while key > 0:
                reclen = self._Str4.unpack(f.read(4))[0]
                n = reclen // bytes_per
                if n < self._rowsCutoff:
                    b = n * bytes_per
                    data[i:i+n] = struct.unpack(frmu % n, f.read(b))
                else:
                    data[i:i+n] = np.fromfile(f, frm, n)
                i += n
                f.read(4)  # endrec
                key = self._get_key()
        else:
            # size unknown: grow the array record by record
            data = np.zeros(0, dtype=frm)
            while key > 0:
                reclen = self._Str4.unpack(f.read(4))[0]
                n = reclen // bytes_per
                if n < self._rowsCutoff:
                    b = n * bytes_per
                    cur = struct.unpack(frmu % n, f.read(b))
                else:
                    cur = np.fromfile(f, frm, n)
                data = np.hstack((data, cur))
                f.read(4)  # endrec
                key = self._get_key()
        self._skip_key(2)
        return data
def skip_op2_record(self):
"""
Skip over Nastran output2 data record (or super-record).
"""
key = self._get_key()
while key > 0:
reclen = self._Str4.unpack(self._fileh.read(4))[0]
self._fileh.seek(reclen+4, 1)
key = self._get_key()
self._skip_key(2)
    def read_op2_table_headers(self, name):
        """
        Read op2 table headers and echo them to the screen.

        Parameters
        ----------
        name : string
            Name of data block that headers are being read for.

        File must be positioned after name and trailer block.  For
        example, to read the table headers of the last GEOM1S data
        block::

            o2 = op2.OP2('modes.op2')
            fpos = o2.dbnames['GEOM1S'][-1][0][0]
            o2._fileh.seek(fpos)
            name, trailer, dbtype = o2._read_op2_name_trailer()
            o2.read_op2_table_headers('GEOM1S')
        """
        key = self._get_key()
        print("{0} Headers:".format(name))
        Frm = struct.Struct(self._intstru % 3)
        eot = 0
        while not eot:
            while key > 0:
                reclen = self._Str4.unpack(self._fileh.read(4))[0]
                # first 3 integers of each record are the header
                head = Frm.unpack(self._fileh.read(3*self._ibytes))
                print(np.hstack((head, reclen)))
                # skip the remainder of the record plus the endrec word
                self._fileh.seek((key-3)*self._ibytes, 1)
                self._fileh.read(4)
                key = self._get_key()
            self._skip_key(2)
            eot, key = self._read_op2_end_of_table()
def _check_code(self, item_code, funcs, vals, name):
"""
Checks that the code (ACODE or TCODE probably) value is
acceptable.
Parameters
----------
item_code : integer
The ACODE or TCODE (or similar) value for the record.
funcs : list of integers
These are the function code values to check for `code`
vals : list of lists of integers
These are the acceptable values for the `code` functions;
ignored if `acode` is None.
name : string
Name for message; eg: 'TCODE'
Returns
-------
True if all values are acceptable, False otherwise.
Notes
-----
The function codes in `funcs` are:
====== ==========================================
Code Operation
====== ==========================================
1 if (item_code//1000 = 2,3,6) then return 2
else return 1
2 mod(item_code,100)
3 mod(item_code,1000)
4 item_code//10
5 mod(item_code,10)
6 if iand(item_code,8)!=0??? then set to 0,
else set to 1
7 if item_code//1000
= 0 or 2, then set to 0
= 1 or 3, then set to 1
> 3, then set to 2.
>65535 iand(item_code,iand(func_code,65535))
====== ==========================================
where `iand` is the bit-wise AND operation. For example, ACODE,4
means that the ACODE value should be integer divided it by 10.
So, if ACODE is 22, ACODE,4 is 2.
"""
if len(funcs) != len(vals):
raise ValueError('len(funcs) != len(vals)!')
for func, val in zip(funcs, vals):
if 1 <= func <= 7:
if self.CodeFuncs[func](item_code) not in val:
warnings.warn('{0} value {1} not acceptable; func={2}; allowed={3}'.
format(name, item_code, func, val),
RuntimeWarning)
return False
elif func > 65535:
if self.CodeFuncs['big'](func, item_code) not in val:
warnings.warn('{0} value {1} not acceptable'.
format(name, item_code),
RuntimeWarning)
return False
else:
raise ValueError('Unknown function code: {0}'.
format(func))
return True
def _read_op2_ougv1(self, name):
"""
Read op2 OUGV1 mode shape data block.
Parameters
----------
name : string
Name of OUGV1 data block.
Returns
-------
ougv1 : dict
Dictionary with::
'ougv1' : the OUGV1 matrix
'lambda' : the eigenvalues; len(lambda) = size(ougv1,2)
'dof' : 2-column matrix of: [id, dof];
size(dof,1) = size(ougv1,1)
Notes
-----
Can currently only read a real eigenvalue table (ACODE,4 = 2,
TCODE,1 = 1, TCODE,2 = 7, and TCODE,7 in [0, 2]).
"""
float2_Str = struct.Struct(self._endian + 'ff')
iif6_int = np.dtype(self._endian+'i4')
iif6_bytes = 32
if self._ibytes == 4:
i4_Str = struct.Struct(self._endian + 'iiii')
i4_bytes = 16
else:
i4_Str = struct.Struct(self._endian + 'qqqq')
i4_bytes = 32
pos = self._fileh.tell()
key = self._get_key()
lam = np.zeros(1, float)
ougv1 = None
J = 0
eot = 0
while not eot:
if J == 1:
# compute number of modes by files bytes:
startpos = pos + 8 + self._ibytes
bytes_per_mode = self._fileh.tell() - startpos
dbdir = self.dbnames[name]
for i in range(len(dbdir)):
if dbdir[i][0][0] < startpos < dbdir[i][0][1]:
endpos = dbdir[i][0][1]
break
nmodes = (endpos - startpos) // bytes_per_mode
print('Number of modes in OUGV1 is {0:d}'.format(nmodes))
keep = lam
lam = np.zeros(nmodes, float)
lam[0] = keep
keep = ougv1
ougv1 = np.zeros((keep.shape[0], nmodes), float,
order='F')
ougv1[:, 0] = keep[:, 0]
# IDENT record:
reclen = self._Str4.unpack(self._fileh.read(4))[0]
header = i4_Str.unpack(self._fileh.read(i4_bytes))
# header = (ACODE, TCODE, ...)
achk = self._check_code(header[0], [4], [[2]], 'ACODE')
# item_code, funcs, vals, name
tchk = self._check_code(header[1], [1, 2, 7],
[[1], [7], [0, 2]], 'TCODE')
if not (achk and tchk):
self._fileh.seek(pos)
self.skip_op2_table()
return
self._fileh.read(self._ibytes) # mode bytes
lam[J] = float2_Str.unpack(self._fileh.read(8))[0]
# ttl bytes = reclen + 4 + 3*(4+ibytes+4)
# = reclen + 28 - 3*ibytes
# read bytes = 4*ibytes + ibytes + 8 = 8 + 5*ibytes
# self._fileh.seek(reclen-2*self._ibytes+20, 1) # ... or:
self._fileh.read(reclen-2*self._ibytes+20)
# DATA record:
if ougv1 is None:
print('masking')
# - process DOF information on first column only
# - there are 8 elements per node:
# id*10, type, x, y, z, rx, ry, rz
data = self.read_op2_record('bytes') # 1st column
n = len(data) // iif6_bytes
print('iif6_int =', iif6_int) # int32
data = np.fromstring(data, iif6_int)
data1 = (data.reshape(n, 8))[:, :2]
pvgrids = data1[:, 1] == 1
dof = expand_dof(data1[:, 0] // 10, pvgrids)
# form partition vector for modeshape data:
V = np.zeros((n, 8), bool)
V[:, 2] = True # all nodes have 'x'
V[pvgrids, 3:] = True # only grids have all 6
# print('V =\n', V)
V = V.flatten()
# initialize ougv1 with first mode shape:
data.dtype = np.float32 # reinterpret as floats
ougv1 = data[V].reshape(-1, 1)
else:
data = self.read_op2_record('single', V.shape[0])
ougv1[:, J] = data[V]
J += 1
# print('Finished reading mode {0:3d}, Frequency ={1:6.2f}'.format(
# J, np.sqrt(lam[J-1])/(2*np.pi)))
eot, key = self._read_op2_end_of_table()
return {'ougv1': ougv1, 'lambda': lam, 'dof': dof}
    def _read_op2_emap(self, nas, nse, trailer):
        """
        Read Nastran output2 EMAP data block.

        Parameters
        ----------
        nas : dict
            Dictionary; has at least {'dnids': {}}.
        nse : integer
            Number of superelements.
        trailer : 1-d array
            The trailer for the EMAP data block.

        Fills in the dnids member of nas.

        See :func:`read_nas2cam_op2`.
        """
        words4bits = trailer[4]
        data1 = self.read_op2_record()
        # [se bitpos proc_order dnse bitpos_dnse prim_se se_type]
        data1 = np.reshape(data1[:7*nse], (-1, 7))
        # read 2nd record:
        key = self._get_key()
        data2 = np.zeros(0, dtype='u4')
        frm = self._endian + 'u4'
        frmu = self._endian + '%dI'
        if self._ibytes == 8:
            # 64-bit file: each word spans two 32-bit reads
            mult = 2
        else:
            mult = 1
        while key > 0:
            self._fileh.read(4)  # reclen
            if mult*key < self._rowsCutoff:
                cur = struct.unpack(frmu % (mult*key),
                                    self._fileh.read(4*mult*key))
            else:
                cur = np.fromfile(self._fileh, frm, mult*key)
            data2 = np.hstack((data2, cur))
            self._fileh.read(4)  # endrec
            key = self._get_key()
        if self._ibytes == 8:
            # keep the meaningful halves of each 64-bit word
            data2 = np.reshape(data2, (4, -1))
            data2 = data2[[0, 3], :].flatten()
        self._skip_key(2)
        # [ grid_id [bitmap] ]
        data2 = np.reshape(data2, (-1, words4bits))
        # 1 in front need to skip over grid_id (vars are col indices)
        word4bit_up = 1 + data1[:, 1] // 32
        word4bit_dn = 1 + data1[:, 4] // 32
        bitpos_up = 31 - data1[:, 1] % 32
        bitpos_dn = 31 - data1[:, 4] % 32
        for j in range(nse-1):
            se = data1[j, 0]
            bitdn = 1 << bitpos_dn[j]
            bitup = 1 << bitpos_up[j]
            # a grid belongs to superelement `se` when both its down-
            # and up-stream bits are set in the bitmap
            connected = np.logical_and(data2[:, word4bit_dn[j]] & bitdn,
                                       data2[:, word4bit_up[j]] & bitup)
            grids = data2[connected, 0]
            nas['dnids'][se] = grids
        for j in range(nse):  # = 1 to nse:
            self.skip_op2_record()
        self._get_key()
    def _read_op2_bgpdt(self):
        """
        Read record 1 of the Nastran output2 BGPDT data block.

        Returns vector of the BGPDT data or [] if no data found.
        Vector is 9*ngrids in length.  For each grid:
        ::

          [ coord_id
            internal_id
            external_id
            dof_type
            permanent_set_constraint
            boundary_grid_id
            x
            y
            z ]

        The x, y, z values are the grid location in basic.

        A grid entry (6 integers + 3 doubles) may straddle a record
        boundary; the ileft/dleft bookkeeping below carries the
        partially-read grid into the next record.

        See :func:`rdn2cop2`.
        """
        if self._ibytes == 4:
            Str = struct.Struct(self._endian + 'iiiiiiddd')
            Sbytes = 24 + 24
            wpg = 12  # words per grid
            wpd = 2   # words per double
        else:
            Str = struct.Struct(self._endian + 'qqqqqqddd')
            Sbytes = 48 + 24
            wpg = 9   # words per grid
            wpd = 1   # words per double
        rfrm = self._endian + '%dd'
        key = self._get_key()
        datarec = []
        ileft = 0  # remaining left over
        dleft = 0  # remaining doubles left over
        a = np.arange(6)
        b = np.arange(6, 9)
        v = np.arange(9)
        A = grids = 0
        while key > 0:
            self._fileh.read(4)  # reclen
            if ileft > 0:
                # finish the integer portion of a grid split across
                # the previous record boundary
                i = A + a[6-ileft:] + grids*9
                bytes = self._ibytes * ileft
                datarec[i] = struct.unpack(self._intstru % ileft,
                                           self._fileh.read(bytes))
            if dleft > 0:
                # finish the double (x/y/z) portion of a split grid
                i = A + b[3-dleft:] + grids*9
                datarec[i] = struct.unpack(rfrm % dleft,
                                           self._fileh.read(8*dleft))
            key = key - ileft - dleft*wpd
            # number of complete grids remaining in this record:
            grids = key // wpg
            A = len(datarec)
            # round up for memory allocation (for possible partial):
            n = (key + wpg - 1) // wpg
            datarec = np.hstack((datarec, np.zeros(n*9)))
            Av = A + v
            for i in range(grids):
                datarec[Av + i*9] = Str.unpack(self._fileh.read(Sbytes))
            # read in remainder of record if any
            ileft = 0
            dleft = 0
            if key > grids*wpg:
                # number of words left (1 word/int, 2 words/double)
                n = key - grids*wpg
                if n >= 6:
                    i = A+a+grids*9
                    bytes = self._ibytes * 6
                    datarec[i] = struct.unpack(self._intstru % 6,
                                               self._fileh.read(bytes))
                    # divide by wpd to get number of doubles
                    n = (n - 6) // wpd
                    dleft = 3-n
                    if n >= 1:
                        i = A + b[:n] + grids*9
                        datarec[i] = struct.unpack(rfrm % n,
                                                   self._fileh.read(8*n))
                else:
                    i = A + a[:n] + grids*9
                    bytes = self._ibytes * n
                    datarec[i] = struct.unpack(self._intstru % n,
                                               self._fileh.read(bytes))
                    ileft = 6-n
                    dleft = 3
            self._fileh.read(4)  # endrec
            key = self._get_key()
        self._skip_key(2)
        return datarec
    def _read_op2_bgpdt68(self):
        """
        Read record 1 of the Nastran output2 BGPDT68 data block.

        Returns vector of the BGPDT data or [] if no data found.
        Vector is 4*ngrids in length.  For each grid:
        ::

          [ coord_id
            x
            y
            z ]

        The x, y, z values are the grid location in basic.

        A grid entry (1 integer + 3 singles) may straddle a record
        boundary; the ileft/dleft bookkeeping below carries the
        partially-read grid into the next record.
        """
        Str = struct.Struct(self._endian + 'ifff')
        Sbytes = 16
        wpg = 4   # words per grid
        wpd = 1   # words per single
        rfrm = self._endian + '%df'
        key = self._get_key()
        datarec = []
        ileft = 0  # remaining left over
        dleft = 0  # remaining doubles left over
        a = np.arange(1)
        b = np.arange(1, 4)
        v = np.arange(4)
        A = grids = 0
        while key > 0:
            self._fileh.read(4)  # reclen
            if ileft > 0:
                # finish the coord_id integer of a grid split across
                # the previous record boundary
                i = A + grids*4
                bytes = 4 * ileft
                datarec[i] = struct.unpack(self._int32stru % ileft,
                                           self._fileh.read(bytes))
            if dleft > 0:
                # finish the x/y/z singles of a split grid
                i = A + b[3-dleft:] + grids*4
                datarec[i] = struct.unpack(rfrm % dleft,
                                           self._fileh.read(4*dleft))
            key = key - ileft - dleft*wpd
            # number of complete grids remaining in this record:
            grids = key // wpg
            A = len(datarec)
            # round up for memory allocation (for possible partial):
            n = (key + wpg - 1) // wpg
            datarec = np.hstack((datarec, np.zeros(n*4)))
            Av = A + v
            for i in range(grids):
                datarec[Av + i*4] = Str.unpack(self._fileh.read(Sbytes))
            # read in remainder of record if any
            ileft = 0
            dleft = 0
            if key > grids*wpg:
                # number of words left (1 word/int, 2 words/double)
                n = key - grids*wpg
                if n >= 1:
                    i = A + a + grids*4
                    datarec[i] = self._Str4.unpack(self._fileh.read(4))[0]
                    # divide by wpd to get number of doubles
                    n = (n - 1) // wpd
                    dleft = 3-n
                    if n >= 1:
                        i = A + b[:n] + grids*4
                        datarec[i] = struct.unpack(rfrm % n,
                                                   self._fileh.read(4*n))
            self._fileh.read(4)  # endrec
            key = self._get_key()
        self._skip_key(2)
        return datarec
def _read_op2_cstm(self):
"""
Read Nastran output2 CSTM data block.
Returns 14-column matrix 2-d array of the CSTM data:
::
[
[ id1 type xo yo zo T(1,1:3) T(2,1:3) T(3,1:3) ]
[ id2 type xo yo zo T(1,1:3) T(2,1:3) T(3,1:3) ]
...
]
T is transformation from local to basic for the coordinate
system.
See :func:`read_nas2cam_op2`.
"""
cstm_rec1 = self.read_op2_record()
cstm_rec2 = self.read_op2_record('double')
self._read_op2_end_of_table()
# assemble coordinate system table
length = len(cstm_rec1)
cstm = np.zeros((length/4, 14))
cstm[:, 0] = cstm_rec1[::4]
cstm[:, 1] = cstm_rec1[1::4]
# start index into rec2 for xo, yo, zo, T (12 values) is in
# last (4th) position in rec1 for each coordinate system:
pv = range(12)
for i, j in enumerate(cstm_rec1[3::4]):
cstm[i, 2:] = cstm_rec2[j+pv-1] # -1 for 0 offset
return cstm
    def _read_op2_cstm68(self):
        """
        Read record 1 of Nastran output2 CSTM68 data block.

        Returns vector of the CSTM data or [] if no data found.  Vector
        is 14 * number of coordinate systems in length.  For each
        coordinate system:
        ::

          [ id type xo yo zo T(1,1:3) T(2,1:3) T(3,1:3) ]

        T is transformation from local to basic for the coordinate
        system.

        A coordinate-system entry (2 integers + 12 singles) may
        straddle a record boundary; the ileft/dleft bookkeeping below
        carries the partially-read entry into the next record.
        """
        Str = struct.Struct(self._endian + 'ii' + 'f'*12)
        Sbytes = 4*14
        wpg = 14  # words per grid
        wpd = 1   # words per single
        key = self._get_key()
        rfrm = self._endian + '%df'
        datarec = []
        ileft = 0  # integers to read that are left over
        dleft = 0  # singles left
        a = np.arange(2)
        b = np.arange(2, 14)
        v = np.arange(14)
        A = grids = 0
        while key > 0:
            self._fileh.read(4)  # reclen
            if ileft > 0:
                # finish the id/type integers of an entry split across
                # the previous record boundary
                i = A + a[2-ileft:] + grids*14
                bytes = 4 * ileft
                datarec[i] = struct.unpack(self._int32stru % ileft,
                                           self._fileh.read(bytes))
            if dleft > 0:
                # finish the 12 floats of a split entry
                i = A + b[12-dleft:] + grids*14
                datarec[i] = struct.unpack(rfrm % dleft,
                                           self._fileh.read(4*dleft))
            key = key - ileft - dleft*wpd
            # number of complete grids remaining in this record
            grids = key // wpg
            A = len(datarec)
            # round up for memory allocation (for possible partial):
            n = (key + wpg - 1) // wpg
            datarec = np.hstack((datarec, np.zeros(n*14)))
            Av = A + v
            for i in range(grids):
                datarec[Av + i*14] = Str.unpack(self._fileh.read(Sbytes))
            # read in remainder of record if any
            ileft = 0
            dleft = 0
            if key > grids*wpg:
                # number of words left (1 word/int, 2 words/single)
                n = key - grids*wpg
                if n >= 2:
                    i = A + a + grids*14
                    datarec[i] = struct.unpack(self._int32stru % 2,
                                               self._fileh.read(8))
                    # divide by wpd to get number of singles
                    n = (n - 2) // wpd
                    dleft = 12-n
                    if n >= 1:
                        i = A + b[:n] + grids*14
                        datarec[i] = struct.unpack(rfrm % n,
                                                   self._fileh.read(4*n))
                else:
                    # n must be 1 here
                    i = A + grids*14
                    datarec[i] = self._Str4.unpack(self._fileh.read(4))[0]
                    ileft = 2-n
                    dleft = 12
            self._fileh.read(4)  # endrec
            key = self._get_key()
        self._skip_key(2)
        self._read_op2_end_of_table()
        return datarec
    def _read_op2_geom1_cord2(self):
        """
        Read CORD2R/CORD2C/CORD2S, SEBULK and SECONCT data from a
        GEOM1 data block.

        Returns
        -------
        tuple: (coords, sebulk, selist)
            coords : coordinate systems built by n2y.build_coords from
                     the CORD2* card data
            sebulk : 2d ndarray; one row of SEBULK card data per
                     superelement
            selist : 2d ndarray of [se, upstream_se] rows from the
                     SECONCT data, terminated by a [0, 0] row

        The file must be positioned at the start of the data block
        records (after the name/trailer).
        """
        if self._ibytes == 4:
            header_Str = struct.Struct(self._endian + 'iii')
            cord2_Str = struct.Struct(self._endian + '4i9f')
            sebulk_Str = struct.Struct(self._endian + '4if3i')
            hbytes = 12
            cbytes = 4*13
            bbytes = 4*8
        else:
            header_Str = struct.Struct(self._endian + 'qqq')
            cord2_Str = struct.Struct(self._endian + '4q9d')
            sebulk_Str = struct.Struct(self._endian + '4qd3q')
            hbytes = 24
            cbytes = 8*13
            bbytes = 8*8
        # 3-integer record headers identifying each card type:
        CORD2R = (2101, 21, 8)
        CORD2C = (2001, 20, 9)
        CORD2S = (2201, 22, 10)
        SEBULK = (1427, 14, 465)
        SECONCT = (427, 4, 453)
        cord2 = np.zeros((0, 13))
        sebulk = np.zeros((1, 8))
        selist = np.array([[0, 0]], int)
        key = self._get_key()
        eot = 0
        while not eot:
            while key > 0:
                self._fileh.read(4)  # reclen
                head = header_Str.unpack(self._fileh.read(hbytes))
                if head in [CORD2R, CORD2C, CORD2S]:
                    # 13 words per CORD2* card
                    n = (key-3) // 13
                    data = np.zeros((n, 13))
                    for i in range(n):
                        data[i] = cord2_Str.unpack(self._fileh.read(cbytes))
                    cord2 = np.vstack((cord2, data))
                elif head == SEBULK:
                    # 8 words per SEBULK card
                    n = (key-3) // 8
                    sebulk = np.zeros((n, 8))
                    for i in range(n):
                        sebulk[i] = sebulk_Str.unpack(self._fileh.read(bbytes))
                elif head == SECONCT:
                    n = key - 3
                    if n < self._rowsCutoff:
                        nbytes = n * self._ibytes
                        seconct = np.zeros(n, int)
                        seconct[:] = struct.unpack(self._intstru % n,
                                                   self._fileh.read(nbytes))
                    else:
                        seconct = np.fromfile(self._fileh, self._intstr, n)
                    # -1 entries delimit the [se, upstream] pairs
                    pv = np.nonzero(seconct == -1)[0][1:-2:2] + 1
                    pv = np.hstack((0, pv))
                    u = np.unique(seconct[pv], return_index=True)[1]
                    pv = pv[u]
                    selist = np.vstack((seconct[pv], seconct[pv+1])).T
                    selist = np.vstack((selist, [0, 0]))
                else:
                    # unrecognized card type; skip the record payload
                    self._fileh.seek((key-3)*self._ibytes, 1)
                self._fileh.read(4)  # endrec
                key = self._get_key()
            self._skip_key(2)
            eot, key = self._read_op2_end_of_table()
        # drop the unused 3rd column from the CORD2* data
        cord2 = np.delete(cord2, 2, axis=1)
        return n2y.build_coords(cord2), sebulk, selist
def _read_op2_selist(self):
    """
    Read the SLIST data block; called by :func:`read_nas2cam_op2`.

    Returns the 2-column superelement list [seid, dnseid].
    See :func:`read_nas2cam_op2`.
    """
    record = self.read_op2_record()
    # zero out the 2nd word of each 7-word row
    record[1::7] = 0
    self.skip_op2_record()
    self._read_op2_end_of_table()
    # words 0 and 4 of each row hold [seid, dnseid]
    return np.column_stack((record[::7], record[4::7]))
def _read_op2_uset(self):
    """
    Read the USET data block.

    Returns the 1-d USET array. The 2nd bit is cleared for all S-set
    entries. See :func:`rdn2cop2`.
    """
    uset = self.read_op2_record('uint')
    # locate all S-set entries and clear their 2nd bit
    sset = (uset & n2y.mkusetmask("s")) != 0
    if sset.any():
        uset[sset] &= ~2
    self._read_op2_end_of_table()
    return uset
def _read_op2_eqexin(self):
    """
    Read the EQEXIN data block.

    Returns the (EQEXIN1, EQEXIN) record pair.
    See :func:`read_nas2cam_op2`.
    """
    rec1 = self.read_op2_record()
    rec2 = self.read_op2_record()
    self._read_op2_end_of_table()
    return rec1, rec2
def _proc_bgpdt(self, eqexin1, eqexin, ver68=False, bgpdtin=None):
    """
    Reads and processes the BGPDT data block for :func:`read_nas2cam_op2`
    and :func:`read_post_op2`.

    Parameters
    ----------
    eqexin1, eqexin : 1d arrays
        The two EQEXIN records (see :func:`_read_op2_eqexin`).
    ver68 : bool
        If True, `bgpdtin` holds the (already read) 4-column style
        BGPDT data; otherwise a 9-column block is read from the file.
    bgpdtin : 1d array or None
        Raw BGPDT data; only used when `ver68` is True.

    Returns (bgpdt, dof, doftype, nid, upids):
        - bgpdt : 4-column [x, y, z, cid] matrix
        - dof : dof number per entry (eqexin code // 10)
        - doftype : dof type per entry (eqexin code % 10)
        - nid : node ids, internal sort
        - upids : upstream-id column (None when `ver68` is True)

    See :func:`read_nas2cam_op2`, :func:`read_post_op2`.
    """
    if ver68:
        bgpdt_rec1 = bgpdtin
    else:
        bgpdt_rec1 = self._read_op2_bgpdt()
        # record 2 and the rest of the table are not needed:
        self.read_op2_record()
        self.skip_op2_table()
    # assemble coordinates table
    # bgpdt: [x, y, z, cid]
    if ver68:
        bgpdt_rec1 = bgpdt_rec1.reshape((-1, 4))
        bgpdt = bgpdt_rec1[:, [1, 2, 3, 0]]
    else:
        bgpdt_rec1 = bgpdt_rec1.reshape((-1, 9))
        bgpdt = bgpdt_rec1[:, [6, 7, 8, 0]]
    # assemble dof table; each eqexin code is 10*dof + doftype:
    dof = eqexin[1::2] // 10
    doftype = eqexin[1::2] - 10*dof
    nid = eqexin[::2]
    # eqexin is in external sort, so sort it
    i = eqexin1[1::2].argsort()
    dof = dof[i]
    doftype = doftype[i]
    nid = nid[i]
    if ver68:
        upids = None
    else:
        upids = bgpdt_rec1[:, 5].astype(int)
    return bgpdt, dof, doftype, nid, upids
def _build_Uset(self, se, dof, doftype, nid, uset, bgpdt,
cstm=None, cstm2=None):
"""
Builds the 6-column uset table for :func:`rdn2cop2` and
:func:`rdpostop2`.
Returns: (uset, cstm, cstm2).
See :func:`read_nas2cam_op2`.
"""
# Fill in all dof use -1 as default and set dof as
# appropriate ... make it big enough for grids (6 cols).
# The -1s will be partitioned out below.
rd = len(dof)
rb = np.size(bgpdt, 0)
if rd != rb:
raise RuntimeError(
'RDOP2USET: BGPDTS incompatible with '
'EQEXINS for superelement {}.\n'
' Guess: residual run clobbered EQEXINS\n'
' Fix: add the "fxphase0" alter to your '
'residual run'.format(se))
coordinfo = np.zeros((rd, 18))
coordinfo[:, :4] = bgpdt
if cstm is None:
n = len(cstm2)
cstm = np.zeros((n, 14))
for i, key in enumerate(cstm2):
cstm[i, :2] = cstm2[key][0, :2]
cstm[i, 2:] = (cstm2[key].flatten())[3:]
cref = cstm[:, 0].astype(int)
c_all = bgpdt[:, 3].astype(int)
i = np.argsort(cref)
pv = i[np.searchsorted(cref, c_all, sorter=i)]
coordinfo[:, 4] = cstm[pv, 1]
coordinfo[:, 6:] = cstm[pv, 2:]
grids = doftype == 1
ngrids = np.sum(grids)
nongrids = rd - ngrids
doflist = np.zeros((rd, 6)) - 1
if ngrids > 0:
doflist[grids, :] = np.arange(1, 7)
if nongrids > 0:
doflist[grids == False, 0] = 0
doflist = doflist.flatten()
idlist = np.dot(nid.reshape(-1, 1), np.ones((1, 6))).flatten()
coordinfo = coordinfo.reshape((rd*6, 3))
# partition out -1s:
pv = doflist != -1
doflist = doflist[pv]
idlist = idlist[pv]
coordinfo = coordinfo[pv, :]
if uset is None:
warnings.warn('uset information not found. Putting all '
'DOF in b-set.', RuntimeWarning)
#import n2y
b = n2y.mkusetmask('b')
uset = np.zeros(len(doflist), int) + b
uset = np.hstack((np.vstack((idlist, doflist, uset)).T,
coordinfo))
if cstm2 is None:
cstm2 = {}
for row in cstm:
m = np.zeros((5, 3))
m[0, :2] = row[:2]
m[1:, :] = row[2:].reshape((4, 3))
cstm2[int(row[0])] = m
return uset, cstm, cstm2
def _read_op2_maps(self):
    """
    Reads and returns the MAPS information for :func:`read_nas2cam_op2`.

    Returns a 2-column [order, scale] matrix; the `order` column is
    converted to 0-based indexing before return.
    """
    # each record holds one (integer, double) pair
    if self._ibytes == 4:
        id_Str = struct.Struct(self._endian + 'id')
        id_bytes = 12
    else:
        id_Str = struct.Struct(self._endian + 'qd')
        id_bytes = 16
    key = 1
    maps = np.zeros((0, 2))
    while key:
        key = self._get_key()  # 2 (1 integer, 1 double)
        self._fileh.read(4)  # reclen 12 or 16 bytes
        curmap = id_Str.unpack(self._fileh.read(id_bytes))
        maps = np.vstack((maps, curmap))
        self._fileh.read(4)  # endrec
        self._skip_key(2)  # 1st key is mystery negative
        key = self._get_key()  # 1 if cont, 0 if done
    self._get_key()
    # convert order column from 1-based to 0-based:
    maps[:, 0] -= 1
    return maps
def _read_op2_drm(self):
    """
    Read Nastran output2 DRM data block (table).

    Returns tuple: (drm, iddof)
    ----------------------------
    drm : ndarray
        The drm matrix.
    iddof : ndarray
        2-column matrix of [id, dof].

    The table is read in two passes: pass one scans the first column
    to count records-per-column (`rpc`) and build `iddof`; the file is
    then rewound and pass two reads the matrix values themselves.

    This routine is beta -- check output carefully.
    """
    def getStr(iprev, elemtype, ir_Str, ir_bytes):
        # Build (or reuse) the Struct for the integer prefix of each
        # record; element types 4 and 5 carry 1 leading integer,
        # everything else carries 2. Only rebuilds when the count
        # changed from `iprev`.
        if np.any(elemtype == np.array([4, 5])):
            ints_rec2 = 1
        else:
            ints_rec2 = 2
        if ints_rec2 != iprev:
            if self._bit64:
                ir_Str = struct.Struct(self._endian + 'q'*ints_rec2)
                ir_bytes = 8*ints_rec2
            else:
                ir_Str = struct.Struct(self._endian + 'i'*ints_rec2)
                ir_bytes = 4*ints_rec2
        return ir_Str, ir_bytes, ints_rec2
    # float format depends on 4- vs 8-byte reals:
    if self._bit64:
        rfrm = self._endian + 'f8'
        rfrmu = self._endian + '%dd'
        rsize = 8
    else:
        rfrm = self._endian + 'f4'
        rfrmu = self._endian + '%df'
        rsize = 4
    u1 = self.read_op2_record()
    elemtype = u1[1]
    elemid = u1[2]
    ir_Str, ir_bytes, ints_rec2 = getStr(0, elemtype, None, None)
    nwords = u1[9]
    key = self._get_key()
    # bytes between records: endrec + key + reclen wrappers
    block = 7*4+3*self._ibytes
    # --- pass 1: determine records/column by scanning first column ---
    rpc = 0
    fp = self._fileh
    pos = fp.tell()
    id1 = -1
    drmrow = 0
    blocksize = 500  # number of rows or cols to grow iddof and drm
    drmrows = blocksize
    iddof = np.zeros((drmrows, 2), int)
    # remember (key, nwords) so pass 2 can restart from the same state
    KN = key, nwords
    while key >= nwords:
        L = nwords - ints_rec2
        fp.read(4)  # reclen
        dataint = ir_Str.unpack(fp.read(ir_bytes))
        id_cur = dataint[0] // 10
        if id1 == -1:
            id1 = id_cur
        elif id1 == id_cur:
            # ids repeat => we've wrapped around to the next column
            break
        rpc += 1
        if drmrow+L >= drmrows:
            # grow iddof in blocksize chunks
            iddof = np.vstack((iddof,
                               np.zeros((blocksize, 2), int)))
            drmrows += blocksize
        iddof[drmrow:drmrow+L, 0] = id_cur
        iddof[drmrow:drmrow+L, 1] = elemid
        fp.seek(self._ibytes*L, 1)
        drmrow += L
        # read rest of record:
        for i in range(1, key // nwords):
            dataint = ir_Str.unpack(fp.read(ir_bytes))
            id_cur = dataint[0] // 10
            if drmrow+L >= drmrows:
                iddof = np.vstack((iddof,
                                   np.zeros((blocksize, 2), int)))
                drmrows += blocksize
            iddof[drmrow:drmrow+L, 0] = id_cur
            iddof[drmrow:drmrow+L, 1] = elemid
            fp.seek(self._ibytes*L, 1)
            drmrow += L
        fp.seek(block, 1)
        key = self._get_key()
        if key > 0:
            fp.read(4)  # reclen
            if key < self._rowsCutoff:
                u1 = struct.unpack(self._intstru % key,
                                   fp.read(key*self._ibytes))
            else:
                u1 = np.fromfile(fp, self._intstr, key)
            if u1[1] != elemtype:
                raise RuntimeError('u1[1] != elemtype')
            # above check precludes next two lines:
            # elemtype = u1[1]
            # ir_Str, ir_bytes, ints_rec2 = getStr(ints_rec2,
            #                                      elemtype,
            #                                      ir_Str, ir_bytes)
            # if u1[2] != elemid:
            #     raise RuntimeError('u1[2] != elemid ... should it?')
            elemid = u1[2]
            if u1[9] != nwords:
                raise RuntimeError('u1[9] != nwords ... should it?')
            # nwords = u1[9]
        fp.seek(block, 1)
        key = self._get_key()
    drmrows = drmrow
    iddof = iddof[:drmrows]
    # --- pass 2: rewind and read the matrix values ---
    drmcols = blocksize
    fp.seek(pos)
    B = np.zeros((drmrows, drmcols), order='F')
    drm = B.copy()
    drmcol = 0
    key, nwords = KN
    while key >= nwords:
        drmrow = 0
        if drmcol == drmcols:
            # grow drm by blocksize columns
            drm = np.asfortranarray(np.hstack((drm, B)))
            drmcols += blocksize
        for _ in it.repeat(None, rpc):
            L = nwords - ints_rec2
            fp.read(4)  # reclen
            for i in range(key // nwords):
                # dataint = ir_Str.unpack(fp.read(ir_bytes))
                fp.read(ir_bytes)  # skip the integer prefix
                if L < self._rowsCutoff:
                    drm[drmrow:drmrow+L, drmcol] = struct.unpack(
                        rfrmu % L, fp.read(rsize*L))
                else:
                    drm[drmrow:drmrow+L, drmcol] = np.fromfile(fp, rfrm, L)
                drmrow += L
            fp.seek(block, 1)
            key = self._get_key()
            if key > 0:
                fp.read(4)  # reclen
                if key < self._rowsCutoff:
                    u1 = struct.unpack(self._intstru % key,
                                       fp.read(key*self._ibytes))
                else:
                    u1 = np.fromfile(fp, self._intstr, key)
            else:
                break
            fp.seek(block, 1)
            key = self._get_key()
        drmcol += 1
    return drm[:, :drmcol], iddof
def read_drm2op2(self, verbose=False):
    """
    Read an op2 file output by the DRM2 DMAP.

    Parameters
    ----------
    verbose : bool
        If true, echo names of tables and matrices to screen.

    Returns
    -------
    drmkeys : dictionary
        - 'dr' : data recovery items in order requested (from
          XYCDBDRS)
        - 'drs' : sorted version of 'dr' (from XYCDBDRS)
        - 'tougv1', 'tougs1', etc : directories corresponding to
          the data recovery matrices (which are written to op4).
          All of these start with 'to' (lower case).

    The file is created with a header and then these data blocks are
    written::

        OUTPUT2  XYCDBDRS//0/OP2UNIT $
        OUTPUT2  TOUGV1,TOUGS1,TOUGD1//0/OP2UNIT $
        OUTPUT2  TOQGS1,TOQGD1,TOEFS1,TOEFD1//0/OP2UNIT $
        OUTPUT2  TOESS1,TOESD1//0/OP2UNIT $
    """
    self._fileh.seek(self._postheaderpos)
    self.verbose = verbose
    drmkeys = {}
    while True:
        name, trailer, rectype = self._read_op2_name_trailer()
        if name is None:
            break
        if rectype > 0:
            # matrices live in the companion op4 file; skip here
            if verbose:
                print("Skipping matrix %r..." % name)
            self.skip_op2_matrix(trailer)
        elif len(name) > 2 and name.startswith('TO'):
            if verbose:
                print("Reading %r..." % name)
            # record 1 is not needed; skip it
            self.read_op2_record()
            # record 2 contains the directory:
            #   starting at word 10: type, id, number, row, 0
            info = self.read_op2_record()[10:]
            drmkeys[name.lower()] = (info.reshape(-1, 5).T)[:-1]
            self._read_op2_end_of_table()
        elif len(name) > 4 and name.startswith('XYCD'):
            if verbose:
                print("Reading %r..." % name)
            # record 1 contains order of request info
            drmkeys['dr'] = self.read_op2_record()
            # record 2 contains the sorted list
            drmkeys['drs'] = self.read_op2_record().reshape(-1, 6).T
            self._read_op2_end_of_table()
        else:
            if verbose:
                print("Skipping table %r..." % name)
            self.skip_op2_table()
    return drmkeys
def read_nas2cam_op2(self):
    """
    Read Nastran output2 file written by DMAP NAS2CAM; usually
    called by :func:`rdnas2cam`.

    Returns dictionary with the following members:

    'selist' : array
        2-columns matrix: [ seid, dnseid ] where, for each row,
        dnseid is the downstream superelement for seid. (dnseid = 0
        if seid = 0).
    'uset' : dictionary
        Indexed by the SE number. Each member is a 6-column matrix
        described below.
    'cstm' : dictionary
        Indexed by the SE number. Each member is a 14-column matrix
        containing the coordinate system transformation matrix for
        each coordinate system. See description below.
    'cstm2' : dictionary
        Indexed by the SE number. Each member is another dictionary
        indexed by the coordinate system id number. This has the
        same information as 'cstm', but in a different format. See
        description below.
    'maps' : dictionary
        Indexed by the SE number. Each member is a mapping table
        for mapping the A-set order from upstream to downstream;
        see below.
    'dnids' : dictionary
        Indexed by the SE number. Each member is a vector of ids of
        the A-set ids of grids and spoints for SE in the downstream
        superelement. When using the CSUPER entry, these will be
        the ids on that entry. (Does not have each DOF, just ids.)
    'upids' : dictionary
        Indexed by the SE number. Each member is a vector of ids of
        the A-set grids and spoints for upstream se's that connect
        to SE. These ids are internally generated and should match
        with 'dnids'. This allows, for example, the routine
        :func:`n2y.upasetpv` to work. (Does not have each DOF, just
        ids.)

    The module n2y has many routines that use the data created by
    this routine.

    'uset' description
    ------------------
    Each USET variable is a 6-column matrix where the rows
    correspond to the DOF in Nastran internal sort, and the columns
    are::

        USET = [ ID DOF Set_Membership Coord_Info ]

    where, for grids, Coord_Info is a 6 row by 3 column matrix::

        Coord_Info = [[x   y    z]  # location of node in basic
                      [id  type 0]  # coord. id and type
                      [xo  yo  zo]  # origin of coord. system
                      [    T     ]] # 3x3 transformation to basic
                                    #  for coordinate system

        Coord_Info = [ 0 0 0 ]      # for SPOINTs

    'cstm' description
    ------------------
    Each CSTM contains all the coordinate system information for
    the superelement. Some or all of this info is in the USET
    table, but if a coordinate system is not used as an output
    system of any grid, it will not show up in USET. That is why
    CSTM is here. CSTM has 14 columns::

        CSTM = [ id type xo yo zo T(1,:) T(2,:) T(3,:) ]

    Note that each CSTM always starts with the two ids 0 and -1.
    The 0 is the basic coordinate system and the -1 is a dummy for
    SPOINTs. Note the T is transformation between coordinate
    systems as defined (not necessarily the same as the
    transformation for a particular grid ... which, for
    cylindrical and spherical, depends on grid location). This is
    the same T as in the USET table.

    For example, to convert coordinates from global to basic::

        Rectangular (type = 1):
            [x; y; z] = T*[xg; yg; zg] + [xo; yo; zo]

        Cylindrical (type = 2):
            % c = cos(theta); s = sin(theta)
            [x; y; z] = T*[R c; R s; zg] + [xo; yo; zo]

        Spherical (type = 3):
            % s1 = sin(theta); s2 = sin(phi)
            [x; y; z] = T*[r s1 c2; r s1 s2; r c1] + [xo; yo; zo]

    'cstm2' description
    ------------------
    Each CSTM2 is a dictionary with the same 5x3 that the
    'Coord_Info' listed above has (doesn't include the first row
    which is the node location). The dictionary is indexed by the
    coordinate id.

    'maps' description
    ------------------
    MAPS will be [] for superelements whose A-set dof did not get
    rearranged going downstream (on the CSUPER entry.) For other
    superelements, MAPS will contain two columns: [order, scale].
    The first column reorders upstream A-set to be in the order
    that they appear in the downstream: Down = Up(MAPS(:,1)). The
    second column is typically 1.0; if not, these routines will
    print an error message and stop. Together with DNIDS, a
    partition vector can be formed for the A-set of an upstream
    superelement (see :func:`n2y.upasetpv`).

    The op2 file that this routine reads is written by the Nastran
    DMAP NAS2CAM. The data in the file are expected to be in this
    order::

        SLIST & EMAP or SUPERID
        For each superelement:
            USET
            EQEXINS
            CSTMS (if required)
            BGPDTS
            MAPS (if required)

    Note: The 2nd bit for the DOF column of all USET tables is
    cleared for all S-set. See :func:`n2y.mkusetmask` for more
    information.

    See rdnas2cam, n2y.

    Example::

        import op2
        import n2y
        # list superelement 100 DOF that are in the B set:
        o2 = op2.OP2('nas2cam.op2')
        nas = op2.rdn2cop2()
        bset = n2y.mksetpv(nas['uset'][100], 'p', 'b')
        print('bset of se100 = ', nas['uset'][100][bset, :2])
    """
    # setup basic coordinate system info and a dummy for spoints:
    bc = np.array([[+0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
                   [-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    nas = {'uset': {},
           'cstm': {},
           'cstm2': {},
           'maps': {},
           'dnids': {},
           'upids': {}}
    self._fileh.seek(self._postheaderpos)
    # read datablock (slist) header record:
    name, trailer, dbtype = self._read_op2_name_trailer()
    if dbtype > 0:
        # the selist came as a SUPERID matrix; append the residual row
        selist = np.hstack((self.read_op2_matrix(trailer), [[0]]))
        selist = selist.astype(int)
        # NOTE(review): the name/trailer is read here AND again right
        # after the if/else below -- looks like a double read on this
        # path; confirm against a matrix-style nas2cam file
        name, trailer, dbtype = self._read_op2_name_trailer()
    else:
        selist = self._read_op2_selist()
    nse = np.size(selist, 0)
    name, trailer, dbtype = self._read_op2_name_trailer()
    if name == "EMAP":
        # fills in nas['dnids'] (and friends) per superelement
        self._read_op2_emap(nas, nse, trailer)
        name, trailer, dbtype = self._read_op2_name_trailer()
    # read uset and eqexins tables and do some processing:
    for se in selist[:, 0]:
        if not name:
            break
        uset = self._read_op2_uset()
        name, trailer, dbtype = self._read_op2_name_trailer()
        eqexin1, eqexin = self._read_op2_eqexin()
        name, trailer, dbtype = self._read_op2_name_trailer()
        if name == "CSTMS":
            # prepend the basic (0) and SPOINT-dummy (-1) systems
            cstm = np.vstack((bc, self._read_op2_cstm()))
            name, trailer, dbtype = self._read_op2_name_trailer()
        else:
            cstm = bc
        bgpdt, dof, doftype, nid, upids = self._proc_bgpdt(eqexin1, eqexin)
        nas['upids'][se] = upids
        Uset, cstm, cstm2 = self._build_Uset(se, dof, doftype, nid,
                                             uset, bgpdt, cstm, None)
        nas['uset'][se] = Uset
        nas['cstm'][se] = cstm
        nas['cstm2'][se] = cstm2
        name, trailer, dbtype = self._read_op2_name_trailer()
        if name == "MAPS":
            nas['maps'][se] = self._read_op2_maps()
            name, trailer, dbtype = self._read_op2_name_trailer()
        else:
            nas['maps'][se] = []
    nas['selist'] = selist
    return nas
def read_nas2cam(op2file='nas2cam', op4file=None):
    """
    Read op2/op4 data written by the DMAP NAS2CAM.

    Parameters
    ----------
    op2file : string
        Either the basename of the .op2 and .op4 files, or the full
        name of the .op2 file
    op4file : string or None
        The name of the .op4 file or, if None, builds name from the
        `op2file` input.

    Returns dictionary with these members:
    -------------------------------------
    All members created by :func:`OP2.read_nas2cam_op2` (see that
    routine's help).

    'nrb' : integer
        The number of rigid-body modes for residual.

    All the following members are dictionaries indexed by SE number
    (note that this routine will read all matrices for each SE, not
    just those listed here):

    'ulvs' : dictionary
        The ULVS matrices (row partitions of residual modes to the
        A-set DOF of the SE).
    'lambda' : dictionary
        The eigenvalues for each SE.
    'gm' : dictionary
        N-set to M-set transformation matrix GM: M = GM N.
    'got' : dictionary
        constraint modes
    'goq' : dictionary
        normal modes
    'rfmodes' : dictionary
        index partition vector for res-flex modes
    'maa' : dictionary
        A-set mass
    'baa' : dictionary
        A-set damping
    'kaa' : dictionary
        A-set stiffness
    'pha' : dictionary
        A-set modes
    'mdd' : dictionary
        D-set mass
    'bdd' : dictionary
        D-set damping
    'kdd' : dictionary
        D-set stiffness
    'pdt' : dictionary
        D-set loads
    'mgg' : dictionary
        G-set mass
    'kgg' : dictionary
        G-set stiffness
    'phg' : dictionary
        G-set mode shape matrix
    'rbg' : dictionary
        G-set rigid-body modes; see also drg output and rbgeom_uset
    'drg' : dictionary
        G-set transpose of rigid-body modes; see also 'rbg' and
        :func:n2y.`rbgeom_uset`. `drg` = `rbg.T` if both are
        present.
    'pg' : dictionary
        G-set loads

    And any other "extra" matrices that were written to the op4
    file. Some common extras are:

    'fgravh' : array
        gravity on generalized dof for se 0
    'fgravg' : array
        gravity on G-set physical dof for se 0

    See :func:`OP2.read_nas2cam_op2` for a description of what is
    expected of the `op2file`. The `op4file` is expected to contain
    certain marker matrices. Scalar SE_START starts each superelement
    and can be followed by any matrices for that superelement. The end
    of the superelement input is marked by a matrix named LOOP_END.

    See also the Nastran DMAP NAS2CAM.
    """
    if not op4file:
        # `op2file` is a basename; build both file names from it
        op4file = op2file + '.op4'
        op2file = op2file + '.op2'
    # read op2 file:
    with OP2(op2file) as o2:
        # BUG FIX: was ``o2.rdn2cop2()``; the method was renamed to
        # read_nas2cam_op2 (see the class above)
        nas = o2.read_nas2cam_op2()
    # read op4 file:
    from pyNastran.op2.dev.op4 import OP4
    o4 = OP4()
    # BUG FIX: was ``listload(op4file)[:1]`` -- a 1-tuple cannot be
    # unpacked into two names; we need the first two return values
    # (names and variables), matching the commented-out original:
    # op4names, op4vars, *_ = o4.listload(op4file)
    op4names, op4vars = o4.listload(op4file)[:2]
    # loop over superelements:
    j = 0
    for se in nas['selist'][:, 0]:
        if op4names[j] != "se_start":
            raise RuntimeError("matrices are not in understandable"
                               " order. Expected 'se_start', got "
                               "'{0}'".format(op4names[j]))
        # read all matrices for this se
        j += 1
        while True:
            name = op4names[j]
            if name in ("loop_end", "se_start"):
                # go on to next se or to residual
                break
            if name not in nas:
                nas[name] = {}
            if se == 0 and name == "lambda":
                # count number of rigid body modes (eigenvalues < .005)
                nrb = sum(op4vars[j] < .005)[0]
                nas['nrb'] = nrb
                nas['lambda'][0] = abs(op4vars[j].flatten())
            elif name == 'lambda':
                nas[name][se] = op4vars[j].flatten()
            elif name == 'rfmodes':
                nas[name][se] = np.nonzero(op4vars[j])[0]
            else:
                nas[name][se] = op4vars[j]
            j += 1
        if name == "loop_end":
            j += 1
            break
    # any matrices after the final loop_end are "extras":
    while j < len(op4vars):
        nas[op4names[j]] = op4vars[j]
        j += 1
    return nas
def get_dof_descs():
    """
    Returns dictionary of descriptions for Nastran data recovery items.

    Normally called by :func:`procdrm12`.

    Returns
    -------
    desc : dictionary
        Has keys: 'acce', 'spcf', 'stress', 'force'

    desc['acce'] : numpy string array
        ['T1', 'T2', 'T3', 'R1', 'R2', 'R3']
    desc['spcf'] : numpy string array
        ['Fx', 'Fy', 'Fz', 'Mx', 'My', 'Mz']
    desc['stress'] : dict
        Dictionary with element numbers as keys to numpy string arrays.
    desc['force'] : dict
        Dictionary with element numbers as keys to numpy string arrays.

    The stress and force returns are dictionaries indexed by the
    element id. For example, for the CBAR (which is element 34)::

        desc['stress'][34] = ['CBAR Bending Stress 1 - End A',
                              'CBAR Bending Stress 2 - End A',
                              ...]
        desc['force'][34] = ['CBAR Bending Moment 1 - End A',
                             'CBAR Bending Moment 2 - End A',
                             ...]
    """
    # Acceleration, Velocity, Displacement Recovery Items:
    accedesc = ["T1", "T2", "T3", "R1", "R2", "R3"]
    spcfdesc = ["Fx", "Fy", "Fz", "Mx", "My", "Mz"]
    stress = {}
    force = {}
    #  CBAR Recovery Items (element 34):                Item code
    stress[34] = ["CBAR Bending Stress 1 - End A",         # 2
                  "CBAR Bending Stress 2 - End A",         # 3
                  "CBAR Bending Stress 3 - End A",         # 4
                  "CBAR Bending Stress 4 - End A",         # 5
                  "CBAR Axial Stress",                     # 6
                  "CBAR Max. Bend. Stress -End A",         # 7
                  "CBAR Min. Bend. Stress -End A",         # 8
                  "CBAR M.S. Tension",                     # 9
                  "CBAR Bending Stress 1 - End B",         # 10
                  "CBAR Bending Stress 2 - End B",         # 11
                  "CBAR Bending Stress 3 - End B",         # 12
                  "CBAR Bending Stress 4 - End B",         # 13
                  "CBAR Max. Bend. Stress -End B",         # 14
                  "CBAR Min. Bend. Stress -End B",         # 15
                  "CBAR M.S. Compression"]                 # 16
    force[34] = ["CBAR Bending Moment 1 - End A",          # 2
                 "CBAR Bending Moment 2 - End A",          # 3
                 "CBAR Bending Moment 1 - End B",          # 4
                 "CBAR Bending Moment 2 - End B",          # 5
                 "CBAR Shear 1",                           # 6
                 "CBAR Shear 2",                           # 7
                 "CBAR Axial Force",                       # 8
                 "CBAR Torque"]                            # 9
    #  CBEAM Recovery Items (element 2):                Item code
    stress2_main = ["CBEAM External grid pt. ID",          # 2
                    "CBEAM Station dist./length",          # 3
                    "CBEAM Long. Stress at Pt. C",         # 4
                    "CBEAM Long. Stress at Pt. D",         # 5
                    "CBEAM Long. Stress at Pt. E",         # 6
                    "CBEAM Long. Stress at Pt. F",         # 7
                    "CBEAM Maximum stress",                # 8
                    "CBEAM Minimum stress",                # 9
                    "CBEAM M.S. Tension",                  # 10
                    "CBEAM M.S. Compression"]              # 11
    # expand and append station id for all 11 stations:
    stress2 = [i+' End-A' for i in stress2_main]
    for K in range(2, 11):
        id_string = ' K={0:2}'.format(K)
        stress2 += [i+id_string for i in stress2_main]
    stress2 += [i+' End-B' for i in stress2_main]
    stress[2] = stress2
    force2_main = ["CBEAM External grid pt. ID",           # 2
                   "CBEAM Station dist./length",           # 3
                   "CBEAM Bending moment plane 1",         # 4
                   "CBEAM Bending moment plane 2",         # 5
                   "CBEAM Web shear plane 1",              # 6
                   "CBEAM Web shear plane 2",              # 7
                   "CBEAM Axial force",                    # 8
                   "CBEAM Total torque",                   # 9
                   "CBEAM Warping torque"]                 # 10
    # expand and append station id for all 11 stations:
    force2 = [i+' End-A' for i in force2_main]
    for K in range(2, 11):
        id_string = ' K={0:2}'.format(K)
        force2 += [i+id_string for i in force2_main]
    force2 += [i+' End-B' for i in force2_main]
    force[2] = force2
    #  CBUSH Recovery Items (element 102):               Item code
    stress[102] = ["CBUSH Translation-x",                  # 2
                   "CBUSH Translation-y",                  # 3
                   "CBUSH Translation-z",                  # 4
                   "CBUSH Rotation-x",                     # 5
                   "CBUSH Rotation-y",                     # 6
                   "CBUSH Rotation-z"]                     # 7
    force[102] = ["CBUSH Force-x",                         # 2
                  "CBUSH Force-y",                         # 3
                  "CBUSH Force-z",                         # 4
                  "CBUSH Moment-x",                        # 5
                  "CBUSH Moment-y",                        # 6
                  "CBUSH Moment-z"]                        # 7
    #  CROD Recovery Items (element 10=CONROD, 1=CROD):
    stress1 = ["Axial Stress",                             # 2
               "M.S. Axial Stress",                        # 3
               "Torsional Stress",                         # 4
               "M.S. Torsional Stress"]                    # 5
    force1 = ["Axial Force",                               # 2
              "Torque"]                                    # 3
    stress[1] = ['CROD '+ i + '  ' for i in stress1]
    force[1] = ['CROD '+ i + '  ' for i in force1]
    stress[10] = ['CONROD ' + i for i in stress1]
    force[10] = ['CONROD ' + i for i in force1]
    #  CELAS1, 2, 3 Recovery Items (elements 11, 12, 13):
    stress[11] = 'CELAS1 Stress'
    stress[12] = 'CELAS2 Stress'
    stress[13] = 'CELAS3 Stress'
    force[11] = 'CELAS1 Force'
    force[12] = 'CELAS2 Force'
    force[13] = 'CELAS3 Force'
    #  CQUAD4 Recovery Items (element 33):
    stress[33] = ["CQUAD4 Fiber distance Z1",              # 2
                  "CQUAD4 Z1 Normal x",                    # 3
                  "CQUAD4 Z1 Normal y",                    # 4
                  "CQUAD4 Z1 Shear xy",                    # 5
                  "CQUAD4 Z1 Shear angle",                 # 6
                  "CQUAD4 Z1 Major principal",             # 7
                  "CQUAD4 Z1 Minor principal",             # 8
                  "CQUAD4 Z1 von Mises or max shear",      # 9
                  "CQUAD4 Fiber distance Z2",              # 10
                  "CQUAD4 Z2 Normal x",                    # 11
                  "CQUAD4 Z2 Normal y",                    # 12
                  "CQUAD4 Z2 Shear xy",                    # 13
                  "CQUAD4 Z2 Shear angle",                 # 14
                  "CQUAD4 Z2 Major principal",             # 15
                  "CQUAD4 Z2 Minor principal",             # 16
                  "CQUAD4 Z2 von Mises or max shear"]      # 17
    force[33] = ["CQUAD4 Membrane force x",                # 2
                 "CQUAD4 Membrane force y",                # 3
                 "CQUAD4 Membrane force xy",               # 4
                 "CQUAD4 Bending moment x",                # 5
                 "CQUAD4 Bending moment y",                # 6
                 "CQUAD4 Bending moment xy",               # 7
                 "CQUAD4 Shear x",                         # 8
                 "CQUAD4 Shear y"]                         # 9
    #  CQUADR Recovery Items (element 82, and CQUAD8-64):
    stress[82] = ["CQUADR EID                          ",  # 1
                  "CQUADR CEN/                         ",  # 2
                  "CQUADR 4                            ",  # 3
                  "CQUADR Fiber distance Z1            ",  # 4
                  "CQUADR Z1 Normal x                  ",  # 5
                  "CQUADR Z1 Normal y                  ",  # 6
                  "CQUADR Z1 Shear xy                  ",  # 7
                  "CQUADR Z1 Shear angle               ",  # 8
                  "CQUADR Z1 Major principal           ",  # 9
                  "CQUADR Z1 Minor principal           ",  # 10
                  "CQUADR Z1 von Mises or max shear    ",  # 11
                  "CQUADR Fiber distance Z2            ",  # 12
                  "CQUADR Z2 Normal x                  ",  # 13
                  "CQUADR Z2 Normal y                  ",  # 14
                  "CQUADR Z2 Shear xy                  ",  # 15
                  "CQUADR Z2 Shear angle               ",  # 16
                  "CQUADR Z2 Major principal           ",  # 17
                  "CQUADR Z2 Minor principal           ",  # 18
                  "CQUADR Z2 von Mises or max shear    ",  # 19
                  "CQUADR Grid 1                       ",  # 20
                  "CQUADR Fiber distance Z1          c1",  # 21
                  "CQUADR Z1 Normal x                c1",  # 22
                  "CQUADR Z1 Normal y                c1",  # 23
                  "CQUADR Z1 Shear xy                c1",  # 24
                  "CQUADR Z1 Shear angle             c1",  # 25
                  "CQUADR Z1 Major principal         c1",  # 26
                  "CQUADR Z1 Minor principal         c1",  # 27
                  "CQUADR Z1 von Mises or max shear  c1",  # 28
                  "CQUADR Fiber distance Z2          c1",  # 29
                  "CQUADR Z2 Normal x                c1",  # 30
                  "CQUADR Z2 Normal y                c1",  # 31
                  "CQUADR Z2 Shear xy                c1",  # 32
                  "CQUADR Z2 Shear angle             c1",  # 33
                  "CQUADR Z2 Major principal         c1",  # 34
                  "CQUADR Z2 Minor principal         c1",  # 35
                  "CQUADR Z2 von Mises or max shear  c1",  # 36
                  "CQUADR Grid 2                       ",  # 37
                  "CQUADR Fiber distance Z1          c2",  # 38
                  "CQUADR Z1 Normal x                c2",  # 39
                  "CQUADR Z1 Normal y                c2",  # 40
                  "CQUADR Z1 Shear xy                c2",  # 41
                  "CQUADR Z1 Shear angle             c2",  # 42
                  "CQUADR Z1 Major principal         c2",  # 43
                  "CQUADR Z1 Minor principal         c2",  # 44
                  "CQUADR Z1 von Mises or max shear  c2",  # 45
                  "CQUADR Fiber distance Z2          c2",  # 46
                  "CQUADR Z2 Normal x                c2",  # 47
                  "CQUADR Z2 Normal y                c2",  # 48
                  "CQUADR Z2 Shear xy                c2",  # 49
                  "CQUADR Z2 Shear angle             c2",  # 50
                  "CQUADR Z2 Major principal         c2",  # 51
                  "CQUADR Z2 Minor principal         c2",  # 52
                  "CQUADR Z2 von Mises or max shear  c2",  # 53
                  "CQUADR Grid 3                       ",  # 54
                  "CQUADR Fiber distance Z1          c3",  # 55
                  "CQUADR Z1 Normal x                c3",  # 56
                  "CQUADR Z1 Normal y                c3",  # 57
                  "CQUADR Z1 Shear xy                c3",  # 58
                  "CQUADR Z1 Shear angle             c3",  # 59
                  "CQUADR Z1 Major principal         c3",  # 60
                  "CQUADR Z1 Minor principal         c3",  # 61
                  "CQUADR Z1 von Mises or max shear  c3",  # 62
                  "CQUADR Fiber distance Z2          c3",  # 63
                  "CQUADR Z2 Normal x                c3",  # 64
                  "CQUADR Z2 Normal y                c3",  # 65
                  "CQUADR Z2 Shear xy                c3",  # 66
                  "CQUADR Z2 Shear angle             c3",  # 67
                  "CQUADR Z2 Major principal         c3",  # 68
                  "CQUADR Z2 Minor principal         c3",  # 69
                  "CQUADR Z2 von Mises or max shear  c3",  # 70
                  "CQUADR Grid 4                       ",  # 71
                  "CQUADR Fiber distance Z1          c4",  # 72
                  "CQUADR Z1 Normal x                c4",  # 73
                  "CQUADR Z1 Normal y                c4",  # 74
                  "CQUADR Z1 Shear xy                c4",  # 75
                  "CQUADR Z1 Shear angle             c4",  # 76
                  "CQUADR Z1 Major principal         c4",  # 77
                  "CQUADR Z1 Minor principal         c4",  # 78
                  "CQUADR Z1 von Mises or max shear  c4",  # 79
                  "CQUADR Fiber distance Z2          c4",  # 80
                  "CQUADR Z2 Normal x                c4",  # 81
                  "CQUADR Z2 Normal y                c4",  # 82
                  "CQUADR Z2 Shear xy                c4",  # 83
                  "CQUADR Z2 Shear angle             c4",  # 84
                  "CQUADR Z2 Major principal         c4",  # 85
                  "CQUADR Z2 Minor principal         c4",  # 86
                  "CQUADR Z2 von Mises or max shear  c4"]  # 87
    force[82] = ["CQUADR Membrane force x             ",   # 4
                 "CQUADR Membrane force y             ",   # 5
                 "CQUADR Membrane force xy            ",   # 6
                 "CQUADR Bending moment x             ",   # 7
                 "CQUADR Bending moment y             ",   # 8
                 "CQUADR Bending moment xy            ",   # 9
                 "CQUADR Shear x                      ",   # 10
                 "CQUADR Shear y                      ",   # 11
                 "CQUADR (non-documented item)        ",   # 12
                 "CQUADR Membrane force x           c1",   # 13
                 "CQUADR Membrane force y           c1",   # 14
                 "CQUADR Membrane force xy          c1",   # 15
                 "CQUADR Bending moment x           c1",   # 16
                 "CQUADR Bending moment y           c1",   # 17
                 "CQUADR Bending moment xy          c1",   # 18
                 "CQUADR Shear x                    c1",   # 19
                 "CQUADR Shear y                    c1",   # 20
                 "CQUADR (non-documented item)        ",   # 21
                 "CQUADR Membrane force x           c2",   # 22
                 "CQUADR Membrane force y           c2",   # 23
                 "CQUADR Membrane force xy          c2",   # 24
                 "CQUADR Bending moment x           c2",   # 25
                 "CQUADR Bending moment y           c2",   # 26
                 "CQUADR Bending moment xy          c2",   # 27
                 "CQUADR Shear x                    c2",   # 28
                 "CQUADR Shear y                    c2",   # 29
                 "CQUADR (non-documented item)        ",   # 30
                 "CQUADR Membrane force x           c3",   # 31
                 "CQUADR Membrane force y           c3",   # 32
                 "CQUADR Membrane force xy          c3",   # 33
                 "CQUADR Bending moment x           c3",   # 34
                 "CQUADR Bending moment y           c3",   # 35
                 "CQUADR Bending moment xy          c3",   # 36
                 "CQUADR Shear x                    c3",   # 37
                 "CQUADR Shear y                    c3",   # 38
                 "CQUADR (non-documented item)        ",   # 39
                 "CQUADR Membrane force x           c4",   # 40
                 "CQUADR Membrane force y           c4",   # 41
                 "CQUADR Membrane force xy          c4",   # 42
                 "CQUADR Bending moment x           c4",   # 43
                 "CQUADR Bending moment y           c4",   # 44
                 "CQUADR Bending moment xy          c4",   # 45
                 "CQUADR Shear x                    c4",   # 46
                 "CQUADR Shear y                    c4"]   # 47
    # CQUAD8-64 uses the same items with a different element name:
    stress[64] = [i.replace('CQUADR', 'CQ8-64') for i in stress[82]]
    force[64] = [i.replace('CQUADR', 'CQ8-64') for i in force[82]]
    #  CTRIAR Recovery Items (element 70, and CTRIA6-75):
    stress[70] = ["CTRIAR Z1 Normal x                  ",  # 5
                  "CTRIAR Z1 Normal y                  ",  # 6
                  "CTRIAR Z1 Shear xy                  ",  # 7
                  "CTRIAR Z1 Q shear angle             ",  # 8
                  "CTRIAR Z1 Major principal           ",  # 9
                  "CTRIAR Z1 Minor principal           ",  # 10
                  "CTRIAR Z1 von Mises or max shear    ",  # 11
                  "CTRIAR (non-documented item)        ",  # 12
                  "CTRIAR Z2 Normal x                  ",  # 13
                  "CTRIAR Z2 Normal y                  ",  # 14
                  "CTRIAR Z2 Shear xy                  ",  # 15
                  "CTRIAR Z2 Q shear angle             ",  # 16
                  "CTRIAR Z2 Major principal           ",  # 17
                  "CTRIAR Z2 Minor principal           ",  # 18
                  "CTRIAR Z2 von Mises or max shear    ",  # 19
                  "CTRIAR (non-documented item)        ",  # 20
                  "CTRIAR (non-documented item)        ",  # 21
                  "CTRIAR Z1 Normal x                c1",  # 22
                  "CTRIAR Z1 Normal y                c1",  # 23
                  "CTRIAR Z1 Shear xy                c1",  # 24
                  "CTRIAR Z1 Q shear angle           c1",  # 25
                  "CTRIAR Z1 Major principal         c1",  # 26
                  "CTRIAR Z1 Minor principal         c1",  # 27
                  "CTRIAR Z1 von Mises or max shear  c1",  # 28
                  "CTRIAR (non-documented item)      c1",  # 29
                  "CTRIAR Z2 Normal x                c1",  # 30
                  "CTRIAR Z2 Normal y                c1",  # 31
                  "CTRIAR Z2 Shear xy                c1",  # 32
                  "CTRIAR Z2 Q shear angle           c1",  # 33
                  "CTRIAR Z2 Major principal         c1",  # 34
                  "CTRIAR Z2 Minor principal         c1",  # 35
                  "CTRIAR Z2 von Mises or max shear  c1",  # 36
                  "CTRIAR (non-documented item)        ",  # 37
                  "CTRIAR (non-documented item)        ",  # 38
                  "CTRIAR Z1 Normal x                c2",  # 39
                  "CTRIAR Z1 Normal y                c2",  # 40
                  "CTRIAR Z1 Shear xy                c2",  # 41
                  "CTRIAR Z1 Q shear angle           c2",  # 42
                  "CTRIAR Z1 Major principal         c2",  # 43
                  "CTRIAR Z1 Minor principal         c2",  # 44
                  "CTRIAR Z1 von Mises or max shear  c2",  # 45
                  "CTRIAR (non-documented item)      c2",  # 46
                  "CTRIAR Z2 Normal x                c2",  # 47
                  "CTRIAR Z2 Normal y                c2",  # 48
                  "CTRIAR Z2 Shear xy                c2",  # 49
                  "CTRIAR Z2 Q shear angle           c2",  # 50
                  "CTRIAR Z2 Major principal         c2",  # 51
                  "CTRIAR Z2 Minor principal         c2",  # 52
                  "CTRIAR Z2 von Mises or max shear  c2",  # 53
                  "CTRIAR (non-documented item)        ",  # 54
                  "CTRIAR (non-documented item)        ",  # 55
                  "CTRIAR Z1 Normal x                c3",  # 56
                  "CTRIAR Z1 Normal y                c3",  # 57
                  "CTRIAR Z1 Shear xy                c3",  # 58
                  "CTRIAR Z1 Q shear angle           c3",  # 59
                  "CTRIAR Z1 Major principal         c3",  # 60
                  "CTRIAR Z1 Minor principal         c3",  # 61
                  "CTRIAR Z1 von Mises or max shear  c3",  # 62
                  "CTRIAR (non-documented item)      c3",  # 63
                  "CTRIAR Z2 Normal x                c3",  # 64
                  "CTRIAR Z2 Normal y                c3",  # 65
                  "CTRIAR Z2 Shear xy                c3",  # 66
                  "CTRIAR Z2 Q shear angle           c3",  # 67
                  "CTRIAR Z2 Major principal         c3",  # 68
                  "CTRIAR Z2 Minor principal         c3",  # 69
                  "CTRIAR Z2 von Mises or max shear  c3"]  # 70
    force[70] = ["CTRIAR Membrane force x             ",   # 4
                 "CTRIAR Membrane force y             ",   # 5
                 "CTRIAR Membrane force xy            ",   # 6
                 "CTRIAR Bending moment x             ",   # 7
                 "CTRIAR Bending moment y             ",   # 8
                 "CTRIAR Bending moment xy            ",   # 9
                 "CTRIAR Shear x                      ",   # 10
                 "CTRIAR Shear y                      ",   # 11
                 "CTRIAR (non-documented item)        ",   # 12
                 "CTRIAR Membrane force x           c1",   # 13
                 "CTRIAR Membrane force y           c1",   # 14
                 "CTRIAR Membrane force xy          c1",   # 15
                 "CTRIAR Bending moment x           c1",   # 16
                 "CTRIAR Bending moment y           c1",   # 17
                 "CTRIAR Bending moment xy          c1",   # 18
                 "CTRIAR Shear x                    c1",   # 19
                 "CTRIAR Shear y                    c1",   # 20
                 "CTRIAR (non-documented item)        ",   # 21
                 "CTRIAR Membrane force x           c2",   # 22
                 "CTRIAR Membrane force y           c2",   # 23
                 "CTRIAR Membrane force xy          c2",   # 24
                 "CTRIAR Bending moment x           c2",   # 25
                 "CTRIAR Bending moment y           c2",   # 26
                 "CTRIAR Bending moment xy          c2",   # 27
                 "CTRIAR Shear x                    c2",   # 28
                 "CTRIAR Shear y                    c2",   # 29
                 "CTRIAR (non-documented item)        ",   # 30
                 "CTRIAR Membrane force x           c3",   # 31
                 "CTRIAR Membrane force y           c3",   # 32
                 "CTRIAR Membrane force xy          c3",   # 33
                 "CTRIAR Bending moment x           c3",   # 34
                 "CTRIAR Bending moment y           c3",   # 35
                 "CTRIAR Bending moment xy          c3",   # 36
                 "CTRIAR Shear x                    c3",   # 37
                 "CTRIAR Shear y                    c3"]   # 38
    # CTRIA6-75 uses the same items with a different element name:
    stress[75] = [i.replace('CTRIAR', 'CT6-75') for i in stress[70]]
    force[75] = [i.replace('CTRIAR', 'CT6-75') for i in force[70]]
    # convert the description lists to numpy string arrays:
    for i in stress:
        stress[i] = np.array(stress[i])
        force[i] = np.array(force[i])
    return {'acce': np.array(accedesc),
            'spcf': np.array(spcfdesc),
            'stress': stress,
            'force': force}
def _get_tinr(iddof, idj):
"""
Called by get_drm.
Parameters
----------
iddof : 2d array
Each col has [type, id, number of rows, start row]
idj : integer
Id to return info for.
Returns tuple of (type, start row)
Note: start row return value starts at 0, not at 1.
"""
i = np.nonzero(iddof[1] == idj)[0]
tinr = iddof[:, i]
return tinr[0, 0], tinr[3, 0]-1
def get_drm(drminfo, otm, drms, drmkeys, dr, desc):
    """
    Called by :func:`procdrm12` to add displacement-dependent data
    recovery items to the otm input.

    Parameters
    ----------
    drminfo : tuple
        DRM Information; (output drm name, 3 or 5 character Nastran
        name, description index).

        - if the second input is 3 chars, say '---', this routine
          uses the following members of `drms` and `drmkeys`::

              'm---d1', 'm---s1' and 't---d1' if available (mode-acce), or
              'm---x1', 't---x1' if not (mode-disp)

        - if the second input is 5 chars, say '-----', this routine
          uses 'm-----' and 't-----'
        - the description index is used to get info from `desc`.

    otm : input/output dictionary
        Filled in with 'DTM' (or 'DTMA', 'DTMD') and 'DTM_id_dof',
        'DTM_desc'.
    drms : dictionary
        Contains all drms from op4 file.
    drmkeys : dictionary
        Contains the keys (directories) to the drms.
    dr : array
        Matrix 3 x number of data recovery items: [type; id; dof].
        Type is 1 for displacements.
    desc : dictionary
        Output of :func:`get_dof_descs`.

    Examples usages::

        get_drm(('DTM', 'oug', 'acce'), otm, drms, drmkeys, dr, desc)
        get_drm(('ATM', 'ougv1', 'acce'), ...)
        get_drm(('LTM', 'oef', 'force'), ...)
        get_drm(('SPCF', 'oqg', 'spcf'), ...)
        get_drm(('STM', 'oes', 'stress'), ...)
    """
    drc = dr.shape[1]  # number of requested data recovery items
    ID = dr[1, :]      # requested grid/element ids
    DOF = dr[2, :]     # requested dof (or Nastran item code)
    nm, nasnm, desci = drminfo
    otm[nm+'_id_dof'] = np.vstack((ID, DOF)).T
    # arg offset is for translating between Nastran argument to
    # matrix index; eg 'x' recovery for a grid is arg 3, so offset
    # is 3
    if nasnm.find('oug') > -1 or nasnm.find('oqg') > -1:
        offset = 3
        otm[nm+'_id_dof'][:, 1] -= 2
    else:
        offset = 2
    # descriptions are either an array indexed directly by item code, or
    # a dict keyed by element type that is resolved inside the loops below
    if not isinstance(desc[desci], dict):
        otm[nm+'_desc'] = desc[desci][DOF-offset]
        getdesc = False
    else:
        getdesc = True
        _desc = nm+'_desc'
        otm[_desc] = [''] * drc
        _dct = desc[desci]
        _name = desci.capitalize()
    if len(nasnm) == 3 and 'm'+nasnm+'d1' in drms:
        # mode-acceleration data recovery: build separate
        # acceleration-dependent ('A') and displacement-dependent ('D')
        # output matrices
        d1 = drms['m'+nasnm+'d1'][0]
        s1 = drms['m'+nasnm+'s1'][0]
        iddof = drmkeys['t'+nasnm+'d1']
        acce = nm+'A'
        disp = nm+'D'
        otm[acce] = np.zeros((drc, d1.shape[1]))
        otm[disp] = np.zeros((drc, s1.shape[1]))
        lastid = -1
        for j in range(drc):  # loop over requests
            # find rows corresponding to requested grid
            if ID[j] != lastid:  # table lookup only when the id changes
                eltype, srow = _get_tinr(iddof, ID[j])
                lastid = ID[j]
            otm[acce][j] = d1[srow+DOF[j]-offset]
            otm[disp][j] = s1[srow+DOF[j]-offset]
            if getdesc:
                if eltype in _dct:
                    otm[_desc][j] = _dct[eltype][DOF[j]-offset]
                else:
                    # element type has no description table: generic label
                    otm[_desc][j] = ('EL-{0}, El. Type {1:3}, '
                                     'Code {2:3} ').format(_name,
                                                           eltype,
                                                           DOF[j])
    else:
        # mode-displacement data recovery: a single output matrix
        if len(nasnm) == 3:
            matname = 'm'+nasnm+'x1'
            tabname = 't'+nasnm+'x1'
        else:
            matname = 'm'+nasnm
            tabname = 't'+nasnm
        x1 = drms[matname][0]
        iddof = drmkeys[tabname]
        otm[nm] = np.zeros((drc, x1.shape[1]))
        lastid = -1
        for j in range(drc):  # loop over requests
            # find rows corresponding to requested grid
            if ID[j] != lastid:  # table lookup only when the id changes
                eltype, srow = _get_tinr(iddof, ID[j])
                lastid = ID[j]
            otm[nm][j] = x1[srow+DOF[j]-offset]
            if getdesc:
                if eltype in _dct:
                    otm[_desc][j] = _dct[eltype][DOF[j]-offset]
                else:
                    # element type has no description table: generic label
                    otm[_desc][j] = ('EL-{0}, El. Type {1:3}, '
                                     'Code {2:3} ').format(_name,
                                                           eltype,
                                                           DOF[j])
def proccess_drm1_drm2(op2file, op4file=None, dosort=True):
    """
    Process op2/op4 file2 output from DRM1/DRM2 DMAPs to form data
    recovery matrices.

    Parameters
    ----------
    op2file : string
        Either the basename of the .op2 and .op4 files, or the full
        name of the .op2 file
    op4file : string or None
        The name of the .op4 file or, if None, builds name from the
        `op2file` input.
    dosort : bool
        If True, sort data recovery rows in ascending order by ID/DOF.
        Otherwise, return in order requested in Nastran run.

    Returns
    -------
    otm : dictionary
        Has data recovery matrices (DRMs), id/dof info, and generic
        descriptions. The potential DRM keys are::

            'ATM'  : acceleration DRM

            For mode-displacement:
            'DTM'  : displacement DRM
            'LTM'  : element force (loads) DRM
            'SPCF' : SPC forces DRM
            'STM'  : element stress DRM

            For mode-acceleration:
            'DTMD' : displacement-dependent part of displacement DRM
            'DTMA' : acceleration-dependent part of displacement DRM
            'LTMD' : displacement-dependent part of element force DRM
            'LTMA' : acceleration-dependent part of element force DRM
            'SPCFD': displacement-dependent part of SPCF forces DRM
            'SPCFA': acceleration-dependent part of SPCF forces DRM
            'STMD' : displacement-dependent part of element stress DRM
            'STMA' : displacement-dependent part of element stress DRM

        The id/dof matrices are each 2 columns of [id, dof] with number
        of rows equal to the number of rows in corresponding DRM. The
        keys are the applicable strings from::

            'ATM_id_dof'
            'DTM_id_dof'
            'LTM_id_dof' - dof is actually the Nastran item code
            'SPCF_id_dof'
            'STM_id_dof' - dof is actually the Nastran item code

        The descriptions are arrays of strings with generic descriptions
        for each data recovery item. Length is equal to number of rows
        in corresponding DRM. See :func:`get_dof_descs` for more
        information. The keys are the applicable strings from::

            'ATM_desc'
            'DTM_desc'
            'LTM_desc',
            'SPCF_desc'
            'STM_desc'.

    Currently, only displacements, accelerations, SPC forces, element
    forces and element stresses (for some elements) are implemented.

    Example usage::

        import op2
        otm = op2.proccess_drm1_drm2('drm2')
    """
    if not op4file:
        # only the basename was given: derive both file names from it
        op4file = op2file + '.op4'
        op2file = op2file + '.op2'
    # read op4 file:
    from pyNastran.op2.dev.op4 import OP4
    o4 = OP4()
    drms = o4.dctload(op4file)
    with OP2(op2file) as o2:
        drm_keys = o2.rddrm2op2()
    N = drm_keys['drs'].shape[1]  # total number of data recovery items
    # drs format:
    # 6 elements per recovery item:
    #     1 - Subcase number (0 for all)
    #     2 - Vector request type
    #     3 - Point or Element ID
    #     4 - Component
    #     5 - XY output type
    #     6 - Destination code
    # Vector request type:
    Vreq = ["Displacement",     # 1
            "Velocity",         # 2
            "Acceleration",     # 3
            "SPC Force",        # 4
            "Load",             # 5
            "Stress",           # 6
            "Element Force",    # 7
            "SDisplacement",    # 8
            "SVelocity",        # 9
            "SAcceleration",    # 10
            "Nonlinear Force",  # 11
            "Total"]            # 12
    # XY output type:
    #     1 = Response
    #     2 = PSDF
    #     3 = AUTO
    #
    # Destination code:
    #     0 = XYpeak only (from DRMEXT)
    #     1 = Print
    #     2 = Plot
    #     3 = Print, Plot
    #     4 = Punch
    #     5 = Print, Punch
    #     6 = Plot, Punch
    #     7 = Print, Plot, Punch
    if not dosort:
        # keep rows in the order they were requested in the Nastran run:
        # decode the raw 'dr' stream (one record per XYPEAK card)
        # reshape dr:
        dr = drm_keys['dr']
        r = np.nonzero(dr == dr[0])[0]  # start index of every XYPEAK card
        r = np.hstack((r, len(dr)))
        n = len(r) - 1
        # dr(r) = ? -- starts every XYPEAK card
        # dr(r+1:3) = 0, 0, 0 ?
        # dr(r+4) = 1 ?
        # dr(r+5) = request type
        # dr(r+6:8) = 0, 0, #(?)
        # dr(r+9) = id 1
        # dr(r+10) = dof 1
        # dr(r+11) = 0
        # ... r + 9, 10, 11 can repeat until three -1's are reached
        #     These 3 values repeat when there is a comma: 1(T1),1(T2)
        # dr(X:X+2) = -1, -1, -1
        # 8-X+2 repeat until all dof for an XYPEAK are listed
        #     This section repeats when there is a slash: 1(T1)/1(T2)
        DR = np.zeros((3, N), dtype=int)  # [type; id; dof]
        R = 0  # index into DR columns
        for j in range(n):  # loop over XYPEAK cards
            curtype = dr[r[j] + 5]
            J = r[j] + 9  # index to first id
            while J < r[j+1]:
                while dr[J] != -1:
                    DR[:, R] = curtype, dr[J], dr[J+1]
                    R += 1
                    J += 3
                J += 4  # jump over [-1,-1,-1,#]
    else:
        DR = drm_keys['drs'][1:4]  # use sorted version
    desc = get_dof_descs()
    # maps a request type code to (output name, Nastran name, desc index)
    drm_info = {
        1: ('DTM', 'oug', 'acce'),
        3: ('ATM', 'ougv1', 'acce'),
        4: ('SPCF', 'oqg', 'spcf'),
        6: ('STM', 'oes', 'stress'),
        7: ('LTM', 'oef', 'force'),
    }
    otm = {}
    types = np.array([1, 3, 4, 6, 7])  # implemented request types
    for drtype in range(1, 13):
        pv = np.nonzero(DR[0] == drtype)[0]
        if pv.size > 0:
            if np.any(drtype == types):
                print('Processing "{0}" requests...'.format(Vreq[drtype-1]))
                get_drm(drm_info[drtype], otm, drms,
                        drm_keys, DR[:, pv], desc)
            else:
                print('Skipping %r requests. Needs to be added '
                      'to proccess_drm1_drm2().' % Vreq[drtype-1])
    return otm
def read_post_op2(op2_filename, verbose=False, getougv1=False):
    """
    Reads PARAM,POST,-1 op2 file and returns dictionary of data.

    Parameters
    ----------
    op2_filename : string
        Name of op2 file.
    verbose : bool
        If true, echo names of tables and matrices to screen
    getougv1 : bool
        If true, read the OUGV1 matrices, if any.

    Returns dictionary with following members
    -----------------------------------------
    'uset' : array
        6-column matrix as described in class OP2, member function
        :func:`readd_nas2cam_op2`.
    'cstm' : array
        14-column matrix containing the coordinate system
        transformation matrix for each coordinate system. See
        description in class OP2, member function
        :func:`readd_nas2cam_op2`.
    'cstm2' : dictionary
        Dictionary indexed by the coordinate system id number. This
        has the same information as 'cstm', but in a different format.
        See description in class OP2, member function
        :func:`readd_nas2cam_op2`.
    'mats' : dictionary
        Dictionary of matrices read from op2 file and indexed by the
        name. The 'tload' entry is a typical entry. If `getougv1` is
        true, `mats` will contain a list of all 'OUGV1' and 'BOPHIG'
        matrices.
    """
    # read op2 file:
    with OP2(op2_filename) as o2:
        mats = {}
        selist = uset = cstm2 = None
        se = 0
        if getougv1:
            mats['ougv1'] = []
        # jump past the op2 header and scan datablock by datablock
        o2._fileh.seek(o2._postheaderpos)
        eqexin1 = None
        dof = None
        Uset = None
        cstm = None
        while 1:
            name, trailer, dbtype = o2._read_op2_name_trailer()
            # print('name = %r' % name)
            # print('trailer = %s' % str(trailer))
            # print('dbtype = %r' % dbtype)
            if name is None:
                break  # end of file
            if name == '':
                raise RuntimeError('name=%r' % name)
            if dbtype > 0:
                # datablock is a matrix: collect all with the same name
                if verbose:
                    print("Reading matrix {0}...".format(name))
                if name not in mats:
                    mats[name] = []
                mats[name] += [o2.read_op2_matrix(name, trailer)]
            else:
                # datablock is a table: dispatch on the table name prefix
                if name.find('BGPDT') == 0:
                    if verbose:
                        print("Reading table {0}...".format(name))
                    bgpdt_rec1 = o2._read_op2_bgpdt68()
                    o2.skip_op2_table()
                    continue
                # if name.find('CSTM') == 0:
                #     if verbose:
                #         print("Reading table {}...".format(name))
                #     cstm = o2._rdop2cstm68().reshape((-1, 14))
                #     cstm = np.vstack((bc, cstm))
                #     continue
                elif name.find('GEOM1') == 0:
                    if verbose:
                        print("Reading table {0}...".format(name))
                    cords, sebulk, selist = o2._read_op2_geom1_cord2()
                    if 0 not in cords:
                        # ensure the basic coordinate system is present
                        cords[0] = np.array([[0., 1., 0.],
                                             [0., 0., 0.],
                                             [1., 0., 0.],
                                             [0., 1., 0.],
                                             [0., 0., 1.]])
                    if -1 not in cords:
                        cords[-1] = np.zeros((5, 3))  # dummy for spoints
                        cords[-1][0, 0] = -1
                    cstm2 = cords
                    continue
                elif name.find('DYNAMIC') == 0:
                    if verbose:
                        print("Reading DYNAMIC table {0}...".format(name))
                    mats['tload'] = o2.read_op2_dynamics()
                    continue
                elif name.find('EQEXIN') == 0:
                    if verbose:
                        print("Reading EQEXIN table {0}...".format(name))
                    eqexin1, eqexin = o2._read_op2_eqexin()
                    continue
                elif name.find('USET') == 0:
                    if verbose:
                        print("Reading USET table {0}...".format(name))
                    uset = o2._read_op2_uset()
                    continue
                elif getougv1 and (name.find('OUGV1') == 0 or
                                   name.find('BOPHIG') == 0):
                    if verbose:
                        print("Reading OUG table {0}...".format(name))
                    mats['ougv1'] += [o2._read_op2_ougv1(name)]
                    continue
                # if name.find('OEF1X') == 0:
                #     if verbose:
                #         print("Reading table {}...\n".format(name))
                #     mats['oef1x'] = o2._rdop2drm()
                #     continue
                elif verbose:
                    print("Skipping table %r..." % name)
                o2.skip_op2_table()
        # post-process the collected tables into the uset/cstm outputs
        if eqexin1 is not None:
            (bgpdt, dof, doftype, nid, upids) = o2._proc_bgpdt(
                eqexin1, eqexin, True, bgpdt_rec1)
        if dof is not None:
            Uset, cstm, cstm2 = o2._build_Uset(
                se, dof, doftype, nid, uset, bgpdt, None, cstm2)
    return {'uset': Uset,
            'cstm': cstm,
            'cstm2': cstm2,
            'mats': mats,
            'selist': selist}
| saullocastro/pyNastran | pyNastran/op2/dev/op2.py | Python | lgpl-3.0 | 118,219 | 0.000068 |
import time
t0 = time.time()
import os
import numpy as n
import sys
import glob
import cPickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
from scipy.interpolate import interp1d
# MultiDark 1 Gpc/h box size converted to comoving Mpc (h = 0.6777)
L_box = 1000./0.6777
# 4MOST tracer populations present in the tracer catalog files
tracer_names = n.array(['S8_ELG', 'S8_BG1', 'S8_BG2', 'S5_GAL', 'S8_QSO', 'S6_AGN', 'S5_BCG'])
# per-tracer plot symbol and color
marker_dict={'S5_BCG':'1', 'S5_GAL':'2', 'S6_AGN':'3', 'S8_BG1':',', 'S8_BG2':',', 'S8_ELG':',', 'S8_QSO':'x'}
color_dict ={'S5_BCG':'r', 'S5_GAL':'r', 'S6_AGN':'m', 'S8_BG1':'k', 'S8_BG2':'g', 'S8_ELG':'b', 'S8_QSO':'g'}
# sentinel point (-1, -1) so that n.vstack always has a 2-column seed
p0 = n.array([[-1., -1.]])
# accumulator: tracer name -> (N, 2) array of x, y positions; filled by get_slice
points = {'S5_BCG':p0, 'S5_GAL':p0, 'S6_AGN':p0, 'S8_BG1':p0, 'S8_BG2':p0, 'S8_ELG':p0, 'S8_QSO':p0}
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# Planck-like MultiDark cosmology used to convert distance <-> redshift
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
zs = n.arange(0.,4,0.001)
# inverse interpolation: comoving distance [Mpc] -> redshift
dc_2_z = interp1d(cosmoMD.comoving_distance(zs),zs)
import astropy.io.fits as fits
# summary file listing the snapshots (names, redshifts, comoving distances)
sf = fits.open(os.path.join(os.environ['MD10'],'output_MD_1.0Gpc.fits'))[1].data
plot_dir = '/afs/mpe/www/people/comparat/eRoMok/pie_plots/'
work_dir = os.path.join(os.environ['MD10'],'work_agn')
# redshift loop
#ii = 0
def get_slice(cpickle_dump_file, x_observer=0., y_observer=0., z_observer = 0., x_shift=0., y_shift=0., z_shift=0., slice_z_min=0., slice_z_max = 10., distance_min=0., distance_max = L_box):
    """Collect tracer x/y positions in a thin z-slice of the light cone.

    Loops over the snapshots whose comoving distance lies within
    [distance_min, distance_max], reads the matching position and 4MOST
    tracer FITS files from `work_dir`, keeps the points inside each
    snapshot's comoving shell and within slice_z_min < z < slice_z_max
    (Mpc), and appends them to the module-level `points` dictionary.
    The accumulated dictionary is pickled to `cpickle_dump_file` and
    also returned.

    NOTE(review): `points` is module-global, so successive calls
    (slice 1, 2, 3, 4) accumulate -- slice N also contains the points
    of earlier slices. The plots later clip on the x-range, so this may
    be intentional; confirm before reusing this function elsewhere.
    """
    # snapshots whose comoving distance falls in the requested range
    snap_selection = (sf['comoving_distance']<distance_max)&(sf['comoving_distance']>distance_min)
    snaps = sf[snap_selection]
    z_all = sf['redshift'][snap_selection]
    # shell boundaries: midpoints between snapshot redshifts, capped by
    # the redshifts of the requested distance range
    z_boundaries = n.hstack((dc_2_z(distance_min), (z_all[1:]+z_all[:-1])/2., dc_2_z(distance_max)))
    for ii, el in enumerate(snaps): # in range(len(z_all)):
        z_min, z_max = z_boundaries[ii], z_boundaries[ii+1]
        r_min, r_max = cosmoMD.comoving_distance(z_min).value, cosmoMD.comoving_distance(z_max).value
        position_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_?.fits')))
        position_files.sort()
        # position file loop
        print r_min, r_max
        for index in range(len(position_files)):
            print time.time()-t0
            print position_files[index]
            positions = fits.open(position_files[index])[1].data
            tracer_files = n.array(glob.glob(os.path.join(work_dir, 'out_'+el['snap_name']+'_SAM_Nb_'+str(index)+'_4MOST_*.fits')))
            tracer_files.sort()
            # tracer loop
            #path_2_tracer_file = tracer_files[0]
            for path_2_tracer_file in tracer_files:
                print path_2_tracer_file
                # tracer name is the last two '_'-separated tokens of the
                # file basename, e.g. 'S8_ELG'
                spl_bn = os.path.basename(path_2_tracer_file)[:-5].split('_')
                tracer_name = spl_bn[-2]+'_'+spl_bn[-1]
                # row indices into the position file for this tracer
                ids = fits.open(path_2_tracer_file)[1].data['line_number']
                # convert Mpc/h -> Mpc and move to the observer frame
                x_i = positions['x'][ids]/0.6777 - x_observer + x_shift
                y_i = positions['y'][ids]/0.6777 - y_observer + y_shift
                z_i = positions['z'][ids]/0.6777 - z_observer + z_shift
                # keep points inside this snapshot's comoving shell ...
                shell = (x_i*x_i + y_i*y_i + z_i*z_i < r_max**2.) & (x_i*x_i + y_i*y_i + z_i*z_i > r_min**2.)
                # ... and inside the thin z-slab (shadows builtin 'slice')
                slice = (shell) & (z_i>slice_z_min) &(z_i<slice_z_max)
                points[tracer_name] = n.vstack(( points[tracer_name], n.transpose([x_i[slice], y_i[slice]]) ))
    # text mode 'w' works for protocol-0 pickles on Python 2; use 'wb'
    # if this script is ever ported
    f=open(cpickle_dump_file, 'w')
    cPickle.dump(points,f)
    f.close()
    return points
# Build four consecutive radial slices of one box length each; the box is
# replicated along x via x_shift. NOTE(review): because get_slice mutates
# the module-level `points` dict, slice N also contains slices 1..N-1.
points_1 = get_slice(os.path.join(work_dir, 'slice_1_Lbox.pkl'))
points_2 = get_slice(os.path.join(work_dir, 'slice_2_Lbox.pkl'), x_shift = L_box, distance_min=L_box, distance_max = 2*L_box)
points_3 = get_slice(os.path.join(work_dir, 'slice_3_Lbox.pkl'), x_shift = 2*L_box, distance_min=2*L_box, distance_max = 3*L_box)
points_4 = get_slice(os.path.join(work_dir, 'slice_4_Lbox.pkl'), x_shift = 3*L_box, distance_min=3*L_box, distance_max = 4*L_box)
# reload from the pickles (lets the plotting part run without recomputing)
points_1 = cPickle.load(open(os.path.join(work_dir, 'slice_1_Lbox.pkl'),'r'))
points_2 = cPickle.load(open(os.path.join(work_dir, 'slice_2_Lbox.pkl'),'r'))
points_3 = cPickle.load(open(os.path.join(work_dir, 'slice_3_Lbox.pkl'),'r'))
points_4 = cPickle.load(open(os.path.join(work_dir, 'slice_4_Lbox.pkl'),'r'))
def plot_slice(points, name='slice_1_Lbox.png', lims=(0,L_box)):
    """Scatter every tracer population of one slice and save the figure.

    points : dict mapping tracer name -> (N, 2) array of x, y positions [Mpc]
    name   : output file name (written into plot_dir)
    lims   : (min, max) x-range of the slice in comoving Mpc
    """
    p.figure(0, ((6, 6)))
    p.axes([0.17, 0.17, 0.78, 0.78])
    # one layer per tracer with its registered marker/color
    for tracer in tracer_names:
        xs, ys = points[tracer].T
        p.plot(xs, ys, marker=marker_dict[tracer], color=color_dict[tracer],
               rasterized=True, ls='None', label=tracer)
    p.legend(loc=0, frameon=False, fontsize=9)
    p.xlabel('Mpc')
    p.ylabel('Mpc')
    p.xlim(lims)
    p.ylim((0, L_box))
    # title shows the redshift interval matching the distance range
    z_lo = str(n.round(dc_2_z(lims[0]), 2))
    z_hi = str(n.round(dc_2_z(lims[1]), 2))
    p.title(z_lo + '<z<' + z_hi)
    p.savefig(os.path.join(plot_dir, name))
    p.clf()
# render the four slices, one box length of x-range each
plot_slice(points_1, name='slice_1_Lbox.png', lims=(0*L_box,1*L_box))
plot_slice(points_2, name='slice_2_Lbox.png', lims=(1*L_box,2*L_box))
plot_slice(points_3, name='slice_3_Lbox.png', lims=(2*L_box,3*L_box))
plot_slice(points_4, name='slice_4_Lbox.png', lims=(3*L_box,4*L_box))
sys.exit()
# --- dead code: everything below sys.exit() never runs; it is an older
# --- inline version of plot_slice applied to slice 2, kept for reference
p.figure(0, ((6,6)))
p.axes([0.17,0.17,0.78,0.78])
for tracer in tracer_names:
    x_pos, y_pos = points_2[tracer].T
    p.plot(x_pos, y_pos,marker=marker_dict[tracer],color=color_dict[tracer],rasterized=True,ls='None',label=tracer)
p.legend(loc=0, frameon=False, fontsize=9)
p.xlabel('Mpc')
p.ylabel('Mpc')
p.xlim(lims)
p.ylim((0.,L_box))
p.savefig(os.path.join(plot_dir, 'slice_2_Lbox.png'))
p.clf()
| JohanComparat/nbody-npt-functions | bin/bin_SMHMr/MD10-pie-plot.py | Python | cc0-1.0 | 5,119 | 0.034186 |
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains classes for managing maps related to namespaces.
"""
import re
from typing import Any, Container, Dict, Iterator, List, Optional, MutableMapping, \
Mapping, TypeVar
from .exceptions import XMLSchemaValueError, XMLSchemaTypeError
from .helpers import local_name
from .aliases import NamespacesType
###
# Base classes for managing namespaces
class NamespaceResourcesMap(MutableMapping[str, Any]):
    """
    Dictionary for storing information about namespace resources. The values are
    lists of objects. Setting an existing value appends the object to the value.
    Setting a value with a list sets/replaces the value.
    """
    __slots__ = ('_store',)

    def __init__(self, *args: Any, **kwargs: Any):
        self._store: Dict[str, List[Any]] = {}
        self.update(*args, **kwargs)

    def __getitem__(self, uri: str) -> Any:
        return self._store[uri]

    def __setitem__(self, uri: str, value: Any) -> None:
        if isinstance(value, list):
            # a list replaces the whole entry; the shallow copy keeps the
            # stored list independent of the caller's list
            self._store[uri] = value[:]
        else:
            # single objects accumulate under the same URI
            self._store.setdefault(uri, []).append(value)

    def __delitem__(self, uri: str) -> None:
        del self._store[uri]

    def __iter__(self) -> Iterator[str]:
        yield from self._store

    def __len__(self) -> int:
        return len(self._store)

    def __repr__(self) -> str:
        return repr(self._store)

    def clear(self) -> None:
        self._store.clear()
class NamespaceMapper(MutableMapping[str, str]):
    """
    A class to map/unmap namespace prefixes to URIs. The mapped namespaces are
    automatically registered when set. Namespaces can be updated overwriting
    the existing registration or inserted using an alternative prefix.

    :param namespaces: initial data with namespace prefixes and URIs. \
    The provided dictionary is bound with the instance, otherwise a new \
    empty dictionary is used.
    :param strip_namespaces: if set to `True` uses name mapping methods that strip \
    namespace information.
    """
    # '__dict__' is kept alongside the slots so that map_qname/unmap_qname
    # can be overridden per instance (see __setattr__).
    __slots__ = '_namespaces', 'strip_namespaces', '__dict__'
    _namespaces: NamespacesType

    def __init__(self, namespaces: Optional[NamespacesType] = None,
                 strip_namespaces: bool = False):
        if namespaces is None:
            self._namespaces = {}
        else:
            # the provided dict is bound, not copied: changes are shared
            self._namespaces = namespaces
        self.strip_namespaces = strip_namespaces

    def __setattr__(self, name: str, value: str) -> None:
        # Assigning 'strip_namespaces' swaps the mapping methods on the
        # instance: when true, both map_qname and unmap_qname become
        # _local_name; when switched back to false, the regular
        # implementations are restored.
        if name == 'strip_namespaces':
            if value:
                self.map_qname = self.unmap_qname = self._local_name  # type: ignore[assignment]
            elif getattr(self, 'strip_namespaces', False):
                self.map_qname = self._map_qname  # type: ignore[assignment]
                self.unmap_qname = self._unmap_qname  # type: ignore[assignment]
        super(NamespaceMapper, self).__setattr__(name, value)

    def __getitem__(self, prefix: str) -> str:
        return self._namespaces[prefix]

    def __setitem__(self, prefix: str, uri: str) -> None:
        self._namespaces[prefix] = uri

    def __delitem__(self, prefix: str) -> None:
        del self._namespaces[prefix]

    def __iter__(self) -> Iterator[str]:
        return iter(self._namespaces)

    def __len__(self) -> int:
        return len(self._namespaces)

    @property
    def namespaces(self) -> NamespacesType:
        return self._namespaces

    @property
    def default_namespace(self) -> Optional[str]:
        # URI registered for the empty prefix, or None if there is none
        return self._namespaces.get('')

    def clear(self) -> None:
        self._namespaces.clear()

    def insert_item(self, prefix: str, uri: str) -> None:
        """
        A method for setting an item that checks the prefix before inserting.
        In case of collision the prefix is changed adding a numerical suffix.
        """
        if not prefix:
            if '' not in self._namespaces:
                self._namespaces[prefix] = uri
                return
            elif self._namespaces[''] == uri:
                return  # already registered as the default namespace
            prefix = 'default'  # default prefix taken: use a named one
        while prefix in self._namespaces:
            if self._namespaces[prefix] == uri:
                return  # same mapping already registered
            # collision: bump (or append) a numeric suffix and retry
            match = re.search(r'(\d+)$', prefix)
            if match:
                index = int(match.group()) + 1
                prefix = prefix[:match.span()[0]] + str(index)
            else:
                prefix += '0'
        self._namespaces[prefix] = uri

    def _map_qname(self, qname: str) -> str:
        """
        Converts an extended QName to the prefixed format. Only registered
        namespaces are mapped.

        :param qname: a QName in extended format or a local name.
        :return: a QName in prefixed format or a local name.
        """
        try:
            if qname[0] != '{' or not self._namespaces:
                return qname
            namespace, local_part = qname[1:].split('}')
        except IndexError:
            return qname  # empty string
        except ValueError:
            raise XMLSchemaValueError("the argument 'qname' has a wrong format: %r" % qname)
        except TypeError:
            raise XMLSchemaTypeError("the argument 'qname' must be a string-like object")
        # reverse-sorted iteration so the greatest matching prefix wins
        for prefix, uri in sorted(self._namespaces.items(), reverse=True):
            if uri == namespace:
                return '%s:%s' % (prefix, local_part) if prefix else local_part
        else:
            return qname  # namespace not registered: leave unchanged

    map_qname = _map_qname

    def _unmap_qname(self, qname: str,
                     name_table: Optional[Container[Optional[str]]] = None) -> str:
        """
        Converts a QName in prefixed format or a local name to the extended QName format.
        Local names are converted only if a default namespace is included in the instance.
        If a *name_table* is provided a local name is mapped to the default namespace
        only if not found in the name table.

        :param qname: a QName in prefixed format or a local name
        :param name_table: an optional lookup table for checking local names.
        :return: a QName in extended format or a local name.
        """
        try:
            if qname[0] == '{' or not self._namespaces:
                return qname
            prefix, name = qname.split(':')
        except IndexError:
            return qname  # empty string
        except ValueError:
            # no single ':' separator: either malformed or a local name
            if ':' in qname:
                raise XMLSchemaValueError("the argument 'qname' has a wrong format: %r" % qname)
            if not self._namespaces.get(''):
                return qname
            elif name_table is None or qname not in name_table:
                # local name not in the table: map to the default namespace
                return '{%s}%s' % (self._namespaces.get(''), qname)
            else:
                return qname
        except (TypeError, AttributeError):
            raise XMLSchemaTypeError("the argument 'qname' must be a string-like object")
        else:
            try:
                uri = self._namespaces[prefix]
            except KeyError:
                return qname  # unknown prefix: leave unchanged
            else:
                return '{%s}%s' % (uri, name) if uri else name

    unmap_qname = _unmap_qname

    @staticmethod
    def _local_name(qname: str, *_args: Any, **_kwargs: Any) -> str:
        # replacement for map_qname/unmap_qname when strip_namespaces is set
        return local_name(qname)

    def transfer(self, namespaces: NamespacesType) -> None:
        """
        Transfers compatible prefix/namespace registrations from a dictionary.
        Registrations added to namespace mapper instance are deleted from argument.

        :param namespaces: a dictionary containing prefix/namespace registrations.
        """
        transferred = []
        for k, v in namespaces.items():
            if k in self._namespaces:
                if v != self._namespaces[k]:
                    continue  # incompatible registration: keep in argument
            else:
                self[k] = v
            transferred.append(k)
        for k in transferred:
            del namespaces[k]
T = TypeVar('T')


class NamespaceView(Mapping[str, T]):
    """
    A read-only map for filtered access to a dictionary that stores
    objects mapped from QNames in extended format.
    """
    __slots__ = 'target_dict', 'namespace', '_key_fmt'

    def __init__(self, qname_dict: Dict[str, T], namespace_uri: str):
        self.target_dict = qname_dict
        self.namespace = namespace_uri
        # template that turns a local name into this namespace's extended QName
        self._key_fmt = '{' + namespace_uri + '}%s' if namespace_uri else '%s'

    def _in_view(self, key: str) -> bool:
        # True if *key* (an extended QName or local name of the target
        # dict) belongs to this view's namespace.
        if not self.namespace:
            return not key or key[0] != '{'
        return bool(key) and key[0] == '{' \
            and self.namespace == key[1:key.rindex('}')]

    def __getitem__(self, key: str) -> T:
        return self.target_dict[self._key_fmt % key]

    def __len__(self) -> int:
        return sum(1 for k in self.target_dict if self._in_view(k))

    def __iter__(self) -> Iterator[str]:
        if not self.namespace:
            for k in self.target_dict:
                if self._in_view(k):
                    yield k
        else:
            for k in self.target_dict:
                if self._in_view(k):
                    yield k[k.rindex('}') + 1:]

    def __repr__(self) -> str:
        return '%s(%s)' % (self.__class__.__name__, str(self.as_dict()))

    def __contains__(self, key: object) -> bool:
        if isinstance(key, str):
            return self._key_fmt % key in self.target_dict
        return key in self.target_dict

    def __eq__(self, other: Any) -> Any:
        return self.as_dict() == other

    def as_dict(self, fqn_keys: bool = False) -> Dict[str, T]:
        if not self.namespace or fqn_keys:
            return {k: v for k, v in self.target_dict.items()
                    if self._in_view(k)}
        return {k[k.rindex('}') + 1:]: v
                for k, v in self.target_dict.items() if self._in_view(k)}
| sissaschool/xmlschema | xmlschema/namespaces.py | Python | mit | 10,509 | 0.001713 |
import pygame
from pygame import event
class Player:
    """A minimal player record identified by *p_id*."""

    def __init__(self, p_id):
        # score is undefined until the game assigns it
        self.points = None
        self.p_id = p_id

    def turn(self, nr):
        """Take a turn: drain and return the pending pygame event queue.

        *nr* (the turn number) is currently unused.
        """
        return pygame.event.get()
| 1uk/3tsqd | classes/Player.py | Python | mit | 198 | 0.010101 |
"""
homeassistant.components.ifttt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This component enable you to trigger Maker IFTTT recipes.
Check https://ifttt.com/maker for details.
Configuration:
To use Maker IFTTT you will need to add something like the following to your
config/configuration.yaml.
ifttt:
key: xxxxx-x-xxxxxxxxxxxxx
Variables:
key
*Required
Your api key
"""
import logging
import requests
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
DOMAIN = "ifttt"
SERVICE_TRIGGER = 'trigger'
ATTR_EVENT = 'event'
ATTR_VALUE1 = 'value1'
ATTR_VALUE2 = 'value2'
ATTR_VALUE3 = 'value3'
DEPENDENCIES = []
REQUIREMENTS = ['pyfttt==0.3']
def trigger(hass, event, value1=None, value2=None, value3=None):
    """ Trigger a Maker IFTTT recipe """
    # Bundle the event name and the three optional payload values under
    # the attribute keys expected by the registered service handler.
    payload = {ATTR_EVENT: event}
    payload[ATTR_VALUE1] = value1
    payload[ATTR_VALUE2] = value2
    payload[ATTR_VALUE3] = value3
    hass.services.call(DOMAIN, SERVICE_TRIGGER, payload)
def setup(hass, config):
    """ Setup the ifttt service component.

    Validates that the Maker channel API key is present in the component
    configuration and registers the 'trigger' service. Returns True on
    success, False if the configuration is invalid.
    """
    # the 'key' entry under the 'ifttt' section is mandatory
    if not validate_config(config, {DOMAIN: ['key']}, _LOGGER):
        return False
    key = config[DOMAIN]['key']
    def trigger_service(call):
        """ Handle ifttt trigger service calls. """
        event = call.data.get(ATTR_EVENT)
        value1 = call.data.get(ATTR_VALUE1)
        value2 = call.data.get(ATTR_VALUE2)
        value3 = call.data.get(ATTR_VALUE3)
        if event is None:
            # nothing to send without an event name
            return
        try:
            # imported lazily so the dependency is only loaded when used
            import pyfttt as pyfttt
            pyfttt.send_event(key, event, value1, value2, value3)
        except requests.exceptions.RequestException:
            # network failures are logged, not raised, so HA keeps running
            _LOGGER.exception("Error communicating with IFTTT")
    hass.services.register(DOMAIN, SERVICE_TRIGGER, trigger_service)
    return True
| alexkolar/home-assistant | homeassistant/components/ifttt.py | Python | mit | 1,789 | 0 |
"""Train ILSVRC2017 Data using homemade scripts."""
import cv2
import os
import math
import tensorflow as tf
from multiprocessing import Process, Queue
import os
import sys
FILE_DIR = os.path.dirname(__file__)
sys.path.append(FILE_DIR + '/../')
import config as cfg
from img_dataset.ilsvrc2017_cls_multithread import ilsvrc_cls
from yolo2_nets.darknet import darknet19
from yolo2_nets.net_utils import get_ordered_ckpts
from utils.timer import Timer
slim = tf.contrib.slim
def get_validation_process(imdb, queue_in, queue_out):
    """Get validation dataset. Run in a child process.

    Blocks on `queue_in` until the parent requests a batch, then fetches
    the next validation images/labels from `imdb` and puts them on
    `queue_out`. Loops forever; the parent terminates this process.
    """
    while True:
        queue_in.get()  # wait for the parent's request token
        images, labels = imdb.get()
        queue_out.put([images, labels])
imdb = ilsvrc_cls('train', data_aug=True, multithread=cfg.MULTITHREAD)
val_imdb = ilsvrc_cls('val', batch_size=64)
# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
                           args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True)  # start getting the first batch
CKPTS_DIR = cfg.get_ckpts_dir('darknet19', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
    'darknet19', imdb.name)
# build the classification graph: darknet19 backbone + softmax loss
input_data = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_data = tf.placeholder(tf.int32, None)
is_training = tf.placeholder(tf.bool)
logits = darknet19(input_data, is_training=is_training)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=label_data, logits=logits)
loss = tf.reduce_mean(loss)
# run the batch-norm moving-average updates together with each train step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # train_op = tf.train.AdamOptimizer(0.0005).minimize(loss)
    train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
correct_pred = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), label_data)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('loss', loss)
tf.summary.scalar('accuracy', accuracy)
######################
# Initialize Session #
######################
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(TENSORBOARD_TRAIN_DIR)
val_writer = tf.summary.FileWriter(TENSORBOARD_VAL_DIR)
# # initialize variables, assume all vars are new now
# init_op = tf.global_variables_initializer()
# sess.run(init_op)
# load previous models
ckpts = get_ordered_ckpts(sess, imdb, 'darknet19')
variables_to_restore = slim.get_variables_to_restore()
# # change optimizer
# print('Initializing variables for the new optimizer')
# optimzer_vars = [var for var in tf.global_variables()
#                  if "Momentum" in var.name]
# init_op = tf.variables_initializer(optimzer_vars)
# sess.run(init_op)
# for var in optimzer_vars:
#     if var in variables_to_restore:
#         variables_to_restore.remove(var)
print('Restorining model snapshots from {:s}'.format(ckpts[-1]))
old_saver = tf.train.Saver(variables_to_restore)
old_saver.restore(sess, str(ckpts[-1]))
print('Restored.')
# resume from the epoch encoded in the checkpoint file name
# ('..._epoch_<N>.ckpt' -> old_epoch = N)
fnames = ckpts[-1].split('_')
old_epoch = int(fnames[-1][:-5])
imdb.epoch = old_epoch + 1
# simple model saver
cur_saver = tf.train.Saver()
T = Timer()
for i in range(imdb.total_batch * 10 + 1):  # ~10 epochs of batches
    T.tic()
    images, labels = imdb.get()
    _, loss_value, acc_value, train_summary = sess.run(
        [train_op, loss, accuracy, merged], {input_data: images, label_data: labels, is_training: 1})
    _time = T.toc(average=False)
    print('epoch {:d}, iter {:d}/{:d}, training loss: {:.3}, training acc: {:.3}, take {:.2}s'
          .format(imdb.epoch, (i + 1) % imdb.total_batch,
                  imdb.total_batch, loss_value, acc_value, _time))
    if (i + 1) % 25 == 0:
        # periodic validation on a batch prepared by the child process
        T.tic()
        val_images, val_labels = queue_out.get()
        val_loss_value, val_acc_value, val_summary = sess.run(
            [loss, accuracy, merged], {input_data: val_images, label_data: val_labels, is_training: 0})
        _val_time = T.toc(average=False)
        print('###validation loss: {:.3}, validation acc: {:.3}, take {:.2}s'
              .format(val_loss_value, val_acc_value, _val_time))
        queue_in.put(True)  # ask the child for the next validation batch
        global_step = imdb.epoch * imdb.total_batch + (i % imdb.total_batch)
        train_writer.add_summary(train_summary, global_step)
        val_writer.add_summary(val_summary, global_step)
    if (i % (imdb.total_batch * 2) == 0):
        # snapshot every two epochs worth of iterations
        save_path = cur_saver.save(sess, os.path.join(
            CKPTS_DIR,
            cfg.TRAIN_SNAPSHOT_PREFIX + '_epoch_' + str(imdb.epoch - 1) + '.ckpt'))
        print("Model saved in file: %s" % save_path)
# terminate child processes
if cfg.MULTITHREAD:
    imdb.close_all_processes()
queue_in.cancel_join_thread()
queue_out.cancel_join_thread()
val_data_process.terminate()
# Aggregate the public API of the submodules into the package namespace.
__all__ = []
# NOTE(review): these are Python-2 implicit relative imports; Python 3
# compatibility would require 'from .common import *' etc. -- left as-is
# to match the package's supported interpreter.
from common import *
import common
__all__ += common.__all__
from levinson_lpc import *
import levinson_lpc
__all__ += levinson_lpc.__all__
| cournape/talkbox | scikits/talkbox/linpred/__init__.py | Python | mit | 156 | 0.019231 |
import os
import os.path
def pytest_configure(config):
    """Point Django at the test settings module and configure the database
    backend selected through the ``DB`` environment variable (``sqlite``,
    ``postgresql`` or ``mysql``; defaults to ``sqlite``)."""
    backend = os.environ.get('DB', 'sqlite')
    os.environ['DJANGO_SETTINGS_MODULE'] = 'pytest_django_casperjs.tests.settings'  # noqa

    from django.conf import settings

    if backend == 'postgresql':
        overrides = {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'USER': 'postgres',
            'NAME': 'pytest_django_casperjs_test',
        }
    elif backend == 'mysql':
        # register PyMySQL as a drop-in replacement for MySQLdb
        import pymysql
        pymysql.install_as_MySQLdb()
        overrides = {
            'ENGINE': 'django.db.backends.mysql',
            'USER': 'root',
            'NAME': 'pytest_django_casperjs_test',
        }
    elif backend == 'sqlite':
        overrides = {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': ':memory:',
        }
    else:
        raise RuntimeError('Unsupported database configuration %s' % backend)
    settings.DATABASES['default'].update(overrides)
| EnTeQuAk/pytest-django-casperjs | conftest.py | Python | bsd-3-clause | 1,003 | 0 |
#!/usr/bin/env python
# encoding: utf-8
from ImageFusion import ImageFusion
from PIL import Image
import numpy as np
import pylab as plt
import pywt
class FusionDWB(ImageFusion):
    """Multi-focus image fusion based on the discrete wavelet transform.

    Every source image is decomposed with ``pywt.wavedec2`` ('db1'
    wavelet, ``zt`` levels).  The approximation (low-pass) bands are
    fused by averaging across images and the detail (high-pass) bands
    by taking the element-wise maximum; the fused image is then rebuilt
    with ``pywt.waverec2``.
    """
    def __init__(self, imageNames = None, zt=2, ap=2, mp=0):
        """Create a fuser.

        imageNames -- iterable of image file paths to fuse.
        zt         -- wavelet decomposition depth (level count).
        ap         -- high-pass fusion rule: 0-average, 1-min, 2-max
                      (only 2, element-wise maximum, is implemented).
        mp         -- low-pass fusion rule: 0-average, 1-min, 2-max
                      (only 0, averaging, is implemented).
        """
        self._imageNames = imageNames
        self._images = []
        self._fusionImage = None
        self._zt = zt  # level num
        self._ap = ap  # 0-average, 1-min, 2-max
        self._mp = mp  # 0-average, 1-min, 2-max
    def _load_images(self):
        # Load every input image as a float ndarray.
        for name in self._imageNames:
            self._images.append(np.array(Image.open(name), 'f'))
    def fusion(self):
        """Fuse the input images and return the fused image array.

        Raises NotImplementedError for fusion rules other than the
        supported defaults (mp=0 low-pass average, ap=2 high-pass max).
        """
        self._load_images()
        coeffss = [pywt.wavedec2(image, 'db1', level=self._zt)
                   for image in self._images]
        # low pass: average the approximation coefficients over all images
        if self._mp != 0:
            # Previously unsupported modes fell through to a NameError.
            raise NotImplementedError("only mp=0 (average) is supported")
        # Copy so the running sum does not overwrite the first image's
        # decomposition in place.
        cAF = coeffss[0][0].copy()
        for coeffs in coeffss[1:]:
            cAF += coeffs[0]
        # BUG FIX: divide by the number of images (len(coeffss)), not by
        # the number of entries in a single decomposition (len(coeffs)).
        cAF = cAF / len(coeffss)
        # high pass: keep the element-wise maximum of the detail bands
        if self._ap != 2:
            raise NotImplementedError("only ap=2 (maximum) is supported")
        # Deep-copy the first image's detail bands; they are updated in
        # place below and must not alias the source decomposition.
        hipassF = [tuple(band.copy() for band in HVDs)
                   for HVDs in coeffss[0][1:]]
        for coeffs in coeffss[1:]:  # every remaining image
            for idxLevel, HVDs in enumerate(coeffs[1:]):  # every level
                for idxDirec, HVD in enumerate(HVDs):  # H, V, D bands
                    maxMap = hipassF[idxLevel][idxDirec] < HVD
                    hipassF[idxLevel][idxDirec][maxMap] = HVD[maxMap]
        coeffsFusion = [cAF,] + hipassF
        self._fusionImage = pywt.waverec2(coeffsFusion, 'db1')
        return self._fusionImage
    def plot(self):
        """Show the two source images next to the fused result."""
        plt.figure(0)
        plt.gray()
        plt.subplot(131)
        plt.imshow(self._images[0])
        plt.subplot(132)
        plt.imshow(self._images[1])
        plt.subplot(133)
        plt.imshow(self._fusionImage)
        plt.show()
if __name__ == '__main__':
    # Demo: fuse a pair of multi-focus test images and display the result.
    IMAGEPATH = "../../images/multifocus/"
    imLists = [IMAGEPATH+"a01_1.tif",IMAGEPATH+"a01_2.tif"]
    fu = FusionDWB(imLists)
    fu.fusion()
    fu.plot()
| nan86150/ImageFusion | src/main/fusion_dwb.py | Python | mit | 2,018 | 0.002973 |
# Copyright (c) 2008, Stefano Taschini <taschini@ieee.org>
# All rights reserved.
# See LICENSE for details.
import unittest
from interval import interval, fpu
class FpuTestCase(unittest.TestCase):
    """Sanity checks for the fpu helper module: directed rounding
    (fpu.down / fpu.up), IEEE-754 special values, float coercion and
    directed integer powers (power_rd / power_ru)."""
    def test_third(self):
        "Nearest rounding of 1/3 is downwards."
        self.assertEqual(1/3.0, fpu.down(lambda: 1.0 / 3.0))
        self.assertTrue(1/3.0 < fpu.up(lambda: 1.0 / 3.0))
        self.assertEqual(-1/3.0, fpu.up(lambda: 1.0 / -3.0))
        self.assertTrue(-1/3.0 > fpu.down(lambda: 1.0 / -3.0))
    def test_fourth(self):
        " 1/4 is exact."
        self.assertEqual(1/4.0, fpu.down(lambda: 1.0 / 4.0))
        self.assertEqual(1/4.0, fpu.up(lambda: 1.0 / 4.0))
        self.assertEqual(-1/4.0, fpu.up(lambda: 1.0 / -4.0))
        self.assertEqual(-1/4.0, fpu.down(lambda: 1.0 / -4.0))
    def test_fifth(self):
        "Nearest rounding of 1/5 is upwards."
        self.assertEqual(1/5.0, fpu.up(lambda: 1.0 / 5.0))
        self.assertTrue(1/5.0 > fpu.down(lambda: 1.0 / 5.0))
        self.assertEqual(-1/5.0, fpu.down(lambda: 1.0 / -5.0))
        self.assertTrue(-1/5.0 < fpu.up(lambda: 1.0 / -5.0))
    def test_ieee754(self):
        "fpu.float respect ieee754 semantics."
        self.assertEqual(fpu.infinity + fpu.infinity, fpu.infinity)
        self.assertTrue(fpu.isnan(fpu.nan))
        self.assertTrue(fpu.isnan(0.0 * fpu.infinity))
        self.assertTrue(fpu.isnan(fpu.infinity - fpu.infinity))
    def test_float_coercion(self):
        "Only real-number scalars should be able to coerce as fpu.float"
        self.assertRaises(Exception, lambda: float(1,2))
        self.assertRaises(Exception, lambda: float((1,2)))
        self.assertRaises(Exception, lambda: float([1,2]))
        self.assertRaises(Exception, lambda: float('a'))
        self.assertRaises(Exception, lambda: float(1+1j))
    def test_min(self):
        "Verify corner cases with nan, -inf, +inf"
        # Unlike the builtin min, fpu.min propagates NaN.
        self.assertEqual(fpu.min((1.0, 2.0)), 1.0)
        self.assertEqual(fpu.min((1.0, fpu.infinity)), 1.0)
        self.assertEqual(fpu.min((1.0, -fpu.infinity)), -fpu.infinity)
        self.assertTrue(fpu.isnan(fpu.min((1.0, -fpu.nan))))
    def test_max(self):
        "Verify corner cases with nan, -inf, +inf"
        # Unlike the builtin max, fpu.max propagates NaN.
        self.assertEqual(fpu.max((1.0, 2.0)), 2.0)
        self.assertEqual(fpu.max((1.0, fpu.infinity)), fpu.infinity)
        self.assertEqual(fpu.max((1.0, -fpu.infinity)), 1.0)
        self.assertTrue(fpu.isnan(fpu.max((1.0, fpu.nan))))
    def test_power(self):
        x = 1/3.0
        # The cube of one third should depend on the rounding mode
        self.assertTrue(fpu.down(lambda: x*x*x) < fpu.up(lambda: x*x*x))
        # But using the built-in power operator, it doesn't necessarily do it
        # print fpu.down(lambda: x**3) < fpu.up(lambda: x**3))
        # So we define an integer power methods that does
        self.assertTrue(fpu.power_rd(x, 3) < fpu.power_ru(x, 3))
        self.assertTrue(fpu.power_rd(-x, 3) < fpu.power_ru(-x, 3))
        self.assertTrue(fpu.power_rd(x, 4) < fpu.power_ru(x, 4))
        self.assertTrue(fpu.power_rd(-x, 4) < fpu.power_ru(-x, 4))
        # power_rd/power_ru must agree with repeated multiplication
        # performed under the corresponding rounding mode.
        self.assertEqual(
            (fpu.down(lambda: x*x*x), fpu.up(lambda: x*x*x)),
            (fpu.power_rd(x, 3), fpu.power_ru(x, 3)))
class ModuleTestCase(unittest.TestCase):
    """Checks the public namespace of the top-level ``interval`` package."""
    def test_namespace(self):
        # Besides the standard module attributes, the package should
        # expose exactly fpu, imath, inf and interval.
        import interval
        self.assertEqual(
            dir(interval),
            ['__builtins__', '__doc__', '__file__', '__name__', '__path__', 'fpu', 'imath', 'inf', 'interval'])
class IntervalTestCase(unittest.TestCase):
    """Constructor, arithmetic, set-operation and formatting tests for
    the interval class.  An interval compares equal to a tuple of
    (inf, sup) component pairs."""
    def test_trivial_constructor(self):
        self.assertEqual(interval[1], ((1, 1),))
        self.assertEqual(interval(1), ((1, 1),))
        self.assertEqual(interval[1, 2], ((1, 2),))
        self.assertEqual(interval(1, 2), ((1, 1), (2, 2)))
        self.assertEqual(interval([1, 2], [3, 4]), ((1, 2), (3, 4)))
        self.assertEqual(interval([1,2]), interval(interval([1, 2])))
    def test_nan_constructor(self):
        # A NaN endpoint widens the component to the whole real line.
        self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
        self.assertEqual(interval[2, fpu.nan], ((-fpu.infinity, fpu.infinity),))
        self.assertEqual(interval(2, fpu.nan, 9), ((-fpu.infinity, fpu.infinity),))
    def test_failing_constructor(self):
        self.assertRaises(interval.ComponentError, lambda: interval[1, [2, 3]])
        self.assertRaises(interval.ComponentError, lambda: interval[1, 2, 3])
        self.assertRaises(interval.ComponentError, lambda: interval(0, [1, 2, 3]))
        self.assertRaises(interval.ComponentError, lambda: interval(0, [1, [2, 3]]))
        self.assertRaises(interval.ComponentError, lambda: interval['a', 1])
    def test_canonical_constructor(self):
        # Integers beyond 2**53 are not exactly representable as doubles;
        # the constructor must enclose them outward between the two
        # neighbouring representable floats.
        self.assertEqual(interval([1, 3], [4, 6], [2, 5], 9), ((1, 6), (9, 9)))
        self.assertEqual(interval[2 ** (52 + 1) - 1], interval[9007199254740991.0])
        self.assertEqual(interval[2 ** (52 + 1) + 1], interval[4503599627370496 * 2.0, 4503599627370497 * 2.0])
        self.assertEqual(interval[-2 ** (52 + 1) + 1], interval[-9007199254740991.0])
        self.assertEqual(interval[-2 ** (52 + 1) - 1], interval[-4503599627370497 * 2.0, -4503599627370496 * 2.0])
        self.assertEqual(interval[2 ** (52 + 2) + 1], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[2 ** (52 + 2) + 2], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[2 ** (52 + 2) + 3], interval[4503599627370496 * 4.0, 4503599627370497 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 1], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 2], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
        self.assertEqual(interval[-2 ** (52 + 2) - 3], interval[-4503599627370497 * 4.0, -4503599627370496 * 4.0])
    def test_unary(self):
        self.assertEqual(interval[1, 2], +interval[1, 2])
        self.assertEqual(interval[-2, -1], -interval[1, 2])
    def test_sum(self):
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[fpu.infinity])
        self.assertEqual(interval[4, 6], interval[1, 2] + interval[3, 4])
        self.assertEqual(interval[3, fpu.infinity], interval[1, fpu.infinity] + interval[2])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity, -1] + interval[2, +fpu.infinity])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[-fpu.infinity] + interval[8, +fpu.infinity])
        self.assertEqual(interval([1, 2], [10, fpu.infinity]) + interval([1,9],[-2,-1]), interval([-1, 1], [2, fpu.infinity]))
        self.assertEqual(interval[1, 9] + interval([1, 2], [10, fpu.infinity]), interval[2, fpu.infinity])
    def test_sum_coercion(self):
        # Scalars coerce to intervals; non-real operands must fail.
        self.assertEqual(interval[1,2] + 2, interval[3, 4])
        self.assertRaises(TypeError, lambda: interval[1,2] + 1j)
        self.assertEqual(1 + interval[4, 5], interval[5, 6])
        self.assertRaises(TypeError, lambda: (1, 2) + interval[1,2])
        self.assertEqual(fpu.infinity + interval[4, 5], interval[fpu.infinity])
    def test_sub(self):
        self.assertEqual(interval[1, 2] - interval[3, 4], interval[-3.0, -1.0])
        self.assertEqual(interval[1, 2] - 0.5, interval[0.5, 1.5])
        self.assertEqual(1.5 - interval[1, 2], interval[-0.5, 0.5])
    def test_mul(self):
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], fpu.infinity * interval[0])
        self.assertEqual(interval[+fpu.infinity], interval[+fpu.infinity] * interval[3])
        self.assertEqual(interval[-8, +10], interval[1, 2] * interval[-4, 5])
        self.assertEqual(interval[3, 8], interval[1, 2] * interval[3, 4])
        self.assertEqual(interval[-fpu.infinity, +fpu.infinity], interval[0,1 ] * interval[2, +fpu.infinity])
        self.assertEqual(interval[2, fpu.infinity], interval[-fpu.infinity,-2] * interval[-fpu.infinity,-1])
        self.assertEqual(interval([1, 2], [3, 4]) * interval[0.5, 2], interval[0.5, 8])
        self.assertEqual(interval[1, 2] * 2, interval[2, 4])
    def test_inverse(self):
        # Inverting an interval that straddles zero yields two components.
        self.assertEqual(interval[0.5, 1], interval[1, 2].inverse())
        self.assertEqual(interval[-1, -0.5],(-interval[1, 2]).inverse())
        self.assertEqual(interval([-fpu.infinity, -1], [0.5, +fpu.infinity]), interval[-1,2].inverse())
        self.assertEqual(interval(-fpu.infinity, [1, +fpu.infinity]), interval[0,1].inverse())
        self.assertEqual(interval([-fpu.infinity, -2.0], [0.0, fpu.infinity]),
                         interval([-0.5, 0.5], [0.2, fpu.infinity]).inverse())
    def test_division(self):
        self.assertEqual(interval[-fpu.infinity, fpu.infinity], interval[0,1] / interval[0,1])
        self.assertEqual(interval[0.5], interval[1] / 2)
        self.assertEqual(interval[0.5], 1 / interval[2])
    def test_power(self):
        # Only integer exponents are supported.
        self.assertRaises(TypeError, lambda: interval[1, 2] ** (1.3))
        self.assertEqual((-interval[1, 2]).inverse(), (-interval[1, 2]) ** -1)
        self.assertEqual(interval[0, 4], interval[-1, 2] ** 2)
        self.assertEqual(interval[-27, 8], interval[-3, 2] ** 3)
        self.assertEqual(interval[-1, 2], (interval[-1,2]**-1)**-1)
        self.assertEqual(interval([-0.38712442133802405]) ** 3, interval([-0.058016524353106828, -0.058016524353106808]))
        # The enclosure of x**n must match repeated multiplication under
        # directed rounding.
        self.assertEqual(
            interval[fpu.down(lambda: (1/3.0)*(1/3.0)), fpu.up(lambda: (1/3.0)*(1/3.0))],
            (interval[1]/3.0) ** 2)
        self.assertEqual(
            interval[fpu.down(lambda: (1/3.0)*(1/3.0)*(1/3.0)), fpu.up(lambda: (1/3.0)*(1/3.0)*(1/3.0))],
            (interval[1]/3.0) ** 3)
    def test_format(self):
        # repr must round-trip through eval.
        for x in interval[1], interval[1,2], interval([1,2], [3,4]):
            self.assertEqual(x, eval(repr(x)))
    def test_intersection(self):
        self.assertEqual(interval[1, 2] & interval[0, 3], interval[1, 2])
        self.assertEqual(interval[1.1, 1.9] & interval[1.3, 2.5], interval[1.3, 1.9])
        self.assertEqual(interval[1.1, 1.9] & interval[0.3, 0.7], interval())
        self.assertEqual(interval([1, 3], [4, 5]) & interval[2], interval[2])
        self.assertEqual(interval([1, 3], [4, 5]) & interval(2,4.5), interval(2, 4.5))
        self.assertEqual(interval[1, 2] & 1.2, interval(1.2))
        self.assertEqual(2.1 & interval[1, 2], interval())
    def test_union(self):
        self.assertEqual(interval([1, 6], 9), interval([1, 3], [4, 6]) | interval([2, 5], 9))
        self.assertEqual(interval[1, 2] | 2.1, interval([1, 2], 2.1))
        self.assertEqual(2.1 | interval[1, 2], interval([1, 2], 2.1))
    def test_hull(self):
        self.assertEqual(interval([1, 9]), interval.hull((interval([1, 3], [4, 6]), interval([2, 5], 9))))
    def test_inclusion(self):
        # verify_in/verify_out check both the containment predicate and
        # its consistency with intersection.
        def verify_in(x, y):
            self.assertTrue(x in y)
            self.assertEqual(x & y, interval(x))
        verify_in(1.5, interval[1, 2])
        verify_in(1, interval[1, 2])
        verify_in(2, interval[1, 2])
        verify_in(interval[1, 2], interval[1, 2])
        verify_in(interval[1.1, 2], interval[1, 2])
        verify_in(interval[1, 1.8], interval[1, 2])
        verify_in(interval([1.1, 2.2], [3.3, 4.4]), interval(-1, [0, 2.5], [3, 5], [7, 9]))
        def verify_out(x, y):
            self.assertFalse(x in y)
            self.assertNotEqual(x & y, x)
        verify_out(0, interval[1, 2])
        verify_out(4, interval[1, 2])
        verify_out(interval[1, 3], interval[2, 4])
        verify_out(interval(1, 3), interval(2, 4))
    def test_extrema(self):
        self.assertEqual(interval(1, [2, 3], 4).extrema, interval(1, 2, 3, 4))
    def test_midpoint(self):
        self.assertEqual(interval[0, 4].midpoint, interval[2])
        self.assertEqual(interval(-1, 1, 4), interval(-1, [0, 2], [3, 5]).midpoint)
class NewtonTestCase(unittest.TestCase):
    """Tests for the interval Newton root finder: newton(f, f') returns
    an enclosure of every root of f inside the starting interval."""
    def test_opts(self):
        self.assertRaises(TypeError, lambda: interval(0,1).newton(None, None, nonexisting=True))
    def test_cubic(self):
        self.assertEqual(
            interval[-2, 2].newton(lambda x: x**3 - x, lambda x: 3*x**2-1),
            interval(-1, 0, 1))
        self.assertEqual(
            interval[-5, 5].newton(lambda x: x**3 + x - 10, lambda x: 3*x**2 + 1),
            interval[2])
        self.assertEqual(
            interval[-5, 5].newton(lambda x: x**3 + x - 15, lambda x: 3*x**2 + 1),
            interval[5249383869325653 * 2.0 ** -51, 5249383869325655 * 2.0 ** -51])
        # The sharpest result would be with 5249383869325654 * 2.0 ** -51 as sup.
    def test_sqrt2(self):
        import math
        f, p = lambda x: x**2 - 2, lambda x: 2 * x
        # u and v are consecutive doubles bracketing sqrt(2).
        u, v = 6369051672525772 * 2.0 **-52, 6369051672525773 * 2.0 **-52
        self.assertEqual(v, math.sqrt(2))
        s = interval[u, v]
        self.assertEqual(s, interval[0.1, 5].newton(f, p))
        self.assertEqual(s, interval[0, 2].newton(f, p))
        self.assertEqual(s, interval[-1, 10].newton(f, p))
        # No root in [2, 5]: the result is the empty interval.
        self.assertEqual(interval(), interval[2, 5].newton(f, p))
        self.assertEqual(-s, interval[-5, 0].newton(f, p))
        self.assertEqual(-s|s, interval[-5, +5].newton(f, p))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| abarnert/pyinterval | test/test_basic.py | Python | bsd-3-clause | 13,302 | 0.005488 |
""" Cisco_IOS_XR_clns_isis_datatypes
This module contains a collection of generally useful
derived YANG data types.
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class IsisAddressFamilyEnum(Enum):
    """
    IsisAddressFamilyEnum

    The address families supported by IS-IS.

    .. data:: ipv4 = 0

        IPv4

    .. data:: ipv6 = 1

        IPv6

    """

    ipv4 = 0

    ipv6 = 1

    @staticmethod
    def _meta_info():
        # Resolve the generated meta table lazily so importing these
        # bindings does not pull in the whole meta package.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes
        return _Cisco_IOS_XR_clns_isis_datatypes._meta_table['IsisAddressFamilyEnum']
class IsisInternalLevelEnum(Enum):
    """
    IsisInternalLevelEnum

    The internal IS-IS levels.

    .. data:: not_set = 0

        Level not set

    .. data:: level1 = 1

        Level1

    .. data:: level2 = 2

        Level2

    """

    not_set = 0

    level1 = 1

    level2 = 2

    @staticmethod
    def _meta_info():
        # Resolve the generated meta table lazily so importing these
        # bindings does not pull in the whole meta package.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes
        return _Cisco_IOS_XR_clns_isis_datatypes._meta_table['IsisInternalLevelEnum']
class IsisSubAddressFamilyEnum(Enum):
    """
    IsisSubAddressFamilyEnum

    The sub-address families supported by IS-IS.

    .. data:: unicast = 0

        Unicast

    .. data:: multicast = 1

        Multicast

    """

    unicast = 0

    multicast = 1

    @staticmethod
    def _meta_info():
        # Resolve the generated meta table lazily so importing these
        # bindings does not pull in the whole meta package.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_clns_isis_datatypes
        return _Cisco_IOS_XR_clns_isis_datatypes._meta_table['IsisSubAddressFamilyEnum']
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_clns_isis_datatypes.py | Python | apache-2.0 | 1,675 | 0.014328 |
"""TcEx Notification Module"""
# standard library
import json
import logging
from typing import TYPE_CHECKING
# first-party
from tcex.exit.error_codes import handle_error
if TYPE_CHECKING:
# third-party
from requests import Session
# get tcex logger
logger = logging.getLogger('tcex')
class Notifications:
    """TcEx Notification Class.

    Send ThreatConnect notifications through the v2 API: configure the
    target with :meth:`recipients` or :meth:`org`, then call
    :meth:`send`.
    """

    def __init__(self, session_tc: 'Session'):
        """Store the API session and reset the notification settings.

        Args:
            session_tc: A configured instance of request.Session with TC API Auth.
        """
        self.session_tc = session_tc

        # pending notification configuration, set via recipients()/org()
        self._is_organization = False
        self._notification_type = None
        self._priority = 'Low'
        self._recipients = None
        self.log = logger

    def recipients(self, notification_type, recipients, priority='Low'):
        """Configure a notification for one or more specific recipients.

        .. code-block:: javascript

            {
                "notificationType": notification_type,
                "priority": priority
                "isOrganization": false,
                "recipients": recipients
            }

        Args:
            notification_type (str): The type of notification being sent.
            recipients (str): A comma delimited string of recipients.
            priority (str): The priority: Low, Medium, High.
        """
        self._is_organization = False
        self._notification_type = notification_type
        self._priority = priority
        self._recipients = recipients

    def org(self, notification_type, priority='Low'):
        """Configure a notification for the whole organization.

        .. code-block:: javascript

            {
                "notificationType": notification_type,
                "priority": priority
                "isOrganization": true
            }

        Args:
            notification_type (str): The notification type.
            priority (str): The priority: Low, Medium, High.
        """
        self._is_organization = True
        self._notification_type = notification_type
        self._priority = priority
        self._recipients = None

    def send(self, message):
        """Deliver the configured notification.

        Args:
            message (str): The message to be sent.

        Returns:
            dict: The parsed JSON body of the API response.
        """
        payload = {
            'notificationType': self._notification_type,
            'priority': self._priority,
            'isOrganization': self._is_organization,
            'message': message,
        }
        if self._recipients:
            payload['recipients'] = self._recipients
        self.log.debug(f'notification body: {json.dumps(payload)}')

        response = self.session_tc.post('/v2/notifications', json=payload)

        if response.status_code == 400:
            # specifically handle unknown users
            self.log.error(f'Failed to send notification ({response.text})')
        elif not response.ok:  # pragma: no cover
            handle_error(750, [response.status_code, response.text])

        # return response body
        return response.json()
| ThreatConnect-Inc/tcex | tcex/api/tc/v2/notifications/notifications.py | Python | apache-2.0 | 3,187 | 0.000628 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
:mod:`MDAnalysis` --- analysis of molecular simulations in python
=================================================================
MDAnalysis (https://www.mdanalysis.org) is a python toolkit to analyze
molecular dynamics trajectories generated by CHARMM, NAMD, Amber,
Gromacs, or LAMMPS.
It allows one to read molecular dynamics trajectories and access the
atomic coordinates through numpy arrays. This provides a flexible and
relatively fast framework for complex analysis tasks. In addition,
CHARMM-style atom selection commands are implemented. Trajectories can
also be manipulated (for instance, fit to a reference structure) and
written out. Time-critical code is written in C for speed.
Help is also available through the mailing list at
http://groups.google.com/group/mdnalysis-discussion
Please report bugs and feature requests through the issue tracker at
https://github.com/MDAnalysis/mdanalysis/issues
Citation
--------
When using MDAnalysis in published work, please cite
R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
MDAnalysis: A Python package for the rapid analysis of molecular dynamics
simulations. In S. Benthall and S. Rostrup, editors, Proceedings of the 15th
Python in Science Conference, pages 98-105, Austin, TX, 2016. SciPy,
doi:10.25080/majora-629e541a-00e
N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and
O. Beckstein. MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics
Simulations. J. Comput. Chem. 32 (2011), 2319--2327, doi:`10.1002/jcc.21787`_
https://www.mdanalysis.org
For citations of included algorithms and sub-modules please see the references_.
.. _`10.1002/jcc.21787`: http://dx.doi.org/10.1002/jcc.21787
.. _references: https://docs.mdanalysis.org/documentation_pages/references.html
Getting started
---------------
Import the package::
>>> import MDAnalysis
(note that not everything in MDAnalysis is imported right away; for
additional functionality you might have to import sub-modules
separately, e.g. for RMS fitting ``import MDAnalysis.analysis.align``.)
Build a "universe" from a topology (PSF, PDB) and a trajectory (DCD, XTC/TRR);
here we are assuming that PSF, DCD, etc contain file names. If you don't have
trajectories at hand you can play with the ones that come with MDAnalysis for
testing (see below under `Examples`_)::
>>> u = MDAnalysis.Universe(PSF, DCD)
Select the C-alpha atoms and store them as a group of atoms::
>>> ca = u.select_atoms('name CA')
>>> len(ca)
214
Calculate the centre of mass of the CA and of all atoms::
>>> ca.center_of_mass()
array([ 0.06873595, -0.04605918, -0.24643682])
>>> u.atoms.center_of_mass()
array([-0.01094035, 0.05727601, -0.12885778])
Calculate the CA end-to-end distance (in angstroem)::
>>> import numpy as np
>>> coord = ca.positions
>>> v = coord[-1] - coord[0] # last Ca minus first one
>>> np.sqrt(np.dot(v, v,))
10.938133
Define a function eedist():
>>> def eedist(atoms):
... coord = atoms.positions
... v = coord[-1] - coord[0]
... return sqrt(dot(v, v,))
...
>>> eedist(ca)
10.938133
and analyze all timesteps *ts* of the trajectory::
>>> for ts in u.trajectory:
... print eedist(ca)
10.9381
10.8459
10.4141
9.72062
....
See Also
--------
:class:`MDAnalysis.core.universe.Universe` for details
Examples
--------
MDAnalysis comes with a number of real trajectories for testing. You
can also use them to explore the functionality and ensure that
everything is working properly::
from MDAnalysis import *
from MDAnalysis.tests.datafiles import PSF,DCD, PDB,XTC
u_dims_adk = Universe(PSF,DCD)
u_eq_adk = Universe(PDB, XTC)
The PSF and DCD file are a closed-form-to-open-form transition of
Adenylate Kinase (from [Beckstein2009]_) and the PDB+XTC file are ten
frames from a Gromacs simulation of AdK solvated in TIP4P water with
the OPLS/AA force field.
.. [Beckstein2009] O. Beckstein, E.J. Denning, J.R. Perilla and T.B. Woolf,
Zipping and Unzipping of Adenylate Kinase: Atomistic Insights into the
Ensemble of Open <--> Closed Transitions. J Mol Biol 394 (2009), 160--176,
doi:10.1016/j.jmb.2009.09.009
"""
# Names re-exported at package level; most functionality lives in
# submodules that users import explicitly.
__all__ = ['Universe', 'Writer', 'fetch_mmtf',
           'AtomGroup', 'ResidueGroup', 'SegmentGroup']
import logging
import warnings
logger = logging.getLogger("MDAnalysis.__init__")
from .version import __version__
# authors.py may be missing (it is optional); fall back to an empty list.
try:
    from .authors import __authors__
except ImportError:
    logger.info('Could not find authors.py, __authors__ will be empty.')
    __authors__ = []
# Registry of Readers, Parsers and Writers known to MDAnalysis
# Metaclass magic fills these as classes are declared.
_READERS = {}
_READER_HINTS = {}
_SINGLEFRAME_WRITERS = {}
_MULTIFRAME_WRITERS = {}
_PARSERS = {}
_PARSER_HINTS = {}
_SELECTION_WRITERS = {}
_CONVERTERS = {}
# Registry of TopologyAttributes
_TOPOLOGY_ATTRS = {}  # {attrname: cls}
_TOPOLOGY_TRANSPLANTS = {}  # {name: [attrname, method, transplant class]}
_TOPOLOGY_ATTRNAMES = {}  # {lower case name w/o _ : name}
# custom exceptions and warnings
from .exceptions import (
    SelectionError, NoDataError, ApplicationError, SelectionWarning,
    MissingDataWarning, ConversionWarning, FileFormatWarning,
    StreamWarning
)
from .lib import log
from .lib.log import start_logging, stop_logging
# Attach a no-op handler so library logging stays silent unless the
# application configures logging itself.
logging.getLogger("MDAnalysis").addHandler(log.NullHandler())
del logging
# only MDAnalysis DeprecationWarnings are loud by default
warnings.filterwarnings(action='once', category=DeprecationWarning,
                        module='MDAnalysis')
from . import units
# Bring some often used objects into the current namespace
from .core.universe import Universe, Merge
from .core.groups import AtomGroup, ResidueGroup, SegmentGroup
from .coordinates.core import writer as Writer
# After Universe import
from .coordinates.MMTF import fetch_mmtf
from . import converters
# Register the canonical MDAnalysis citations with the due framework.
from .due import due, Doi, BibTeX
due.cite(Doi("10.25080/majora-629e541a-00e"),
         description="Molecular simulation analysis library",
         path="MDAnalysis", cite_module=True)
due.cite(Doi("10.1002/jcc.21787"),
         description="Molecular simulation analysis library",
         path="MDAnalysis", cite_module=True)
del Doi, BibTeX
| MDAnalysis/mdanalysis | package/MDAnalysis/__init__.py | Python | gpl-2.0 | 7,425 | 0.002559 |
import sys
import wx
import wx.dataview as dv
#import os; print('PID:'+str(os.getpid())); raw_input("Press enter...")
#----------------------------------------------------------------------
class MyCustomRenderer(dv.DataViewCustomRenderer):
    """A DataViewCustomRenderer that draws the cell value as text on a
    shaded, rounded background and supports in-place editing."""

    def __init__(self, log, *args, **kw):
        dv.DataViewCustomRenderer.__init__(self, *args, **kw)
        self.log = log
        self.value = None

    def _text(self):
        # The stored value may be None; render that as an empty string.
        return self.value if self.value else ""

    def SetValue(self, value):
        #self.log.write('MyCustomRenderer.SetValue: %s\n' % value)
        self.value = value
        return True

    def GetValue(self):
        #self.log.write('MyCustomRenderer.GetValue\n')
        return self.value

    def GetSize(self):
        # Measure the displayed value with the renderer's helper, which
        # honours any custom attributes set for this item.
        return self.GetTextExtent(self._text())

    def Render(self, rect, dc, state):
        if state != 0:
            self.log.write('Render: %s, %d\n' % (rect, state))

        if not state & dv.DATAVIEW_CELL_SELECTED:
            # Shade the background so it is visible whether the rounded
            # rectangle correctly fills the whole cell.
            dc.SetBrush(wx.Brush('light grey'))
            dc.SetPen(wx.TRANSPARENT_PEN)
            rect.Deflate(1, 1)
            dc.DrawRoundedRectangle(rect, 2)

        # Let the helper draw the text, handling alignment, font and
        # colour attributes for us.
        self.RenderText(self._text(),
                        4,  # x-offset, to compensate for the rounded rectangles
                        rect,
                        dc,
                        state  # wxDataViewCellRenderState flags
                        )
        return True

    # HasEditorCtrl, CreateEditorCtrl and GetValueFromEditorCtrl enable
    # optional in-place editing of the cell value.
    def HasEditorCtrl(self):
        self.log.write('HasEditorCtrl')
        return True

    def CreateEditorCtrl(self, parent, labelRect, value):
        self.log.write('CreateEditorCtrl: %s' % labelRect)
        ctrl = wx.TextCtrl(parent,
                           value=value,
                           pos=labelRect.Position,
                           size=labelRect.Size)
        # Select the text and put the caret at the end.
        ctrl.SetInsertionPointEnd()
        ctrl.SelectAll()
        return ctrl

    def GetValueFromEditorCtrl(self, editor):
        self.log.write('GetValueFromEditorCtrl: %s' % editor)
        return True, editor.GetValue()

    # LeftClick and Activate are optional notifications for single and
    # double clicks on an item.
    def LeftClick(self, pos, cellRect, model, item, col):
        self.log.write('LeftClick')
        return False

    def Activate(self, cellRect, model, item, col):
        self.log.write('Activate')
        return False
#----------------------------------------------------------------------
# To help focus this sample on the custom renderer, we'll reuse the
# model class from another sample.
from IndexListModel import TestModel
class TestPanel(wx.Panel):
    """Panel hosting a DataViewCtrl whose text columns are drawn by
    MyCustomRenderer."""

    def __init__(self, parent, log, model=None, data=None):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        # The dataview control that displays the model.
        self.dvc = dv.DataViewCtrl(self, style=wx.BORDER_THEME
                                   | dv.DV_ROW_LINES
                                   #| dv.DV_HORIZ_RULES
                                   | dv.DV_VERT_RULES
                                   | dv.DV_MULTIPLE
                                   )

        # Either adopt the model handed in or build one from the data.
        self.model = TestModel(data, log) if model is None else model
        self.dvc.AssociateModel(self.model)

        # A plain right-aligned text column for the id...
        c0 = self.dvc.AppendTextColumn("Id", 0, width=40)
        c0.Alignment = wx.ALIGN_RIGHT
        c0.MinWidth = 40

        # ...and editable custom-rendered columns for the other fields.
        for title, col, width in [('Artist', 1, 170),
                                  ('Title', 2, 260),
                                  ('Genre', 3, 80)]:
            renderer = MyCustomRenderer(self.log, mode=dv.DATAVIEW_CELL_EDITABLE)
            column = dv.DataViewColumn(title, renderer, col, width=width)
            column.Alignment = wx.ALIGN_LEFT
            self.dvc.AppendColumn(column)

        # Layout
        self.Sizer = wx.BoxSizer(wx.VERTICAL)
        self.Sizer.Add(self.dvc, 1, wx.EXPAND)
#----------------------------------------------------------------------
def main():
    """Run the demo standalone with the bundled sample data."""
    from data import musicdata

    app = wx.App()
    frame = wx.Frame(None, title="CustomRenderer sample", size=(700, 500))
    TestPanel(frame, sys.stdout, data=musicdata)
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
#----------------------------------------------------------------------
| dnxbjyj/python-basic | gui/wxpython/wxPython-demo-4.0.1/samples/dataview/CustomRenderer.py | Python | mit | 5,439 | 0.005516 |
from sgfs import SGFS
from sgactions.utils import notify, progress, alert
def run_create(**kwargs):
_run(False, **kwargs)
def run_preview(**kwargs):
_run(True, **kwargs)
def _run(dry_run, entity_type, selected_ids, **kwargs):
title='Preview Folders' if dry_run else 'Creating Folders'
verb = 'previewing' if dry_run else 'creating'
progress(message=('Previewing' if dry_run else 'Creating') + ' folders for %s %ss; please wait...' % (len(selected_ids), entity_type))
sgfs = SGFS()
entities = sgfs.session.merge([dict(type=entity_type, id=id_) for id_ in selected_ids])
heirarchy = sgfs.session.fetch_heirarchy(entities)
sgfs.session.fetch_core(heirarchy)
command_log = sgfs.create_structure(entities, dry_run=dry_run)
if command_log:
details = '\n'.join(command_log)
if dry_run:
alert(title='Folder Preview', message=details)
else:
notify(
message='Created folders for %s %ss.' % (len(selected_ids), entity_type),
details=details,
)
else:
notify(message='Folders are already up to date.')
| westernx/sgfs | sgfs/actions/create_structure.py | Python | bsd-3-clause | 1,165 | 0.009442 |
# -*- coding: utf-8 -*-
"""
Django settings for nhweb project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
# Project root and the Django apps directory, resolved relative to this file.
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('nhweb')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'nhweb.users',  # custom users app
    # Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE(review): an earlier comment said djangosecure's SecurityMiddleware
# should be listed first, but it is not installed here — confirm whether it
# was removed intentionally.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'nhweb.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""Jesse Butcher""", 'boweeb@gmail.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
    'default': env.db("DATABASE_URL", default="postgres:///nhweb"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# Your common stuff: Below this line define 3rd party library settings
| boweeb/nhweb | config/settings/common.py | Python | bsd-3-clause | 9,076 | 0.001102 |
# Copyright 2017 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module implements the combination of the eHata and ITM models
# according to the requirements developed in the Winnforum WG1 Propagation
# task group.
import math
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ehata import ehata
from itm import pytm
from geo import tropoClim
from geo import refractivity
from geo import ned_indexer
from geo import nlcd_indexer
from geo import land_use
from geo import vincenty
# f in MHz; d and h1/h2 all in meters
def FreeSpacePathLoss(f, d, h1, h2):
    """Free-space path loss in dB.

    f is the frequency in MHz; d is the horizontal distance and h1/h2 the
    terminal heights, all in meters.  The slant range between the two
    terminals is used in the standard FSPL formula.
    """
    slant_range = math.sqrt(d**2 + (h1 - h2)**2)
    return 20.0 * math.log10(slant_range) + 20.0 * math.log10(f) - 27.56
class PropagationLossModel:
    """Combined ITM / extended-Hata propagation loss model.

    Implements the Winnforum WG1 combination rules: FSPL at very short range,
    eHata at mid range, and ITM (with a J adjustment) beyond 80 km, selecting
    per the land classification at the endpoints.
    """
    def __init__(self, itu_dir, ned_dir, nlcd_dir):
        # Indexers over the ITU climate/refractivity data and the NED terrain
        # and NLCD land-cover datasets.
        self.climIndx = tropoClim.ClimateIndexer(itu_dir)
        self.refractivityIndx = refractivity.RefractivityIndexer(itu_dir)
        self.nedIndx = ned_indexer.NedIndexer(ned_dir)
        self.nlcdIndx = nlcd_indexer.NlcdIndexer(nlcd_dir)
    # Calculate the ITM adjusted propagation loss given the
    # assumptions on the ITM model.
    def ITM_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, reliability):
        """ITM point-to-point loss in dB with WG1-assumed ground constants.

        f is in MHz; heights in meters.  Climate and surface refractivity are
        sampled at the path midpoint.
        """
        dielectric_constant = 25.0 # good ground
        soil_conductivity = 0.02 # good ground
        polarization = 1
        confidence = 0.5
        # get surface refractivity and radio climate from path midpoint
        dist, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
        lat_c, lng_c, alpha2 = vincenty.to_dist_bear_vincenty(lat1, lng1, dist/2.0, bearing)
        print 'Midpoint = %f, %f' % (lat_c, lng_c)
        radio_climate = self.climIndx.TropoClim(lat_c, lng_c)
        # NOTE(review): this local deliberately shadows the imported
        # ``refractivity`` module within this method.
        refractivity = self.refractivityIndx.Refractivity(lat_c, lng_c)
        print 'Using climate %d' % radio_climate
        print 'Using refractivity %f' % refractivity
        print 'Using freq %f' % f
        profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
        print profile[0], profile[1]
        #print profile
        print 'total distance is ', profile[0]*profile[1]
        loss = pytm.point_to_point(profile, h1, h2,
                                   dielectric_constant,
                                   soil_conductivity,
                                   refractivity,
                                   f,
                                   radio_climate,
                                   polarization,
                                   confidence,
                                   reliability)
        print 'ITM P2P is ', loss
        return loss
    # Adjusted propagation loss according to the adjustments in R2-SGN-04
    # distance d, heights h1, h2 all in meters
    # frequency f in MHz
    def ExtendedHata_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, land_cat):
        """eHata loss in dB, blended with FSPL/ITM by path distance.

        d <= 100 m: FSPL; 100 m - 1 km: log-interpolation of FSPL and eHata;
        1 km - 80 km: eHata; >= 80 km: ITM plus a non-negative J adjustment
        computed at the 80 km point.
        """
        d, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
        d = d*1000.0  # vincenty returns km; work in meters below
        print 'EHata distance=', d
        if d <= 100.0:
            # return FSPL
            print 'FSPL'
            return FreeSpacePathLoss(f, d, h1, h2)
        if d > 100.0 and d <= 1000.0:
            print 'interp FSPL and ehata'
            # interpolate FSPL and ehata
            fspl_loss = FreeSpacePathLoss(f, 100.0, h1, h2)
            print '  fspl_loss=', fspl_loss
            ehata_loss, abm = ehata.ExtendedHata_MedianBasicPropLoss(f, 1.0, h1, h2, land_cat)
            print '  ehata_loss=', ehata_loss
            print '  ( abm=', abm
            # Log-distance interpolation between the two anchors.
            return fspl_loss + (1.0 + math.log10(d/1000.0))*(ehata_loss - fspl_loss)
        if d > 1000.0 and d < 80000.0:
            # return eHata value without adjustment.
            print 'EHata only for d=%f' % d
            profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
            return ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile)
        if d >= 80000.0:
            print 'EHata for distance %f > 80km' % d
            # Derive profile_80km
            lat_80, lng_80, heading = vincenty.to_dist_bear_vincenty(lat1, lng1, 80.0, bearing)
            print '80km point is %f %f' % (lat_80, lng_80)
            profile_80km = self.nedIndx.Profile(lat1, lng1, lat_80, lng_80)
            # Find J adjustment...
            ehata_loss = ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile_80km)
            itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat_80, lng_80, h2, f, 0.5)
            J = ehata_loss - itm_loss
            print 'Got ehata=%f itm=%f J=%f' % (ehata_loss, itm_loss, J)
            if J < 0.0:
                # J is clamped to be non-negative per the combination rules.
                J = 0.0
            return self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5) + J
    def LandClassification(self, lat, lng):
        """Return the NLCD-derived land category at (lat, lng)."""
        code = self.nlcdIndx.NlcdCode(lat, lng)
        return self.nlcdIndx.NlcdLandCategory(code)
    # This is the oracle for propagation loss from point 1 to point 2 at frequency f (Mhz).
    def PropagationLoss(self, f, lat1, lng1, h1, lat2, lng2, h2, land_cat=''):
        """Median propagation loss in dB from point 1 to point 2 at f MHz.

        If land_cat is not supplied it is derived from NLCD at the receiver
        (falling back to the transmitter when the receiver is open water,
        code 11).  Rural paths, or an effective CBSD height >= 200 m, use ITM
        alone; otherwise the larger of ITM and eHata is returned.
        """
        if land_cat == '':
            code = self.nlcdIndx.NlcdCode(lat2, lng2)
            if code == 11:
                code = self.nlcdIndx.NlcdCode(lat1, lng1)
            land_cat = land_use.NlcdLandCategory(code)
        print 'Using land_cat =', land_cat
        # Calculate effective heights of tx and rx:
        profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
        # NOTE(review): ``EffectiveHeights`` is neither defined nor imported
        # in this module — confirm where it is expected to come from.
        h1eff, h2eff = EffectiveHeights(h1, h2, profile)
        if land_cat == 'RURAL' or h1eff >= 200: # Only h1eff (CBSD effective height) counts
            itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
            print 'Returning itm_loss for rural > 200: ', itm_loss
            return itm_loss
        else:
            # Non-rural: take the worse (larger) of the two model predictions.
            itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
            ehata_loss = self.ExtendedHata_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, land_cat)
            if ehata_loss > itm_loss:
                return ehata_loss
            return itm_loss
# Run directly, takes args of "lat1, lng1, h1, lat2, lng2, h2, f" and prints the
# (median) propagation loss in dB.
if __name__ == '__main__':
    # Resolve the repository's data directories relative to this file:
    # <repo>/data/{itu,ned,nlcd}.
    dir = os.path.dirname(os.path.realpath(__file__))
    rootDir = os.path.dirname(os.path.dirname(dir))
    ituDir = os.path.join(os.path.join(rootDir, 'data'), 'itu')
    nedDir = os.path.join(os.path.join(rootDir, 'data'), 'ned')
    nlcdDir = os.path.join(os.path.join(rootDir, 'data'), 'nlcd')
    prop = PropagationLossModel(ituDir, nedDir, nlcdDir)
    # CLI args: f lat1 lng1 h1 lat2 lng2 h2 (f in MHz, heights in meters).
    loss = prop.PropagationLoss(float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]),
                                float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]),
                                float(sys.argv[7]))
    print 'Propagation Loss = ', loss, ' dB'
| gregbillock/Spectrum-Access-System | src/prop/model.py | Python | apache-2.0 | 6,985 | 0.013314 |
"""Scenario Outline tests."""
import textwrap
from pytest_bdd.utils import collect_dumped_objects
from tests.utils import assert_outcomes
STEPS = """\
from pytest_bdd import parsers, given, when, then
from pytest_bdd.utils import dump_obj
@given(parsers.parse("there are {start:d} cucumbers"), target_fixture="cucumbers")
def given_cucumbers(start):
assert isinstance(start, int)
dump_obj(start)
return {"start": start}
@when(parsers.parse("I eat {eat:g} cucumbers"))
def eat_cucumbers(cucumbers, eat):
assert isinstance(eat, float)
dump_obj(eat)
cucumbers["eat"] = eat
@then(parsers.parse("I should have {left} cucumbers"))
def should_have_left_cucumbers(cucumbers, left):
assert isinstance(left, str)
dump_obj(left)
assert cucumbers["start"] - cucumbers["eat"] == int(left)
"""
def test_outlined(testdir):
    """A scenario outline runs once per example row, with typed parameters
    ({start:d} -> int, {eat:g} -> float, untyped -> str)."""
    testdir.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined given, when, thens
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers
                    Examples:
                    | start | eat | left |
                    |  12   |  5  |  7   | # a comment
                    |  5    |  4  |  1   |
            """
        ),
    )
    testdir.makeconftest(textwrap.dedent(STEPS))
    testdir.makepyfile(
        textwrap.dedent(
            """\
        from pytest_bdd import scenario
        @scenario(
            "outline.feature",
            "Outlined given, when, thens",
        )
        def test_outline(request):
            pass
        """
        )
    )
    result = testdir.runpytest("-s")
    # One pass per example row.
    result.assert_outcomes(passed=2)
    # fmt: off
    # Dumped objects arrive in step order for each row.
    assert collect_dumped_objects(result) == [
        12, 5.0, "7",
        5, 4.0, "1",
    ]
    # fmt: on
def test_unused_params(testdir):
    """Test parametrized scenario when the test function lacks parameters.

    An example-table column that no step references (here ``unused_param``)
    must not break the scenario.
    """
    testdir.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined with unused params
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    # And commented out step with <unused_param>
                    Then I should have <left> cucumbers
                    Examples:
                    | start | eat | left | unused_param |
                    |  12   |  5  |  7   | value        |
            """
        ),
    )
    testdir.makeconftest(textwrap.dedent(STEPS))
    testdir.makepyfile(
        textwrap.dedent(
            """\
        from pytest_bdd import scenario
        @scenario("outline.feature", "Outlined with unused params")
        def test_outline(request):
            pass
        """
        )
    )
    result = testdir.runpytest()
    assert_outcomes(result, passed=1)
def test_outlined_with_other_fixtures(testdir):
    """Test outlined scenario also using other parametrized fixture.

    2 example rows x 3 fixture params = 6 test runs.
    """
    testdir.makefile(
        ".feature",
        outline=textwrap.dedent(
            """\
            Feature: Outline
                Scenario Outline: Outlined given, when, thens
                    Given there are <start> cucumbers
                    When I eat <eat> cucumbers
                    Then I should have <left> cucumbers
                    Examples:
                    | start | eat | left |
                    |  12   |  5  |  7   |
                    |  5    |  4  |  1   |
            """
        ),
    )
    testdir.makeconftest(textwrap.dedent(STEPS))
    testdir.makepyfile(
        textwrap.dedent(
            """\
        import pytest
        from pytest_bdd import scenario
        @pytest.fixture(params=[1, 2, 3])
        def other_fixture(request):
            return request.param
        @scenario(
            "outline.feature",
            "Outlined given, when, thens",
        )
        def test_outline(other_fixture):
            pass
        """
        )
    )
    result = testdir.runpytest()
    result.assert_outcomes(passed=6)
def test_outline_with_escaped_pipes(testdir):
    """Test parametrized feature example table with escaped pipe characters in input.

    ``\\|`` must be parsed as a literal pipe inside a cell; ``\\\\`` as a
    literal backslash.
    """
    testdir.makefile(
        ".feature",
        outline=textwrap.dedent(
            r"""\
            Feature: Outline With Special characters
                Scenario Outline: Outline with escaped pipe character
                    # Just print the string so that we can assert later what it was by reading the output
                    Given I print the <string>
                    Examples:
                    | string       |
                    | bork         |
                    | \|bork       |
                    | bork \|      |
                    | bork\|\|bork |
                    | \|           |
                    | bork      \\ |
                    | bork    \\\| |
            """
        ),
    )
    testdir.makepyfile(
        textwrap.dedent(
            """\
            from pytest_bdd import scenario, given, parsers
            from pytest_bdd.utils import dump_obj
            @scenario("outline.feature", "Outline with escaped pipe character")
            def test_outline_with_escaped_pipe_character(request):
                pass
            @given(parsers.parse("I print the {string}"))
            def i_print_the_string(string):
                dump_obj(string)
            """
        )
    )
    result = testdir.runpytest("-s")
    result.assert_outcomes(passed=7)
    # Expected unescaped cell values, one per example row.
    assert collect_dumped_objects(result) == [
        r"bork",
        r"|bork",
        r"bork |",
        r"bork||bork",
        r"|",
        r"bork \\",
        r"bork \\|",
    ]
| pytest-dev/pytest-bdd | tests/feature/test_outline.py | Python | mit | 5,878 | 0.00051 |
#
# calculator.py : A calculator module for the deskbar applet.
#
# Copyright (C) 2008 by Johannes Buchner
# Copyright (C) 2007 by Michael Hofmann
# Copyright (C) 2006 by Callum McKenzie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Callum McKenzie <callum@spooky-possum.org> - Original author
# Michael Hofmann <mh21@piware.de> - compatibility changes for deskbar 2.20
# Johannes Buchner <buchner.johannes@gmx.at> - Made externally usable
#
# This version of calculator can be used with converter
# read how at http://twoday.tuwien.ac.at/jo/search?q=calculator+converter+deskbar
#
from __future__ import division
from deskbar.handlers.actions.CopyToClipboardAction import CopyToClipboardAction
from deskbar.defs import VERSION
from gettext import gettext as _
import deskbar.core.Utils
import deskbar.interfaces.Match
import deskbar.interfaces.Module
import logging
import math
import re
# Module-level logger for this handler.
LOGGER = logging.getLogger(__name__)
# Deskbar discovers handler classes through this list.
HANDLERS = ["CalculatorModule"]
def bin (n):
    """A local binary equivalent of the hex and oct builtins.

    Negative numbers are rendered with a "...111" prefix to suggest the
    infinite sign-extension of two's complement.
    """
    if n == 0:
        return "0b0"
    digits = ""
    if n < 0:
        # Shift until only the sign bits (-1) remain.
        while n != -1:
            digits = str(n & 1) + digits
            n >>= 1
        return "0b" + "...111" + digits
    while n != 0:
        digits = str(n & 1) + digits
        n >>= 1
    return "0b" + digits
# These next three make sure {hex, oct, bin} can handle floating point,
# by rounding. This makes sure things like hex(255/2) behave as a
# programmer would expect while allowing 255/2 to equal 127.5 for normal
# people. Abstracting out the body of these into a single function which
# takes hex, oct or bin as an argument seems to run into problems with
# those functions not being defined correctly in the resticted eval (?).
def lenient_hex (c):
    """hex() that also accepts floats, by truncating to int on TypeError."""
    try:
        result = hex (c)
    except TypeError:
        result = hex (int (c))
    return result
def lenient_oct (c):
    """oct() that also accepts floats, by truncating to int on TypeError."""
    try:
        result = oct (c)
    except TypeError:
        result = oct (int (c))
    return result
def lenient_bin (c):
    """bin() that also accepts floats, by truncating to int on TypeError."""
    try:
        result = bin (c)
    except TypeError:
        result = bin (int (c))
    return result
class CalculatorAction (CopyToClipboardAction):
    """Deskbar action that copies a computed answer to the clipboard.

    The clipboard payload is the answer; ``text`` keeps the expression as
    originally typed so the verb can show "expr = answer".
    """
    def __init__ (self, text, answer):
        CopyToClipboardAction.__init__ (self, answer, answer)
        self.text = text
    def get_verb(self):
        # %(origtext)s / %(name)s are filled from get_name()'s dict.
        return _("Copy <b>%(origtext)s = %(name)s</b> to clipboard")
    def get_name(self, text = None):
        """Because the text variable for history entries contains the text
        typed for the history search (and not the text of the orginal action),
        we store the original text seperately."""
        result = CopyToClipboardAction.get_name (self, text)
        result["origtext"] = self.text
        return result
    def get_tooltip(self, text=None):
        return self._name
class CalculatorMatch (deskbar.interfaces.Match):
    """Deskbar match wrapping a calculated answer for the typed expression."""
    def __init__ (self, text, answer, **kwargs):
        deskbar.interfaces.Match.__init__ (self, name = text,
            icon = "gtk-add", category = "calculator", **kwargs)
        self.answer = str (answer)
        self.add_action (CalculatorAction (text, self.answer))
    def get_hash (self):
        # Deduplicate matches by their answer string.
        return self.answer
class CalculatorModule (deskbar.interfaces.Module):
    """Deskbar module that evaluates simple arithmetic expressions.

    Hex (0x...) and binary (0b...) literals are rewritten to decimal before
    the expression is evaluated in a restricted eval environment.
    """
    INFOS = {"icon": deskbar.core.Utils.load_icon ("gtk-add"),
             "name": _("Calculator"),
             "description": _("Calculate simple equations"),
             "version" : VERSION,
             "categories" : { "calculator" : { "name" : _("Calculator") }}}
    def __init__ (self):
        deskbar.interfaces.Module.__init__ (self)
        # Regexes matching hex and binary literals ('_' allowed as separator).
        self.hexre = re.compile ("0[Xx][0-9a-fA-F_]*[0-9a-fA-F]")
        self.binre = re.compile ("0[bB][01_]*[01]")
    def _number_parser (self, match, base):
        """A generic number parser, regardless of base. It also ignores the
        '_' character so it can be used as a separator. Note how we skip
        the first two characters since we assume it is something like '0x'
        or '0b' and identifies the base."""
        table = { '0' : 0, '1' : 1, '2' : 2, '3' : 3, '4' : 4,
                  '5' : 5, '6' : 6, '7' : 7, '8' : 8, '9' : 9,
                  'a' : 10, 'b' : 11, 'c' : 12, 'd' : 13,
                  'e' : 14, 'f' : 15 }
        d = 0
        for c in match.group()[2:]:
            if c != "_":
                d = d * base + table[c]
        return str (d)
    def _binsub (self, match):
        """Because python doesn't handle binary literals, we parse it
        ourselves and replace it with a decimal representation."""
        return self._number_parser (match, 2)
    def _hexsub (self, match):
        """Parse the hex literal ourselves. We could let python do it, but
        since we have a generic parser we use that instead."""
        return self._number_parser (match, 16)
    def run_query (self, query):
        """We evaluate the equation by first replacing hex and binary literals
        with their decimal representation. (We need to check hex, so we can
        distinguish 0x10b1 as a hex number, not 0x1 followed by 0b1.) We
        severely restrict the eval environment. Any errors are ignored."""
        # Whitelisted names available to the evaluated expression; builtins
        # are disabled.  NOTE(review): restricted eval mitigates but does not
        # fully sandbox arbitrary input — treat with care.
        restricted_dictionary = { "__builtins__" : None, "abs" : abs,
                                  "acos" : math.acos, "asin" : math.asin,
                                  "atan" : math.atan, "atan2" : math.atan2,
                                  "bin" : lenient_bin,"ceil" : math.ceil,
                                  "cos" : math.cos, "cosh" : math.cosh,
                                  "degrees" : math.degrees,
                                  "exp" : math.exp, "floor" : math.floor,
                                  "hex" : lenient_hex, "int" : int,
                                  "log" : math.log, "pow" : math.pow,
                                  "log10" : math.log10, "oct" : lenient_oct,
                                  "pi" : math.pi, "radians" : math.radians,
                                  "round": round, "sin" : math.sin,
                                  "sinh" : math.sinh, "sqrt" : math.sqrt,
                                  "tan" : math.tan, "tanh" : math.tanh}
        try:
            scrubbedquery = query.lower()
            # Hex must be substituted before binary (see docstring).
            scrubbedquery = self.hexre.sub (self._hexsub, scrubbedquery)
            scrubbedquery = self.binre.sub (self._binsub, scrubbedquery)
            # Treat all bracket styles as parentheses.
            for (c1, c2) in (("[", "("), ("{", "("), ("]", ")"), ("}", ")")):
                scrubbedquery = scrubbedquery.replace (c1, c2)
            answer = eval (scrubbedquery, restricted_dictionary)
            # Try and avoid echoing back simple numbers. Note that this
            # doesn't work well for floating point, e.g. '3.' behaves badly.
            if str (answer) == query:
                return None
            # We need this check because the eval can return function objects
            # when we are halfway through typing the expression.
            if isinstance (answer, (float, int, long, str)):
                return answer
            else:
                return None
        except Exception, e:
            LOGGER.debug (str(e))
            return None
    def query (self, query):
        # Returns the raw answer when one was found, otherwise an empty
        # list (NOTE(review): mixed return types — confirm callers cope).
        answer = self.run_query(query)
        if answer != None:
            result = [CalculatorMatch (query, answer)]
            self._emit_query_ready (query, result)
            return answer
        else:
            return []
| benpicco/mate-deskbar-applet | deskbar/handlers/calculator.py | Python | gpl-2.0 | 8,080 | 0.017946 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the matches app's tables.

        Creates Card, Role, Participation, EventType, CardEvent,
        MatchTypeAspect, MatchType (plus its aspects M2M table) and Match.
        """
        # Adding model 'Card'
        db.create_table('matches_card', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('date', self.gf('django.db.models.fields.DateField')()),
            ('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.Promotion'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=127, null=True, blank=True)),
        ))
        db.send_create_signal('matches', ['Card'])
        # Adding model 'Role'
        db.create_table('matches_role', (
            ('description', self.gf('django.db.models.fields.CharField')(max_length=255, primary_key=True)),
        ))
        db.send_create_signal('matches', ['Role'])
        # Adding model 'Participation'
        db.create_table('matches_participation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('event', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.CardEvent'])),
            ('participant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wrestlers.WrestlingEntity'])),
            ('role', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.Role'])),
        ))
        db.send_create_signal('matches', ['Participation'])
        # Adding model 'EventType'
        db.create_table('matches_eventtype', (
            ('description', self.gf('django.db.models.fields.CharField')(max_length=127, primary_key=True)),
        ))
        db.send_create_signal('matches', ['EventType'])
        # Adding model 'CardEvent'
        db.create_table('matches_cardevent', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('reviewed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('reviewed_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('order', self.gf('django.db.models.fields.IntegerField')()),
            ('card', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.Card'])),
            ('event_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.EventType'])),
        ))
        db.send_create_signal('matches', ['CardEvent'])
        # Adding model 'MatchTypeAspect'
        db.create_table('matches_matchtypeaspect', (
            ('description', self.gf('django.db.models.fields.CharField')(max_length=127, primary_key=True)),
        ))
        db.send_create_signal('matches', ['MatchTypeAspect'])
        # Adding model 'MatchType'
        db.create_table('matches_matchtype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=127)),
        ))
        db.send_create_signal('matches', ['MatchType'])
        # Adding M2M table for field aspects on 'MatchType'
        db.create_table('matches_matchtype_aspects', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('matchtype', models.ForeignKey(orm['matches.matchtype'], null=False)),
            ('matchtypeaspect', models.ForeignKey(orm['matches.matchtypeaspect'], null=False))
        ))
        db.create_unique('matches_matchtype_aspects', ['matchtype_id', 'matchtypeaspect_id'])
        # Adding model 'Match'
        # Match extends CardEvent via a OneToOne parent link (multi-table
        # inheritance).
        db.create_table('matches_match', (
            ('cardevent_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['matches.CardEvent'], unique=True, primary_key=True)),
            ('match_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['matches.MatchType'])),
            ('winner', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='won_matches', null=True, to=orm['wrestlers.WrestlingEntity'])),
        ))
        db.send_create_signal('matches', ['Match'])
def backwards(self, orm):
# Deleting model 'Card'
db.delete_table('matches_card')
# Deleting model 'Role'
db.delete_table('matches_role')
# Deleting model 'Participation'
db.delete_table('matches_participation')
# Deleting model 'EventType'
db.delete_table('matches_eventtype')
# Deleting model 'CardEvent'
db.delete_table('matches_cardevent')
# Deleting model 'MatchTypeAspect'
db.delete_table('matches_matchtypeaspect')
# Deleting model 'MatchType'
db.delete_table('matches_matchtype')
# Removing M2M table for field aspects on 'MatchType'
db.delete_table('matches_matchtype_aspects')
# Deleting model 'Match'
db.delete_table('matches_match')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'matches.card': {
'Meta': {'object_name': 'Card'},
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '127', 'null': 'True', 'blank': 'True'}),
'promotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.Promotion']"})
},
'matches.cardevent': {
'Meta': {'object_name': 'CardEvent'},
'card': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['matches.Card']"}),
'event_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['matches.EventType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wrestlers.WrestlingEntity']", 'through': "orm['matches.Participation']", 'symmetrical': 'False'}),
'reviewed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reviewed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'matches.eventtype': {
'Meta': {'object_name': 'EventType'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '127', 'primary_key': 'True'})
},
'matches.match': {
'Meta': {'object_name': 'Match', '_ormbases': ['matches.CardEvent']},
'cardevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['matches.CardEvent']", 'unique': 'True', 'primary_key': 'True'}),
'match_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['matches.MatchType']"}),
'winner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'won_matches'", 'null': 'True', 'to': "orm['wrestlers.WrestlingEntity']"})
},
'matches.matchtype': {
'Meta': {'object_name': 'MatchType'},
'aspects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['matches.MatchTypeAspect']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'matches.matchtypeaspect': {
'Meta': {'object_name': 'MatchTypeAspect'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '127', 'primary_key': 'True'})
},
'matches.participation': {
'Meta': {'object_name': 'Participation'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['matches.CardEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wrestlers.WrestlingEntity']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['matches.Role']"})
},
'matches.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'})
},
'promotions.promotion': {
'Meta': {'object_name': 'Promotion'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
'wrestlers.wrestlingentity': {
'Meta': {'object_name': 'WrestlingEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['matches']
| OddBloke/moore | matches/migrations/0001_initial.py | Python | agpl-3.0 | 12,520 | 0.006709 |
"""Functions to construct sparse matrices
"""
__docformat__ = "restructuredtext en"
__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',
'hstack', 'vstack', 'bmat', 'rand']
from warnings import warn
import numpy as np
from sputils import upcast
from csr import csr_matrix
from csc import csc_matrix
from bsr import bsr_matrix
from coo import coo_matrix
from lil import lil_matrix
from dia import dia_matrix
def spdiags(data, diags, m, n, format=None):
    """
    Return a sparse matrix built from the given diagonals.

    Parameters
    ----------
    data : array_like
        Matrix diagonals stored row-wise.
    diags : diagonals to set
        - k = 0  the main diagonal
        - k > 0  the k-th upper diagonal
        - k < 0  the k-th lower diagonal
    m, n : int
        Shape of the result.
    format : format of the result (e.g. "csr")
        By default (format=None) an appropriate sparse matrix
        format is returned.  This choice is subject to change.

    See Also
    --------
    dia_matrix : the sparse DIAgonal format.

    Examples
    --------
    >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
    >>> diags = array([0,-1,2])
    >>> spdiags(data, diags, 4, 4).todense()
    matrix([[1, 0, 3, 0],
            [1, 2, 0, 4],
            [0, 2, 3, 0],
            [0, 0, 3, 4]])
    """
    # Build in the natural DIA format first, then convert if requested.
    result = dia_matrix((data, diags), shape=(m, n))
    return result.asformat(format)
def identity(n, dtype='d', format=None):
    """Identity matrix in sparse format

    Returns an identity matrix with shape (n,n) using a given
    sparse format and dtype.

    Parameters
    ----------
    n : integer
        Shape of the identity matrix.
    dtype :
        Data type of the matrix
    format : string
        Sparse format of the result, e.g. format="csr", etc.

    Examples
    --------
    >>> identity(3).todense()
    matrix([[ 1.,  0.,  0.],
            [ 0.,  1.,  0.],
            [ 0.,  0.,  1.]])
    >>> identity(3, dtype='int8', format='dia')
    <3x3 sparse matrix of type '<type 'numpy.int8'>'
            with 3 stored elements (1 diagonals) in DIAgonal format>
    """
    if format in ['csr', 'csc']:
        # CSR and CSC share the same (data, indices, indptr) layout for I_n.
        indptr = np.arange(n+1, dtype=np.intc)
        indices = np.arange(n, dtype=np.intc)
        data = np.ones(n, dtype=dtype)
        # Map the format name to its class directly instead of
        # eval('%s_matrix' % format): same result, no dynamic code execution.
        cls = {'csr': csr_matrix, 'csc': csc_matrix}[format]
        return cls((data, indices, indptr), (n, n))
    elif format == 'coo':
        row = np.arange(n, dtype=np.intc)
        col = np.arange(n, dtype=np.intc)
        data = np.ones(n, dtype=dtype)
        return coo_matrix((data, (row, col)), (n, n))
    elif format == 'dia':
        data = np.ones(n, dtype=dtype)
        diags = [0]
        return dia_matrix((data, diags), shape=(n, n))
    else:
        # Any other format (including None): build CSR and convert.
        return identity(n, dtype=dtype, format='csr').asformat(format)
def eye(m, n, k=0, dtype='d', format=None):
    """Return a sparse (m x n) matrix whose k-th diagonal is all ones
    and everything else is zeros.
    """
    m, n = int(m), int(n)
    # The k-th diagonal of an (m, n) matrix has min(m + k, n) entries
    # (clamped at zero when the diagonal lies entirely outside).
    diag_length = max(0, min(m + k, n))
    ones_row = np.ones((1, diag_length), dtype=dtype)
    return spdiags(ones_row, k, m, n).asformat(format)
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ----------
    A : sparse or dense matrix
        first matrix of the product
    B : sparse or dense matrix
        second matrix of the product
    format : string
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker product in a sparse matrix format

    Examples
    --------
    >>> A = csr_matrix(array([[0,2],[5,0]]))
    >>> B = csr_matrix(array([[1,2],[3,4]]))
    >>> kron(A,B).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])
    >>> kron(A,[[1,2],[3,4]]).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])
    """
    B = coo_matrix(B)
    # Heuristic: if B is at least half dense, store the result as BSR with
    # dense blocks of B's shape; otherwise build the result entry-wise in COO.
    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        #B is fairly dense, use BSR
        A = csr_matrix(A,copy=True)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix( output_shape )
        # Each stored entry a_ij of A becomes the dense block a_ij * B, so
        # A's CSR structure doubles as the BSR block structure.
        B = B.toarray()
        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
        data = data * B
        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
    else:
        #use COO
        A = coo_matrix(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix( output_shape )
        # expand entries of a into blocks
        # (each entry of A is repeated once per stored entry of B)
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)
        # scale A's indices up to the top-left corner of each block...
        row *= B.shape[0]
        col *= B.shape[1]
        # increment block indices
        # (...then offset within each block by B's own indices; the ordering
        # of these in-place updates is significant)
        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
        row += B.row
        col += B.col
        row,col = row.reshape(-1),col.reshape(-1)
        # compute block entries
        data = data.reshape(-1,B.nnz) * B.data
        data = data.reshape(-1)
        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    The Kronecker sum of two square sparse matrices is
    kron(I_n, A) + kron(B, I_m), where A has shape (m, m), B has shape
    (n, n), and I_m / I_n are identity matrices of matching size.

    Parameters
    ----------
    A
        square matrix
    B
        square matrix
    format : string
        format of the result (e.g. "csr")

    Returns
    -------
    kronecker sum in a sparse matrix format
    """
    A = coo_matrix(A)
    B = coo_matrix(B)
    a_rows, a_cols = A.shape
    b_rows, b_cols = B.shape
    if a_rows != a_cols:
        raise ValueError('A is not square')
    if b_rows != b_cols:
        raise ValueError('B is not square')
    dtype = upcast(A.dtype, B.dtype)
    left = kron(identity(b_rows, dtype=dtype), A, format=format)
    right = kron(B, identity(a_rows, dtype=dtype), format=format)
    # L + R may come back in a different format, so convert explicitly.
    return (left + right).asformat(format)
def hstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices horizontally (column wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : string
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.

    See Also
    --------
    vstack : stack sparse matrices vertically (row wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, vstack
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> hstack( [A,B] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6]])
    """
    # A horizontal stack is a block matrix with a single block-row.
    single_row = [blocks]
    return bmat(single_row, format=format, dtype=dtype)
def vstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices vertically (row wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : string
        sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.

    See Also
    --------
    hstack : stack sparse matrices horizontally (column wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, vstack
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5,6]])
    >>> vstack( [A,B] ).todense()
    matrix([[1, 2],
            [3, 4],
            [5, 6]])
    """
    # A vertical stack is a block matrix with one block per block-row.
    block_rows = [[block] for block in blocks]
    return bmat(block_rows, format=format, dtype=dtype)
def bmat(blocks, format=None, dtype=None):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ----------
    blocks
        grid of sparse matrices with compatible shapes
        an entry of None implies an all-zero matrix
    format : sparse format of the result (e.g. "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype
        data type of the result; inferred from the blocks when None.

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> C = coo_matrix([[7]])
    >>> bmat( [[A,B],[None,C]] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6],
            [0, 0, 7]])
    >>> bmat( [[A,None],[None,C]] ).todense()
    matrix([[1, 2, 0],
            [3, 4, 0],
            [0, 0, 7]])
    """
    blocks = np.asarray(blocks, dtype='object')
    # np.rank() was ambiguous (array rank vs. linalg rank) and has been
    # removed from NumPy; ndim is the supported spelling.
    if blocks.ndim != 2:
        raise ValueError('blocks must have rank 2')
    M, N = blocks.shape
    # np.bool was a deprecated (now removed) alias for the builtin bool.
    block_mask = np.zeros(blocks.shape, dtype=bool)
    brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)
    bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)
    # convert everything to COO format, checking shape compatibility as we go
    for i in range(M):
        for j in range(N):
            if blocks[i, j] is not None:
                A = coo_matrix(blocks[i, j])
                blocks[i, j] = A
                block_mask[i, j] = True
                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                else:
                    if brow_lengths[i] != A.shape[0]:
                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)
                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                else:
                    if bcol_lengths[j] != A.shape[1]:
                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)
    # ensure that at least one value in each row and col is not None
    if brow_lengths.min() == 0:
        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin())
    if bcol_lengths.min() == 0:
        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin())
    nnz = sum(A.nnz for A in blocks[block_mask])
    if dtype is None:
        dtype = upcast(*tuple([A.dtype for A in blocks[block_mask]]))
    # cumulative offsets give each block's top-left corner in the result
    row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))
    col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))
    data = np.empty(nnz, dtype=dtype)
    row = np.empty(nnz, dtype=np.intc)
    col = np.empty(nnz, dtype=np.intc)
    nnz = 0
    for i in range(M):
        for j in range(N):
            if blocks[i, j] is not None:
                A = blocks[i, j]
                data[nnz:nnz + A.nnz] = A.data
                # copy block indices, then shift them by the block's offset
                row[nnz:nnz + A.nnz] = A.row
                col[nnz:nnz + A.nnz] = A.col
                row[nnz:nnz + A.nnz] += row_offsets[i]
                col[nnz:nnz + A.nnz] += col_offsets[j]
                nnz += A.nnz
    shape = (np.sum(brow_lengths), np.sum(bcol_lengths))
    return coo_matrix((data, (row, col)), shape=shape).asformat(format)
def rand(m, n, density=0.01, format="coo", dtype=None):
    """Generate a sparse matrix of the given shape and density with uniformely
    distributed values.

    Parameters
    ----------
    m, n: int
        shape of the matrix
    density: real
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format: str
        sparse matrix format.
    dtype: dtype
        type of the returned matrix values.

    Notes
    -----
    Only float types are supported for now.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")
    if dtype and dtype not in [np.float32, np.float64, np.longdouble]:
        raise NotImplementedError("type %s not supported" % dtype)
    mn = m * n
    # XXX: sparse uses intc instead of intp...
    tp = np.intp
    if mn > np.iinfo(tp).max:
        msg = """\
Trying to generate a random sparse matrix such as the product of dimensions is
greater than %d - this is not supported on this machine
"""
        raise ValueError(msg % np.iinfo(tp).max)
    # Number of non zero values.  int() auto-promotes on Python 2, and the
    # Python-2-only long() builtin no longer exists on Python 3.
    k = int(density * m * n)
    # Generate a few more values than k so that we can get unique values
    # afterwards.
    # XXX: one could be smarter here
    mlow = 5
    fac = 1.02
    gk = min(k + mlow, fac * k)

    def _gen_unique_rand(_gk):
        # Draw _gk uniform samples, map them to flat indices in [0, mn) and
        # keep the first k unique ones.  np.random.rand requires an integer
        # count, so truncate the (possibly fractional) candidate count.
        candidates = np.random.rand(int(_gk))
        return np.unique(np.floor(candidates * mn))[:k]

    flat_ids = _gen_unique_rand(gk)
    while flat_ids.size < k:
        # Not enough unique indices: grow the candidate pool and retry.
        gk *= 1.05
        flat_ids = _gen_unique_rand(gk)
    # Decode flat indices into (row, column) coordinates.
    j = np.floor(flat_ids * 1. / m).astype(tp)
    i = (flat_ids - j * m).astype(tp)
    vals = np.random.rand(k).astype(dtype)
    return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)
| lesserwhirls/scipy-cwt | scipy/sparse/construct.py | Python | bsd-3-clause | 12,885 | 0.006054 |
# -*- coding: iso-8859-15 -*-
"""
A wxValidator that matches APDU in hexadecimal such as:
A4 A0 00 00 02
A4A0000002
__author__ = "http://www.gemalto.com"
Copyright 2001-2010 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import re
import string
import wx
# a regexp to match ATRs and APDUs: runs of 1-2 digit hex bytes, each
# optionally followed by spaces, e.g. "A4 A0 00 00 02" or "A4A0000002".
# NOTE(review): the pattern can match an empty prefix and re.match() only
# anchors at the start, so callers must verify the match spans the whole
# input to reject trailing garbage.
hexbyte = "[0-9a-fA-F]{1,2}"
apduregexp = re.compile("((%s)[ ]*)*" % hexbyte)
class APDUHexValidator(wx.PyValidator):
    '''A wxValidator that matches APDU in hexadecimal such as:
    A4 A0 00 00 02
    A4A0000002'''

    def __init__(self):
        wx.PyValidator.__init__(self)
        self.Bind(wx.EVT_CHAR, self.OnChar)

    def Clone(self):
        # wx requires validators to be clonable.
        return APDUHexValidator()

    def Validate(self, win):
        """Return True when the control's entire value parses as a hex APDU."""
        tc = self.GetWindow()
        val = tc.GetValue()
        # Bug fix: the original referenced an undefined name 'value'
        # (NameError); it must test 'val'.  Also anchor the match to the full
        # string: re.match() only matches a prefix and this pattern accepts an
        # empty prefix, so an unanchored check would accept any input.
        match = apduregexp.match(val)
        if match is None or match.end() != len(val):
            return False
        return True

    def OnChar(self, event):
        """Filter keystrokes so the control can only hold a valid hex APDU."""
        key = event.GetKeyCode()
        # Accept a space or hex digit only if the resulting value still
        # parses completely (anchored check, see Validate); otherwise fall
        # through to the beep below.
        if wx.WXK_SPACE == key or chr(key) in string.hexdigits:
            value = event.GetEventObject().GetValue() + chr(key)
            match = apduregexp.match(value)
            if match is not None and match.end() == len(value):
                event.Skip()
                return
        # Always let control/navigation keys through.
        if key < wx.WXK_SPACE or key == wx.WXK_DELETE or key > 255:
            event.Skip()
            return
        if not wx.Validator_IsSilent():
            wx.Bell()
        return
| 12019/pyscard | smartcard/wx/APDUHexValidator.py | Python | lgpl-2.1 | 2,088 | 0 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic LLDP Processing Hook"""
import binascii
from construct import core
import netaddr
from openstack import exceptions
from oslo_config import cfg
from oslo_utils import netutils
from ironic_inspector.common import lldp_parsers
from ironic_inspector.common import lldp_tlvs as tlv
from ironic_inspector.plugins import base
from ironic_inspector import utils
# Logger and config handles shared by the hook below.
LOG = utils.getProcessingLogger(__name__)
CONF = cfg.CONF
# Keys written into an Ironic port's local_link_connection dict.
PORT_ID_ITEM_NAME = "port_id"
SWITCH_ID_ITEM_NAME = "switch_id"
# Maps field names produced by the lldp_basic plugin ('lldp_processed' data)
# to the local_link_connection keys they populate.
LLDP_PROC_DATA_MAPPING =\
    {lldp_parsers.LLDP_CHASSIS_ID_NM: SWITCH_ID_ITEM_NAME,
     lldp_parsers.LLDP_PORT_ID_NM: PORT_ID_ITEM_NAME}
class GenericLocalLinkConnectionHook(base.ProcessingHook):
    """Process mandatory LLDP packet fields

    Non-vendor specific LLDP packet fields processed for each NIC found for a
    baremetal node, port ID and chassis ID. These fields if found and if valid
    will be saved into the local link connection info port id and switch id
    fields on the Ironic port that represents that NIC.
    """

    def _get_local_link_patch(self, tlv_type, tlv_value, port, node_info):
        """Build a JSON patch op from one raw (hex-encoded) LLDP TLV.

        :param tlv_type: integer LLDP TLV type
        :param tlv_value: hex string holding the TLV payload
        :param port: Ironic port object the NIC maps to
        :param node_info: NodeInfo object, used for logging context
        :returns: a JSON patch dict, or None when nothing should be patched
        """
        try:
            data = bytearray(binascii.unhexlify(tlv_value))
        except (TypeError, binascii.Error):
            # binascii.Error (a ValueError subclass) is what Python 3 raises
            # for odd-length or non-hex input; TypeError covers wrong types.
            # Catching only TypeError would crash instead of warning.
            LOG.warning("TLV value for TLV type %d not in correct "
                        "format, ensure TLV value is in "
                        "hexidecimal format when sent to "
                        "inspector", tlv_type, node_info=node_info)
            return
        item = value = None
        if tlv_type == tlv.LLDP_TLV_PORT_ID:
            try:
                port_id = tlv.PortId.parse(data)
            except (core.MappingError, netaddr.AddrFormatError) as e:
                LOG.warning("TLV parse error for Port ID: %s", e,
                            node_info=node_info)
                return
            item = PORT_ID_ITEM_NAME
            value = port_id.value.value if port_id.value else None
        elif tlv_type == tlv.LLDP_TLV_CHASSIS_ID:
            try:
                chassis_id = tlv.ChassisId.parse(data)
            except (core.MappingError, netaddr.AddrFormatError) as e:
                LOG.warning("TLV parse error for Chassis ID: %s", e,
                            node_info=node_info)
                return
            # Only accept mac address for chassis ID
            if 'mac_address' in chassis_id.subtype:
                item = SWITCH_ID_ITEM_NAME
                value = chassis_id.value.value
        if item and value:
            # Honor existing values unless overwriting is configured.
            if (not CONF.processing.overwrite_existing and
                    item in port.local_link_connection):
                return
            return {'op': 'add',
                    'path': '/local_link_connection/%s' % item,
                    'value': value}

    def _get_lldp_processed_patch(self, name, item, lldp_proc_data, port,
                                  node_info):
        """Build a JSON patch op from data pre-parsed by the lldp_basic plugin.

        :param name: key to look up in the 'lldp_processed' dict
        :param item: local_link_connection key to populate
        :param lldp_proc_data: per-interface data from 'all_interfaces'
        :param port: Ironic port object the NIC maps to
        :param node_info: NodeInfo object, used for logging context
        :returns: a JSON patch dict, or None when nothing should be patched
        """
        if 'lldp_processed' not in lldp_proc_data:
            return
        value = lldp_proc_data['lldp_processed'].get(name)
        if value:
            # Only accept mac address for chassis ID
            if (item == SWITCH_ID_ITEM_NAME and
                    not netutils.is_valid_mac(value)):
                LOG.info("Skipping switch_id since it's not a MAC: %s", value,
                         node_info=node_info)
                return
            if (not CONF.processing.overwrite_existing and
                    item in port.local_link_connection):
                return
            return {'op': 'add',
                    'path': '/local_link_connection/%s' % item,
                    'value': value}

    def before_update(self, introspection_data, node_info, **kwargs):
        """Process LLDP data and patch Ironic port local link connection"""
        inventory = utils.get_inventory(introspection_data)
        ironic_ports = node_info.ports()
        for iface in inventory['interfaces']:
            if iface['name'] not in introspection_data['all_interfaces']:
                continue
            mac_address = iface['mac_address']
            port = ironic_ports.get(mac_address)
            if not port:
                LOG.debug("Skipping LLC processing for interface %s, matching "
                          "port not found in Ironic.", mac_address,
                          node_info=node_info, data=introspection_data)
                continue
            lldp_data = iface.get('lldp')
            if lldp_data is None:
                LOG.warning("No LLDP Data found for interface %s",
                            mac_address, node_info=node_info,
                            data=introspection_data)
                continue
            patches = []
            # First check if lldp data was already processed by lldp_basic
            # plugin which stores data in 'all_interfaces'
            proc_data = introspection_data['all_interfaces'][iface['name']]
            for name, item in LLDP_PROC_DATA_MAPPING.items():
                patch = self._get_lldp_processed_patch(name, item,
                                                       proc_data, port,
                                                       node_info)
                if patch is not None:
                    patches.append(patch)
            # If no processed lldp data was available then parse raw lldp data
            if not patches:
                for tlv_type, tlv_value in lldp_data:
                    patch = self._get_local_link_patch(tlv_type, tlv_value,
                                                       port, node_info)
                    if patch is not None:
                        patches.append(patch)
            try:
                node_info.patch_port(port, patches)
            except exceptions.BadRequestException as e:
                LOG.warning("Failed to update port %(uuid)s: %(error)s",
                            {'uuid': port.id, 'error': e},
                            node_info=node_info)
| openstack/ironic-inspector | ironic_inspector/plugins/local_link_connection.py | Python | apache-2.0 | 6,493 | 0 |
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear.xsi.curve
# @author Jeremie Passerin
#
# @brief create, merge, split curves...
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import xsi, c, XSIMath, XSIFactory
import gear.xsi.utils as uti
import gear.xsi.transform as tra
##########################################################
# DRAW
##########################################################
# ========================================================
## Create a curve whose control points are driven by the given centers (one CV per center).\n
# Each control point gets a single-vertex cluster constrained to its center with a ClusterCenter operator.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param centers List of X3DObject or Collection - Object that will drive the curve.
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @return NurbCurve - The newly created curve.
def addCnsCurve(parent, name, centers, close=False, degree=1):

    # Work on a plain list (the argument may be an XSI collection).
    centers = list(centers)

    # A cubic curve needs at least 4 CVs; duplicate the end centers as needed.
    if degree == 3:
        if len(centers) == 2:
            centers.insert(0, centers[0])
            centers.append(centers[-1])
        elif len(centers) == 3:
            centers.append(centers[-1])

    # Flat position array: x, y, z, w per control point.
    points = []
    for center in centers:
        transform = center.Kinematics.Global.Transform
        points.extend([transform.PosX, transform.PosY, transform.PosZ, 1])

    curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)

    # Constrain each CV to its driving center through a cluster center op.
    crv_geo = curve.ActivePrimitive.Geometry
    for index, center in enumerate(centers):
        cluster = crv_geo.AddCluster(c.siVertexCluster, "center_%s" % index, [index])
        xsi.ApplyOp("ClusterCenter", cluster.FullName + ";" + center.FullName, 0, 0, None, 2)

    return curve
# ========================================================
## Create a NurbsCurve with a single subcurve.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions as a flat array [x0, y0, z0, 1, x1, y1, z1, 1, ...].
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - RGB color of the curve (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve(parent, name, points, close=False, degree=1, t=XSIMath.CreateTransform(), color=[0,0,0]):

    new_curve = parent.AddNurbsCurve(
        points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
    uti.setColor(new_curve, color)
    new_curve.Kinematics.Global.Transform = t

    return new_curve
# ========================================================
## Create a NurbsCurve with multiple subcurves.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions as a flat array [x0, y0, z0, 1, x1, y1, z1, 1, ...].
# @param ncp List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param kn List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param nkn List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param close List of Boolean - True to close each subcurve.
# @param degree List of Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - RGB color of the curve (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve2(parent, name, points, ncp=[], kn=[], nkn=[], close=[], degree=[], t=XSIMath.CreateTransform(), color=[0,0,0]):

    subcurve_count = len(ncp)
    # One parameterization flag per subcurve.
    parameterization = [c.siNonUniformParameterization] * subcurve_count

    new_curve = parent.AddNurbsCurveList2(subcurve_count, points, ncp, kn, nkn, close, degree, parameterization, c.siSINurbs, name)
    uti.setColor(new_curve, color)
    new_curve.Kinematics.Global.Transform = t

    return new_curve
# ========================================================
## Create a NurbsCurve with a single subcurve from a list of positions.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param positions List of SIVector3 - positions of the curve points.
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param knotsPara - knots parametrization of the curve.
# @param t SITransformation - Global transform.
# @param color List of Double - RGB color of the curve (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurveFromPos(parent, name, positions, close=False, degree=1, knotsPara=c.siNonUniformParameterization, t=XSIMath.CreateTransform(), color=[0,0,0]):

    # Flatten the vectors into the x, y, z, w array AddNurbsCurve expects.
    points = []
    for position in positions:
        points.extend([position.X, position.Y, position.Z, 1])

    new_curve = parent.AddNurbsCurve(points, None, close, degree, knotsPara, c.siSINurbs, name)
    uti.setColor(new_curve, color)
    new_curve.Kinematics.Global.Transform = t

    return new_curve
##########################################################
# SUBCURVES
##########################################################
# Merge Curves ===========================================
## Merge the given curves into one single curve (one subcurve per source subcurve).
# @param curves List of NurbsCurve - The curves to merge.
# @return NurbsCurve.
def mergeCurves(curves):

    points = []
    ncp = []
    kn = []
    nkn = []
    closed = []
    degree = []

    for crv in curves:
        matrix4 = crv.Kinematics.Global.Transform.Matrix4
        for subcrv in crv.ActivePrimitive.Geometry.Curves:
            knots = subcrv.Knots.Array
            ncp.append(subcrv.ControlPoints.Count)
            kn.extend(knots)
            nkn.append(len(knots))
            closed.append(isClosed(subcrv))
            degree.append(subcrv.Degree)
            # Bake each control point into world space.
            for control_point in subcrv.ControlPoints:
                position = control_point.Position
                position.MulByMatrix4InPlace(matrix4)
                points.extend([position.X, position.Y, position.Z, 1])

    if len(ncp) > 1:
        return addCurve2(xsi.ActiveSceneRoot, "curve", points, ncp, kn, nkn, closed, degree)
    return addCurve(xsi.ActiveSceneRoot, "curve", points, closed[0], degree[0])
# Split Curves ===========================================
## Split the given curve into one curve per subcurve.
# @param curve NurbsCurve - The curve to split.
# @return List of NurbsCurve.
def splitCurve(curve):

    t = curve.Kinematics.Global.Transform
    new_curves = []
    for index, subcrv in enumerate(curve.ActivePrimitive.Geometry.Curves):
        new_curves.append(addCurve(curve.Parent, curve.Name + str(index),
                                   subcrv.ControlPoints.Array,
                                   isClosed(subcrv), subcrv.Degree, t))

    return new_curves
# Is Closed ==============================================
## Return True if the given nurbs subcurve is closed.
# @param nurbscrv NurbsCurve - The nurbs curve to check.
# @return Boolean.
def isClosed(nurbscrv):
    # An OPEN subcurve has len(knots) - 2 control points when cubic
    # (degree 3), and exactly len(knots) control points otherwise;
    # any other count means the subcurve is closed.
    knot_count = len(nurbscrv.Knots.Array)
    if nurbscrv.Degree == 3:
        open_point_count = knot_count - 2
    else:
        open_point_count = knot_count
    return nurbscrv.ControlPoints.Count != open_point_count
##########################################################
# OPERATOR
##########################################################
# Apply Curve Resampler Op ===============================
## Resample the curve on itself, code of the operator is in the plugin sn_CurveTools
# @param curve NurbsCurve - The curve to resample.
# @return Operator
def applyCurveResamplerOp(curve):
    # Custom operator registered by the GEAR plugin under the name
    # "gear_CurveResamplerOp".
    op = XSIFactory.CreateObject("gear_CurveResamplerOp")
    # Connect the curve's primitive as an in/out port, then activate the op.
    op.AddIOPort(curve.ActivePrimitive)
    op.Connect()
    return op
##########################################################
# EVAL CURVE
##########################################################
# ========================================================
## Return the global position at a given percentage along a subcurve.
# @param percentage Double - Percentage along the subcurve.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @return SIVector3 - The global position.
def getGlobalPositionFromPercentage(percentage, crv, subcurve=0):
    crv_geo = crv.ActivePrimitive.Geometry
    crv_sub = crv_geo.Curves(subcurve)
    crv_tra = crv.Kinematics.Global.Transform
    # Evaluate in the curve's object space, then map to world space.
    position = crv_sub.EvaluatePositionFromPercentage(percentage)[0]
    position = XSIMath.MapObjectPositionToWorldSpace(crv_tra, position)
    return position
# ========================================================
## Return the U value of the closest point on the curve from a global position.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param normalized Boolean - True to return the normalized U.
# @return Double - The closest U value.
def getClosestU(position, crv, normalized=False):
    geometry = crv.ActivePrimitive.Geometry
    transform = crv.Kinematics.Global.Transform
    # The closest-point query works in the curve's object space.
    local_pos = XSIMath.MapWorldPositionToObjectSpace(transform, position)
    closest = geometry.GetClosestCurvePosition2(local_pos)
    sub = geometry.Curves(closest[0])
    u_value = closest[2]
    if normalized:
        u_value = sub.GetNormalizedUFromU(u_value)
    return u_value
# ========================================================
## Return the percentage of the closest point on the curve from a global position.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @return Double - The percentage along the closest subcurve.
def getClosestPercentage(position, crv):
    geometry = crv.ActivePrimitive.Geometry
    transform = crv.Kinematics.Global.Transform
    local_pos = XSIMath.MapWorldPositionToObjectSpace(transform, position)
    closest = geometry.GetClosestCurvePosition2(local_pos)
    # closest[0] is the subcurve index, closest[2] its U value.
    sub = geometry.Curves(closest[0])
    return sub.GetPercentageFromU(closest[2])
# ========================================================
## Return a global transform aligned to the closest point on the curve.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @param tan_axis String - Axis aimed along the tangent ("x", "y" or "z").
# @param upv_axis String - Axis aimed at the up vector.
# @param normal SIVector3 - Up-vector direction.
# @return SITransformation - Transform at the closest point on the curve.
# NOTE(review): the 'normal' default is created once at import time and
# shared across calls -- confirm getTransformLookingAt does not mutate it.
def getClosestGlobalTransform(position, crv, subcurve=0, tan_axis="x", upv_axis="y", normal=XSIMath.CreateVector3(0,1,0)):
    crv_geo = crv.ActivePrimitive.Geometry
    crv_sub = crv_geo.Curves(subcurve)
    crv_tra = crv.Kinematics.Global.Transform
    # Closest-point query runs in the curve's object space.
    pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
    rtn = crv_geo.GetClosestCurvePosition2(pos)
    u = rtn[2]
    pos = rtn[3]
    pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, pos)
    # Tangent at u, rotated by the inverted curve rotation, then offset by the
    # global closest point so it can serve as an aim target.
    tan = crv_sub.EvaluatePosition(u)[1]
    r = crv_tra.Rotation
    r.InvertInPlace()
    tan.MulByRotationInPlace(r)
    tan.AddInPlace(pos)
    t = tra.getTransformLookingAt(pos, tan, normal, tan_axis+upv_axis, False)
    return t
# ========================================================
## Return the global position of the closest point on the curve.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @return SIVector3 - The closest global position.
def getClosestGlobalPosition(position, crv, subcurve=0):
    geometry = crv.ActivePrimitive.Geometry
    sub = geometry.Curves(subcurve)  # result unused; lookup kept from the original
    transform = crv.Kinematics.Global.Transform
    local_pos = XSIMath.MapWorldPositionToObjectSpace(transform, position)
    closest = geometry.GetClosestCurvePosition2(local_pos)[3]
    return XSIMath.MapObjectPositionToWorldSpace(transform, closest)
# ========================================================
## Return the tangent of the closest point on the curve from a global position.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @return SIVector3 - The closest tangent, rotated into global space.
def getClosestGlobalTangent(position, crv, subcurve=0):
    geometry = crv.ActivePrimitive.Geometry
    sub = geometry.Curves(subcurve)
    transform = crv.Kinematics.Global.Transform
    local_pos = XSIMath.MapWorldPositionToObjectSpace(transform, position)
    u_value = geometry.GetClosestCurvePosition2(local_pos)[2]
    # EvaluatePosition returns (position, tangent, normal, binormal).
    tangent = sub.EvaluatePosition(u_value)[1]
    tangent.MulByRotationInPlace(transform.Rotation)
    return tangent
# ========================================================
## Return the normal of the closest point on the curve from a global position.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @return SIVector3 - The closest normal, rotated into global space.
def getClosestGlobalNormal(position, crv, subcurve=0):
    geometry = crv.ActivePrimitive.Geometry
    sub = geometry.Curves(subcurve)
    transform = crv.Kinematics.Global.Transform
    local_pos = XSIMath.MapWorldPositionToObjectSpace(transform, position)
    u_value = geometry.GetClosestCurvePosition2(local_pos)[2]
    # EvaluatePosition returns (position, tangent, normal, binormal).
    normal = sub.EvaluatePosition(u_value)[2]
    normal.MulByRotationInPlace(transform.Rotation)
    return normal
# ========================================================
## Return the binormal of the closest point on the curve from a global position.
# @param position SIVector3 - The global position.
# @param crv NurbsCurve - The curve to eval.
# @param subcurve Integer - The index of the subcurve to eval.
# @return SIVector3 - The closest binormal, rotated into global space.
def getClosestGlobalBiNormal(position, crv, subcurve=0):
    crv_geo = crv.ActivePrimitive.Geometry
    crv_sub = crv_geo.Curves(subcurve)
    crv_tra = crv.Kinematics.Global.Transform
    pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
    u = crv_geo.GetClosestCurvePosition2(pos)[2]
    # Renamed from 'bin' to avoid shadowing the builtin bin().
    binormal = crv_sub.EvaluatePosition(u)[3]
    binormal.MulByRotationInPlace(crv_tra.Rotation)
    return binormal
# ========================================================
## Return the global position of a control point of the curve.
# @param index Integer - Index of the control point.
# @param crv NurbsCurve - The curve to eval.
# @return SIVector3 - The global position of the point.
def getGlobalPointPosition(index, crv):
    crv_geo = crv.ActivePrimitive.Geometry
    crv_tra = crv.Kinematics.Global.Transform
    pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, crv_geo.Points(index).Position)
return pos | miquelcampos/GEAR_mc | gear/xsi/curve.py | Python | lgpl-3.0 | 14,878 | 0.004772 |
## Copyright (c) 2012 Szymon Zmilczak
##
##
## This file is part of Odtwarzacz.
##
## Odtwarzacz is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## Odtwarzacz is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Odtwarzacz; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx, os
from player import MusicPlayer
from explorator import LfileExplorer
from library import QueueUI
from timer import TimeKeeper, TimePicker
def config(filename):
    """Parse a ``key=value`` config file into a dict.

    Fixes over the original: the file handle is closed deterministically
    (it was leaked), lines without '=' (e.g. blank lines) are skipped
    instead of raising IndexError, and the value keeps everything after
    the FIRST '=' so values may themselves contain '='.
    """
    c = {}
    with open(filename) as f:
        for l in f:
            if "=" not in l:
                continue  # blank or malformed line
            key, value = l.split("=", 1)
            # Strip a single trailing newline, as the original did.
            if len(value) > 0 and value[-1] == "\n":
                value = value[:-1]
            c[key] = value
    return c
#MyFilePattern = r"\A.*\.(((m|M)(p|P)3)|((o|O)(g|G)(g|G)))\Z" #".*\.(((m|M)(p|P)3)|((m|M)(p|P)2)|((w|W)(m|M)(a|A))|((a|A)(c|C)3)|((o|O)(g|G)(g|G))|((a|A)(c|C)(c|C)))" #".*\.((mp3|mp2|wma|ac3|ogg|acc)"
class myframe(wx.Frame):
    """Main window: library explorer, play queue, interval scheduler, player."""
    def __init__(self):
        wx.Frame.__init__(self, None, wx.ID_ANY, title=u'Odtwarzacz', size = (800,600))
        self.SetBackgroundColour((220,220,255))
        self.SetMinSize((400, 300))
        # Library paths and the file-name pattern come from config.txt.
        c = config("config.txt")
        self.CreateStatusBar()
        # File menu with About / Exit entries (Polish UI strings).
        filemenu = wx.Menu()
        menuAbout = filemenu.Append(wx.ID_ABOUT, u"O programie",u" Informacje o tym programie")
        menuExit = filemenu.Append(wx.ID_EXIT, u"Wyjście",u" Wychondzi z programu")
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu, u"&Plik")
        self.SetMenuBar(menuBar)
        self.Bind(wx.EVT_MENU, self.onAbout, menuAbout)
        self.Bind(wx.EVT_MENU, self.onExit, menuExit)
        self.Bind(wx.EVT_CLOSE, self.onExit)
        #startPath = "D:\\Gas n' Metal"
        # Layout: sizer2 = outer vertical box; sizer = two columns
        # (library on the left, queue on the right); player docked below.
        sizer2 = wx.BoxSizer(wx.VERTICAL)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer3 = wx.BoxSizer(wx.VERTICAL)
        self.te = wx.StaticText(self, -1, u"Biblioteka:", (0, 0))
        f = self.te.GetFont()
        f.SetPixelSize((10,25))
        self.te.SetFont(f)
        sizer3.Add(self.te, 0, wx.BOTTOM, 0)
        # Library browser; picking a file triggers OnFilePick.
        self.d = LfileExplorer(self, (0,0), (500,600), c["paths"].split(","), c["file_pattern"], 1, self.OnFilePick)
        sizer3.Add(self.d, 1, wx.ALL|wx.EXPAND, 0)
        sizer.Add(sizer3, 1, wx.RIGHT|wx.EXPAND, 0)
        sizer4 = wx.BoxSizer(wx.VERTICAL)
        self.tq = wx.StaticText(self, -1, u"Kolejka:", (0, 0))
        self.tq.SetFont(f)
        sizer4.Add(self.tq, 0, wx.BOTTOM, 0)
        # Play queue; consumed track by track through OnAskNext.
        self.q = QueueUI(self, c["paths"].split(","), c["file_pattern"], (505,0), (500,600))
        sizer4.Add(self.q, 1, wx.ALL|wx.EXPAND, 0)
        sizer.Add(sizer4, 1, wx.ALL|wx.EXPAND, 0)
        sizer2.Add(sizer, 6, wx.ALL|wx.EXPAND, 0)
        # Modal dialog asking the user for the playback lag before start-up.
        tp = TimePicker(self, wx.DefaultPosition)
        tp.ShowModal()
        self.lag = tp.GetLag()
        tp.Destroy()
        print "Lag set to", self.lag
        # Scheduler driven by "przerwy.txt"; its callbacks control playback
        # and keep the status-bar clock updated.
        self.tk = TimeKeeper("przerwy.txt", self.lag, self.OnTStart, self.OnTEnd, self.UpdateClock)
        self.mp = MusicPlayer(self, self.OnAskNext, (0,450), (700,100))
        self.mp.SetMinSize((200, 100))
        sizer2.Add(self.mp, 0, wx.TOP|wx.EXPAND, 0)
        self.SetSizer(sizer2)
        self.SetAutoLayout(True)
    def UpdateClock(self, time):
        # TimeKeeper callback: show the current time in the status bar.
        t = (time, 0)
        self.GetStatusBar().SetFields(t[:1])
    def onAbout(self, e):
        # "About" dialog (Polish UI text).
        d = wx.MessageDialog(self, u"Ten program został stworzony w celach edukacyjnych przez Sim1234", u"O programie", wx.OK)
        d.ShowModal()
        d.Destroy()
        #e.Skip()
    def onExit(self, e):
        # Stop the time keeper and release the player before closing.
        self.tk.stop()
        self.mp.clean()
        e.Skip()
    def OnFilePick(self, path):
        # Explorer callback: append the picked file to the queue.
        self.q.add(path)
    def OnAskNext(self):
        # Player callback: pop the next track from the queue.
        return self.q.next()
    def OnTStart(self):
        # TimeKeeper callback at interval start: advance to the next track.
        # NOTE(review): epp() semantics live in MusicPlayer -- presumably
        # enables/raises playback; confirm there.
        self.mp.next()
        self.mp.epp(1)
        print "Start"
    def OnTEnd(self):
        # TimeKeeper callback at interval end (inverse of OnTStart).
        self.mp.epp(-1)
        print "End"
def main():
    """Create the wx application, show the main frame and run the event loop."""
    app = wx.PySimpleApp()
    frame = myframe()
    frame.Show()
    app.MainLoop()
# Script entry point.
if __name__ == '__main__':
    main()
| sim1234/Odtwarzacz | prog.py | Python | gpl-3.0 | 4,653 | 0.012261 |
# Lint as: python3
"""Unit tests for dmiparse."""
import os
import dmiparse
from google3.pyglib import resources
from google3.testing.pybase import googletest
# Runfiles-relative directory holding the SMBIOS test fixtures.
TEST_PATH = 'google3/third_party/py/dmiparse/test_data'
class DmiParserTest(googletest.TestCase):
  """End-to-end checks of dmiparse.DmiParser on a fixture SMBIOS dump."""
  def setUp(self):
    super(DmiParserTest, self).setUp()
    # Resolve the fixture file shipped with the test data package.
    data_path = os.path.join(TEST_PATH, 'less_compliant_smbios_records.txt')
    self.data_file = resources.GetResourceFilename(data_path)
  def testDmiParseNoDumpFileRaisesException(self):
    # An empty path must raise instead of silently returning empty results.
    with self.assertRaises(FileNotFoundError):
      dmiparse.DmiParser('').parse()
  def testDmiParseReturnsExpectedRecords(self):
    records, _ = dmiparse.DmiParser(self.data_file).parse()
    self.assertLen(records, 4)
    self.assertIn('0x0002', records)
    self.assertIn('0x0125', records)
    self.assertIn('0x0126', records)
  def testDmiParseReturnsValidBaseBoardRecord(self):
    # Handle 0x0002 is the base-board (type 2) record in the fixture.
    records, _ = dmiparse.DmiParser(self.data_file).parse()
    self.assertIn('0x0002', records)
    base_board_record = records['0x0002']
    self.assertEqual('0x0002', base_board_record.handle_id)
    self.assertEqual(2, base_board_record.type_id)
    self.assertLen(base_board_record.props, 9)
    self.assertIn('Product Name', base_board_record.props)
    self.assertEqual('Magnesium', base_board_record.props['Product Name'].val)
    self.assertEqual([], base_board_record.props['Product Name'].items)
    self.assertIn('Version', base_board_record.props)
    self.assertEqual('1234567890', base_board_record.props['Version'].val)
    self.assertEqual([], base_board_record.props['Version'].items)
    self.assertIn('UUID', base_board_record.props)
    self.assertEqual('03000200-0400-0500-0006-000700080009',
                     base_board_record.props['UUID'].val)
    self.assertEqual([], base_board_record.props['UUID'].items)
    self.assertIn('Location In Chassis', base_board_record.props)
    self.assertEqual('Riser1',
                     base_board_record.props['Location In Chassis'].val)
    self.assertEqual([], base_board_record.props['Location In Chassis'].items)
    self.assertIn('Chassis Handle', base_board_record.props)
    self.assertEqual('0x0003', base_board_record.props['Chassis Handle'].val)
    self.assertEqual([], base_board_record.props['Chassis Handle'].items)
    self.assertIn('MAC Address', base_board_record.props)
    self.assertEqual('00:1b:83:15:a3:24',
                     base_board_record.props['MAC Address'].val)
    self.assertEqual([], base_board_record.props['MAC Address'].items)
    self.assertIn('Contained Object Handles', base_board_record.props)
    self.assertEqual('5',
                     base_board_record.props['Contained Object Handles'].val)
    self.assertEqual(['0x009A', '0x009B', '0x009C', '0x009D', '0x009E'],
                     base_board_record.props['Contained Object Handles'].items)
    self.assertIn('Characteristics', base_board_record.props)
    self.assertEqual('', base_board_record.props['Characteristics'].val)
    self.assertEqual([
        'PCI is supported', 'BIOS is upgradeable', 'ACPI is supported',
        'UEFI is supported'
    ], base_board_record.props['Characteristics'].items)
  def testDmiParseIndentation(self):
    # Multi-line "Strings" values must keep one entry per indented line.
    records, _ = dmiparse.DmiParser(self.data_file).parse()
    self.assertIn('0x0058', records)
    oem_specific_record = records['0x0058']
    self.assertIn('Strings', oem_specific_record.props)
    self.assertEqual([
        'WLYDCRB.86B.WR.64.2019.19.3.03.1837', '0. 0. 0', '4:2.1.21', 'N/A',
        'FRU: Ver 1.21', 'N/A', 'N/A'
    ], oem_specific_record.props['Strings'].items)
  def testDmiParseReturnsValidGroups(self):
    # Groups map a DMI type id to the list of handles of that type.
    _, groups = dmiparse.DmiParser(self.data_file).parse()
    self.assertIn(2, groups)
    self.assertEqual(['0x0002'], groups[2])
    self.assertIn(14, groups)
    self.assertEqual(['0x0125', '0x0126'], groups[14])
# Standard googletest entry point.
if __name__ == '__main__':
  googletest.main()
| google/smbios-validation-tool | dmiparse/dmiparse_test.py | Python | apache-2.0 | 3,929 | 0.002036 |
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
# Metadata emitted by the Mako template compiler (template identity,
# source location, encoding and exported defs). Regenerate from
# index.tmpl rather than editing this module by hand.
_magic_number = 10
_modified_time = 1440369075.543512
_enable_loop = True
_template_filename = u'themes/monospace/templates/index.tmpl'
_template_uri = u'index.tmpl'
_source_encoding = 'utf-8'
_exports = [u'content']
def _mako_get_namespace(context, name):
    # Generated by Mako: return the cached namespace, creating all of this
    # template's namespaces on the first KeyError (EAFP).
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # Generated by Mako: register the template's <%namespace> imports
    # ('comments' and 'helper') on the render context.
    ns = runtime.TemplateNamespace(u'comments', context._clean_inheritance_tokens(), templateuri=u'comments_helper.tmpl', callables=None, calling_uri=_template_uri)
    context.namespaces[(__name__, u'comments')] = ns
    ns = runtime.TemplateNamespace(u'helper', context._clean_inheritance_tokens(), templateuri=u'index_helper.tmpl', callables=None, calling_uri=_template_uri)
    context.namespaces[(__name__, u'helper')] = ns
def _mako_inherit(template, context):
    # Generated by Mako: this template inherits from base.tmpl.
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, u'base.tmpl', _template_uri)
def render_body(context,**pageargs):
    # Machine-generated by the Mako compiler from index.tmpl --
    # regenerate instead of editing by hand.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        date_format = context.get('date_format', UNDEFINED)
        helper = _mako_get_namespace(context, 'helper')
        messages = context.get('messages', UNDEFINED)
        posts = context.get('posts', UNDEFINED)
        _link = context.get('_link', UNDEFINED)
        def content():
            return render_content(context._locals(__M_locals))
        comments = _mako_get_namespace(context, 'comments')
        index_teasers = context.get('index_teasers', UNDEFINED)
        __M_writer = context.writer()
        __M_writer(u'\n')
        __M_writer(u'\n')
        __M_writer(u'\n')
        if 'parent' not in context._data or not hasattr(context._data['parent'], 'content'):
            context['self'].content(**pageargs)
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_content(context,**pageargs):
    # Machine-generated by the Mako compiler (the <%block name="content">
    # of index.tmpl) -- regenerate instead of editing by hand.
    __M_caller = context.caller_stack._push_frame()
    try:
        date_format = context.get('date_format', UNDEFINED)
        helper = _mako_get_namespace(context, 'helper')
        messages = context.get('messages', UNDEFINED)
        posts = context.get('posts', UNDEFINED)
        _link = context.get('_link', UNDEFINED)
        def content():
            return render_content(context)
        comments = _mako_get_namespace(context, 'comments')
        index_teasers = context.get('index_teasers', UNDEFINED)
        __M_writer = context.writer()
        __M_writer(u'\n')
        for post in posts:
            __M_writer(u'    <div class="postbox">\n    <h1><a href="')
            __M_writer(unicode(post.permalink()))
            __M_writer(u'">')
            __M_writer(unicode(post.title()))
            __M_writer(u'</a></h1>\n    <div class="meta" style="background-color: rgb(234, 234, 234); ">\n        <span class="authordate">\n            ')
            __M_writer(unicode(messages("Posted:")))
            __M_writer(u' <time class="published" datetime="')
            __M_writer(unicode(post.date.isoformat()))
            __M_writer(u'">')
            __M_writer(unicode(post.formatted_date(date_format)))
            __M_writer(u'</time>\n        </span>\n        <br>\n        <span class="tags">Tags:&nbsp;\n')
            if post.tags:
                for tag in post.tags:
                    __M_writer(u'    <a class="tag" href="')
                    __M_writer(unicode(_link('tag', tag)))
                    __M_writer(u'"><span>')
                    __M_writer(unicode(tag))
                    __M_writer(u'</span></a>\n')
            __M_writer(u'    </span>\n    </div>\n    ')
            __M_writer(unicode(post.text(teaser_only=index_teasers)))
            __M_writer(u'\n')
            if not post.meta('nocomments'):
                __M_writer(u'    ')
                __M_writer(unicode(comments.comment_link(post.permalink(), post.base_path)))
                __M_writer(u'\n')
            __M_writer(u'    </div>\n')
        __M_writer(u'    ')
        __M_writer(unicode(helper.html_pager()))
        __M_writer(u'\n    ')
        __M_writer(unicode(comments.comment_link_script()))
        __M_writer(u'\n\t')
        __M_writer(unicode(helper.mathjax_script(posts)))
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"22": 3, "25": 2, "31": 0, "45": 2, "46": 3, "47": 4, "52": 31, "58": 5, "71": 5, "72": 6, "73": 7, "74": 8, "75": 8, "76": 8, "77": 8, "78": 11, "79": 11, "80": 11, "81": 11, "82": 11, "83": 11, "84": 15, "85": 16, "86": 17, "87": 17, "88": 17, "89": 17, "90": 17, "91": 20, "92": 22, "93": 22, "94": 23, "95": 24, "96": 24, "97": 24, "98": 26, "99": 28, "100": 28, "101": 28, "102": 29, "103": 29, "104": 30, "105": 30, "111": 105}, "uri": "index.tmpl", "filename": "themes/monospace/templates/index.tmpl"}
__M_END_METADATA
"""
| wcmckee/brobeurdotcom | cache/.mako.tmp/index.tmpl.py | Python | mit | 5,371 | 0.003351 |
#!/usr/bin/env python
"""Import Chamber of Deputies data (organs and deputies) via the webservices."""
import django

# django.setup() must run BEFORE any ORM models are imported; importing
# core.models (or modules that use it) first raises AppRegistryNotReady
# in standalone scripts.
django.setup()

from api.camara.orgaos import *
from api.camara.deputados import *
from core.models import *

## Orgaos Webservice
orgaos = OrgaosCamara()
orgaos.importar_tipos_orgaos()
orgaos.importar_orgaos()
orgaos.importar_cargos()

## Deputados Webservice
deputados = DeputadosCamara()
deputados.importar_partidos()
deputados.importar_deputados()
#deputados.importar_detalhes_deputados()

# Google Images download
#for parlamentar in Parlamentar.objects.all():
#    parlamentar.download_photos()

# Wikipedia data
#for parlamentar in Parlamentar.objects.all():
#    parlamentar.get_wikipedia_data()
| beraldoleal/entendaobrasil | scripts/import.py | Python | gpl-2.0 | 642 | 0.007788 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generated message classes for storage version v1.
Stores and retrieves potentially large, immutable data objects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'storage'
# NOTE: autogenerated by apitools (see module docstring); regenerate
# instead of hand-editing.
class Bucket(_messages.Message):
  """A bucket.
  Messages:
    CorsValueListEntry: A CorsValueListEntry object.
    LifecycleValue: The bucket's lifecycle configuration. See lifecycle
      management for more information.
    LoggingValue: The bucket's logging configuration, which defines the
      destination bucket and optional name prefix for the current bucket's
      logs.
    OwnerValue: The owner of the bucket. This is always the project team's
      owner group.
    VersioningValue: The bucket's versioning configuration.
    WebsiteValue: The bucket's website configuration.
  Fields:
    acl: Access controls on the bucket.
    cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
    defaultObjectAcl: Default access controls to apply to new objects when no
      ACL is provided.
    etag: HTTP 1.1 Entity tag for the bucket.
    id: The ID of the bucket.
    kind: The kind of item this is. For buckets, this is always
      storage#bucket.
    lifecycle: The bucket's lifecycle configuration. See lifecycle management
      for more information.
    location: The location of the bucket. Object data for objects in the
      bucket resides in physical storage within this region. Defaults to US.
      See the developer's guide for the authoritative list.
    logging: The bucket's logging configuration, which defines the destination
      bucket and optional name prefix for the current bucket's logs.
    metageneration: The metadata generation of this bucket.
    name: The name of the bucket.
    owner: The owner of the bucket. This is always the project team's owner
      group.
    projectNumber: The project number of the project the bucket belongs to.
    selfLink: The URI of this bucket.
    storageClass: The bucket's storage class. This defines how objects in the
      bucket are stored and determines the SLA and the cost of storage. Values
      include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to
      STANDARD. For more information, see storage classes.
    timeCreated: The creation time of the bucket in RFC 3339 format.
    updated: The modification time of the bucket in RFC 3339 format.
    versioning: The bucket's versioning configuration.
    website: The bucket's website configuration.
  """
  class CorsValueListEntry(_messages.Message):
    """A CorsValueListEntry object.
    Fields:
      maxAgeSeconds: The value, in seconds, to return in the Access-Control-
        Max-Age header used in preflight responses.
      method: The list of HTTP methods on which to include CORS response
        headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
        of methods, and means "any method".
      origin: The list of Origins eligible to receive CORS response headers.
        Note: "*" is permitted in the list of origins, and means "any Origin".
      responseHeader: The list of HTTP headers other than the simple response
        headers to give permission for the user-agent to share across domains.
    """
    maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
    method = _messages.StringField(2, repeated=True)
    origin = _messages.StringField(3, repeated=True)
    responseHeader = _messages.StringField(4, repeated=True)
  class LifecycleValue(_messages.Message):
    """The bucket's lifecycle configuration. See lifecycle management for more
    information.
    Messages:
      RuleValueListEntry: A RuleValueListEntry object.
    Fields:
      rule: A lifecycle management rule, which is made of an action to take
        and the condition(s) under which the action will be taken.
    """
    class RuleValueListEntry(_messages.Message):
      """A RuleValueListEntry object.
      Messages:
        ActionValue: The action to take.
        ConditionValue: The condition(s) under which the action will be taken.
      Fields:
        action: The action to take.
        condition: The condition(s) under which the action will be taken.
      """
      class ActionValue(_messages.Message):
        """The action to take.
        Fields:
          type: Type of the action. Currently, only Delete is supported.
        """
        type = _messages.StringField(1)
      class ConditionValue(_messages.Message):
        """The condition(s) under which the action will be taken.
        Fields:
          age: Age of an object (in days). This condition is satisfied when an
            object reaches the specified age.
          createdBefore: A date in RFC 3339 format with only the date part
            (for instance, "2013-01-15"). This condition is satisfied when an
            object is created before midnight of the specified date in UTC.
          isLive: Relevant only for versioned objects. If the value is true,
            this condition matches live objects; if the value is false, it
            matches archived objects.
          numNewerVersions: Relevant only for versioned objects. If the value
            is N, this condition is satisfied when there are at least N
            versions (including the live version) newer than this version of
            the object.
        """
        age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
        createdBefore = extra_types.DateField(2)
        isLive = _messages.BooleanField(3)
        numNewerVersions = _messages.IntegerField(4, variant=_messages.Variant.INT32)
      action = _messages.MessageField('ActionValue', 1)
      condition = _messages.MessageField('ConditionValue', 2)
    rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
  class LoggingValue(_messages.Message):
    """The bucket's logging configuration, which defines the destination
    bucket and optional name prefix for the current bucket's logs.
    Fields:
      logBucket: The destination bucket where the current bucket's logs should
        be placed.
      logObjectPrefix: A prefix for log object names.
    """
    logBucket = _messages.StringField(1)
    logObjectPrefix = _messages.StringField(2)
  class OwnerValue(_messages.Message):
    """The owner of the bucket. This is always the project team's owner group.
    Fields:
      entity: The entity, in the form project-owner-projectId.
      entityId: The ID for the entity.
    """
    entity = _messages.StringField(1)
    entityId = _messages.StringField(2)
  class VersioningValue(_messages.Message):
    """The bucket's versioning configuration.
    Fields:
      enabled: While set to true, versioning is fully enabled for this bucket.
    """
    enabled = _messages.BooleanField(1)
  class WebsiteValue(_messages.Message):
    """The bucket's website configuration.
    Fields:
      mainPageSuffix: Behaves as the bucket's directory index where missing
        objects are treated as potential directories.
      notFoundPage: The custom object to return when a requested resource is
        not found.
    """
    mainPageSuffix = _messages.StringField(1)
    notFoundPage = _messages.StringField(2)
  acl = _messages.MessageField('BucketAccessControl', 1, repeated=True)
  cors = _messages.MessageField('CorsValueListEntry', 2, repeated=True)
  defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 3, repeated=True)
  etag = _messages.StringField(4)
  id = _messages.StringField(5)
  kind = _messages.StringField(6, default=u'storage#bucket')
  lifecycle = _messages.MessageField('LifecycleValue', 7)
  location = _messages.StringField(8)
  logging = _messages.MessageField('LoggingValue', 9)
  metageneration = _messages.IntegerField(10)
  name = _messages.StringField(11)
  owner = _messages.MessageField('OwnerValue', 12)
  projectNumber = _messages.IntegerField(13, variant=_messages.Variant.UINT64)
  selfLink = _messages.StringField(14)
  storageClass = _messages.StringField(15)
  timeCreated = _message_types.DateTimeField(16)
  updated = _message_types.DateTimeField(17)
  versioning = _messages.MessageField('VersioningValue', 18)
  website = _messages.MessageField('WebsiteValue', 19)
# NOTE: autogenerated by apitools (see module docstring); regenerate
# instead of hand-editing.
class BucketAccessControl(_messages.Message):
  """An access-control entry.
  Messages:
    ProjectTeamValue: The project team associated with the entity, if any.
  Fields:
    bucket: The name of the bucket.
    domain: The domain associated with the entity, if any.
    email: The email address associated with the entity, if any.
    entity: The entity holding the permission, in one of the following forms:
      - user-userId - user-email - group-groupId - group-email - domain-
      domain - project-team-projectId - allUsers - allAuthenticatedUsers
      Examples: - The user liz@example.com would be user-liz@example.com. -
      The group example@googlegroups.com would be group-
      example@googlegroups.com. - To refer to all members of the Google Apps
      for Business domain example.com, the entity would be domain-example.com.
    entityId: The ID for the entity, if any.
    etag: HTTP 1.1 Entity tag for the access-control entry.
    id: The ID of the access-control entry.
    kind: The kind of item this is. For bucket access control entries, this is
      always storage#bucketAccessControl.
    projectTeam: The project team associated with the entity, if any.
    role: The access permission for the entity. Can be READER, WRITER, or
      OWNER.
    selfLink: The link to this access-control entry.
  """
  class ProjectTeamValue(_messages.Message):
    """The project team associated with the entity, if any.
    Fields:
      projectNumber: The project number.
      team: The team. Can be owners, editors, or viewers.
    """
    projectNumber = _messages.StringField(1)
    team = _messages.StringField(2)
  bucket = _messages.StringField(1)
  domain = _messages.StringField(2)
  email = _messages.StringField(3)
  entity = _messages.StringField(4)
  entityId = _messages.StringField(5)
  etag = _messages.StringField(6)
  id = _messages.StringField(7)
  kind = _messages.StringField(8, default=u'storage#bucketAccessControl')
  projectTeam = _messages.MessageField('ProjectTeamValue', 9)
  role = _messages.StringField(10)
  selfLink = _messages.StringField(11)
# NOTE: autogenerated by apitools (see module docstring); regenerate
# instead of hand-editing.
class BucketAccessControls(_messages.Message):
  """An access-control list.
  Fields:
    items: The list of items.
    kind: The kind of item this is. For lists of bucket access control
      entries, this is always storage#bucketAccessControls.
  """
  items = _messages.MessageField('BucketAccessControl', 1, repeated=True)
  kind = _messages.StringField(2, default=u'storage#bucketAccessControls')
# NOTE: autogenerated by apitools (see module docstring); regenerate
# instead of hand-editing.
class Buckets(_messages.Message):
  """A list of buckets.
  Fields:
    items: The list of items.
    kind: The kind of item this is. For lists of buckets, this is always
      storage#buckets.
    nextPageToken: The continuation token, used to page through large result
      sets. Provide this value in a subsequent request to return the next page
      of results.
  """
  items = _messages.MessageField('Bucket', 1, repeated=True)
  kind = _messages.StringField(2, default=u'storage#buckets')
  nextPageToken = _messages.StringField(3)
# NOTE: autogenerated by apitools (see module docstring); regenerate
# instead of hand-editing.
class Channel(_messages.Message):
  """An notification channel used to watch for resource changes.
  Messages:
    ParamsValue: Additional parameters controlling delivery channel behavior.
      Optional.
  Fields:
    address: The address where notifications are delivered for this channel.
    expiration: Date and time of notification channel expiration, expressed as
      a Unix timestamp, in milliseconds. Optional.
    id: A UUID or similar unique string that identifies this channel.
    kind: Identifies this as a notification channel used to watch for changes
      to a resource. Value: the fixed string "api#channel".
    params: Additional parameters controlling delivery channel behavior.
      Optional.
    payload: A Boolean value to indicate whether payload is wanted. Optional.
    resourceId: An opaque ID that identifies the resource being watched on
      this channel. Stable across different API versions.
    resourceUri: A version-specific identifier for the watched resource.
    token: An arbitrary string delivered to the target address with each
      notification delivered over this channel. Optional.
    type: The type of delivery mechanism used for this channel.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ParamsValue(_messages.Message):
    """Additional parameters controlling delivery channel behavior. Optional.
    Messages:
      AdditionalProperty: An additional property for a ParamsValue object.
    Fields:
      additionalProperties: Declares a new parameter by name.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a ParamsValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  address = _messages.StringField(1)
  expiration = _messages.IntegerField(2)
  id = _messages.StringField(3)
  kind = _messages.StringField(4, default=u'api#channel')
  params = _messages.MessageField('ParamsValue', 5)
  payload = _messages.BooleanField(6)
  resourceId = _messages.StringField(7)
  resourceUri = _messages.StringField(8)
  token = _messages.StringField(9)
  type = _messages.StringField(10)
class ComposeRequest(_messages.Message):
  """A Compose request.

  Messages:
    SourceObjectsValueListEntry: A SourceObjectsValueListEntry object.

  Fields:
    destination: Properties of the resulting object.
    kind: The kind of item this is.
    sourceObjects: The list of source objects that will be concatenated into a
      single object.
  """

  class SourceObjectsValueListEntry(_messages.Message):
    """A SourceObjectsValueListEntry object.

    Messages:
      ObjectPreconditionsValue: Conditions that must be met for this operation
        to execute.

    Fields:
      generation: The generation of this object to use as the source.
      name: The source object's name. The source object's bucket is implicitly
        the destination bucket.
      objectPreconditions: Conditions that must be met for this operation to
        execute.
    """

    class ObjectPreconditionsValue(_messages.Message):
      """Conditions that must be met for this operation to execute.

      Fields:
        ifGenerationMatch: Only perform the composition if the generation of
          the source object that would be used matches this value. If this
          value and a generation are both specified, they must be the same
          value or the call will fail.
      """

      ifGenerationMatch = _messages.IntegerField(1)

    generation = _messages.IntegerField(1)
    name = _messages.StringField(2)
    objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3)

  destination = _messages.MessageField('Object', 1)
  kind = _messages.StringField(2, default=u'storage#composeRequest')
  sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
class Object(_messages.Message):
  """An object.

  Messages:
    CustomerEncryptionValue: Metadata of customer-supplied encryption key, if
      the object is encrypted by such a key.
    MetadataValue: User-provided metadata, in key/value pairs.
    OwnerValue: The owner of the object. This will always be the uploader of
      the object.

  Fields:
    acl: Access controls on the object.
    bucket: The name of the bucket containing this object.
    cacheControl: Cache-Control directive for the object data.
    componentCount: Number of underlying components that make up this object.
      Components are accumulated by compose operations.
    contentDisposition: Content-Disposition of the object data.
    contentEncoding: Content-Encoding of the object data.
    contentLanguage: Content-Language of the object data.
    contentType: Content-Type of the object data.
    crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded
      using base64 in big-endian byte order. For more information about using
      the CRC32c checksum, see Hashes and ETags: Best Practices.
    customerEncryption: Metadata of customer-supplied encryption key, if the
      object is encrypted by such a key.
    etag: HTTP 1.1 Entity tag for the object.
    generation: The content generation of this object. Used for object
      versioning.
    id: The ID of the object.
    kind: The kind of item this is. For objects, this is always
      storage#object.
    md5Hash: MD5 hash of the data; encoded using base64. For more information
      about using the MD5 hash, see Hashes and ETags: Best Practices.
    mediaLink: Media download link.
    metadata: User-provided metadata, in key/value pairs.
    metageneration: The version of the metadata for this object at this
      generation. Used for preconditions and for detecting changes in
      metadata. A metageneration number is only meaningful in the context of a
      particular generation of a particular object.
    name: The name of this object. Required if not specified by URL parameter.
    owner: The owner of the object. This will always be the uploader of the
      object.
    selfLink: The link to this object.
    size: Content-Length of the data in bytes.
    storageClass: Storage class of the object.
    timeCreated: The creation time of the object in RFC 3339 format.
    timeDeleted: The deletion time of the object in RFC 3339 format. Will be
      returned if and only if this version of the object has been deleted.
    updated: The modification time of the object metadata in RFC 3339 format.
  """

  class CustomerEncryptionValue(_messages.Message):
    """Metadata of customer-supplied encryption key, if the object is
    encrypted by such a key.

    Fields:
      encryptionAlgorithm: The encryption algorithm.
      keySha256: SHA256 hash value of the encryption key.
    """

    encryptionAlgorithm = _messages.StringField(1)
    keySha256 = _messages.StringField(2)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    """User-provided metadata, in key/value pairs.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: An individual metadata entry.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  class OwnerValue(_messages.Message):
    """The owner of the object. This will always be the uploader of the
    object.

    Fields:
      entity: The entity, in the form user-userId.
      entityId: The ID for the entity.
    """

    entity = _messages.StringField(1)
    entityId = _messages.StringField(2)

  acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
  bucket = _messages.StringField(2)
  cacheControl = _messages.StringField(3)
  componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  contentDisposition = _messages.StringField(5)
  contentEncoding = _messages.StringField(6)
  contentLanguage = _messages.StringField(7)
  contentType = _messages.StringField(8)
  crc32c = _messages.StringField(9)
  customerEncryption = _messages.MessageField('CustomerEncryptionValue', 10)
  etag = _messages.StringField(11)
  generation = _messages.IntegerField(12)
  id = _messages.StringField(13)
  kind = _messages.StringField(14, default=u'storage#object')
  md5Hash = _messages.StringField(15)
  mediaLink = _messages.StringField(16)
  metadata = _messages.MessageField('MetadataValue', 17)
  metageneration = _messages.IntegerField(18)
  name = _messages.StringField(19)
  owner = _messages.MessageField('OwnerValue', 20)
  selfLink = _messages.StringField(21)
  size = _messages.IntegerField(22, variant=_messages.Variant.UINT64)
  storageClass = _messages.StringField(23)
  timeCreated = _message_types.DateTimeField(24)
  timeDeleted = _message_types.DateTimeField(25)
  updated = _message_types.DateTimeField(26)
class ObjectAccessControl(_messages.Message):
  """An access-control entry.

  Messages:
    ProjectTeamValue: The project team associated with the entity, if any.

  Fields:
    bucket: The name of the bucket.
    domain: The domain associated with the entity, if any.
    email: The email address associated with the entity, if any.
    entity: The entity holding the permission, in one of the following forms:
      - user-userId - user-email - group-groupId - group-email - domain-
      domain - project-team-projectId - allUsers - allAuthenticatedUsers
      Examples: - The user liz@example.com would be user-liz@example.com. -
      The group example@googlegroups.com would be group-
      example@googlegroups.com. - To refer to all members of the Google Apps
      for Business domain example.com, the entity would be domain-example.com.
    entityId: The ID for the entity, if any.
    etag: HTTP 1.1 Entity tag for the access-control entry.
    generation: The content generation of the object.
    id: The ID of the access-control entry.
    kind: The kind of item this is. For object access control entries, this is
      always storage#objectAccessControl.
    object: The name of the object.
    projectTeam: The project team associated with the entity, if any.
    role: The access permission for the entity. Can be READER or OWNER.
    selfLink: The link to this access-control entry.
  """

  class ProjectTeamValue(_messages.Message):
    """The project team associated with the entity, if any.

    Fields:
      projectNumber: The project number.
      team: The team. Can be owners, editors, or viewers.
    """

    projectNumber = _messages.StringField(1)
    team = _messages.StringField(2)

  bucket = _messages.StringField(1)
  domain = _messages.StringField(2)
  email = _messages.StringField(3)
  entity = _messages.StringField(4)
  entityId = _messages.StringField(5)
  etag = _messages.StringField(6)
  generation = _messages.IntegerField(7)
  id = _messages.StringField(8)
  kind = _messages.StringField(9, default=u'storage#objectAccessControl')
  object = _messages.StringField(10)
  projectTeam = _messages.MessageField('ProjectTeamValue', 11)
  role = _messages.StringField(12)
  selfLink = _messages.StringField(13)
class ObjectAccessControls(_messages.Message):
  """An access-control list.

  Fields:
    items: The list of items.
    kind: The kind of item this is. For lists of object access control
      entries, this is always storage#objectAccessControls.
  """

  items = _messages.MessageField('extra_types.JsonValue', 1, repeated=True)
  kind = _messages.StringField(2, default=u'storage#objectAccessControls')
class Objects(_messages.Message):
  """A list of objects.

  Fields:
    items: The list of items.
    kind: The kind of item this is. For lists of objects, this is always
      storage#objects.
    nextPageToken: The continuation token, used to page through large result
      sets. Provide this value in a subsequent request to return the next page
      of results.
    prefixes: The list of prefixes of objects matching-but-not-listed up to
      and including the requested delimiter.
  """

  items = _messages.MessageField('Object', 1, repeated=True)
  kind = _messages.StringField(2, default=u'storage#objects')
  nextPageToken = _messages.StringField(3)
  prefixes = _messages.StringField(4, repeated=True)
class RewriteResponse(_messages.Message):
  """A rewrite response.

  Fields:
    done: true if the copy is finished; otherwise, false if the copy is in
      progress. This property is always present in the response.
    kind: The kind of item this is.
    objectSize: The total size of the object being copied in bytes. This
      property is always present in the response.
    resource: A resource containing the metadata for the copied-to object.
      This property is present in the response only when copying completes.
    rewriteToken: A token to use in subsequent requests to continue copying
      data. This token is present in the response only when there is more data
      to copy.
    totalBytesRewritten: The total bytes written so far, which can be used to
      provide a waiting user with a progress indicator. This property is
      always present in the response.
  """

  done = _messages.BooleanField(1)
  kind = _messages.StringField(2, default=u'storage#rewriteResponse')
  objectSize = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
  resource = _messages.MessageField('Object', 4)
  rewriteToken = _messages.StringField(5)
  totalBytesRewritten = _messages.IntegerField(6, variant=_messages.Variant.UINT64)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    AltValueValuesEnum: Data format for the response.

  Fields:
    alt: Data format for the response.
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters. Overrides userIp if both are provided.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    userIp: IP address of the site where the request originates. Use this if
      you want to enforce per-user limits.
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for the response.

    Values:
      json: Responses with Content-Type of application/json
    """

    json = 0

  alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
  fields = _messages.StringField(2)
  key = _messages.StringField(3)
  oauth_token = _messages.StringField(4)
  prettyPrint = _messages.BooleanField(5, default=True)
  quotaUser = _messages.StringField(6)
  trace = _messages.StringField(7)
  userIp = _messages.StringField(8)
class StorageBucketAccessControlsDeleteRequest(_messages.Message):
  """A StorageBucketAccessControlsDeleteRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
class StorageBucketAccessControlsDeleteResponse(_messages.Message):
  """An empty StorageBucketAccessControlsDelete response."""
class StorageBucketAccessControlsGetRequest(_messages.Message):
  """A StorageBucketAccessControlsGetRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
class StorageBucketAccessControlsListRequest(_messages.Message):
  """A StorageBucketAccessControlsListRequest object.

  Fields:
    bucket: Name of a bucket.
  """

  bucket = _messages.StringField(1, required=True)
class StorageBucketsDeleteRequest(_messages.Message):
  """A StorageBucketsDeleteRequest object.

  Fields:
    bucket: Name of a bucket.
    ifMetagenerationMatch: If set, only deletes the bucket if its
      metageneration matches this value.
    ifMetagenerationNotMatch: If set, only deletes the bucket if its
      metageneration does not match this value.
  """

  bucket = _messages.StringField(1, required=True)
  ifMetagenerationMatch = _messages.IntegerField(2)
  ifMetagenerationNotMatch = _messages.IntegerField(3)
class StorageBucketsDeleteResponse(_messages.Message):
  """An empty StorageBucketsDelete response."""
class StorageBucketsGetRequest(_messages.Message):
  """A StorageBucketsGetRequest object.

  Enums:
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.

  Fields:
    bucket: Name of a bucket.
    ifMetagenerationMatch: Makes the return of the bucket metadata conditional
      on whether the bucket's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the return of the bucket metadata
      conditional on whether the bucket's current metageneration does not
      match the given value.
    projection: Set of properties to return. Defaults to noAcl.
  """

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl.

    Values:
      full: Include all properties.
      noAcl: Omit acl and defaultObjectAcl properties.
    """

    full = 0
    noAcl = 1

  bucket = _messages.StringField(1, required=True)
  ifMetagenerationMatch = _messages.IntegerField(2)
  ifMetagenerationNotMatch = _messages.IntegerField(3)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 4)
class StorageBucketsInsertRequest(_messages.Message):
  """A StorageBucketsInsertRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this bucket.
    PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
      default object access controls to this bucket.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
      unless the bucket resource specifies acl or defaultObjectAcl properties,
      when it defaults to full.

  Fields:
    bucket: A Bucket resource to be passed as the request body.
    predefinedAcl: Apply a predefined set of access controls to this bucket.
    predefinedDefaultObjectAcl: Apply a predefined set of default object
      access controls to this bucket.
    project: A valid API project identifier.
    projection: Set of properties to return. Defaults to noAcl, unless the
      bucket resource specifies acl or defaultObjectAcl properties, when it
      defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this bucket.

    Values:
      authenticatedRead: Project team owners get OWNER access, and
        allAuthenticatedUsers get READER access.
      private: Project team owners get OWNER access.
      projectPrivate: Project team members get access according to their
        roles.
      publicRead: Project team owners get OWNER access, and allUsers get
        READER access.
      publicReadWrite: Project team owners get OWNER access, and allUsers get
        WRITER access.
    """

    authenticatedRead = 0
    private = 1
    projectPrivate = 2
    publicRead = 3
    publicReadWrite = 4

  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of default object access controls to this
    bucket.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """

    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl, unless the bucket
    resource specifies acl or defaultObjectAcl properties, when it defaults to
    full.

    Values:
      full: Include all properties.
      noAcl: Omit acl and defaultObjectAcl properties.
    """

    full = 0
    noAcl = 1

  bucket = _messages.MessageField('Bucket', 1)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2)
  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
  project = _messages.StringField(4, required=True)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
class StorageBucketsListRequest(_messages.Message):
  """A StorageBucketsListRequest object.

  Enums:
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.

  Fields:
    maxResults: Maximum number of buckets to return.
    pageToken: A previously-returned page token representing part of the
      larger set of results to view.
    prefix: Filter results to buckets whose names begin with this prefix.
    project: A valid API project identifier.
    projection: Set of properties to return. Defaults to noAcl.
  """

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl.

    Values:
      full: Include all properties.
      noAcl: Omit acl and defaultObjectAcl properties.
    """

    full = 0
    noAcl = 1

  maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(2)
  prefix = _messages.StringField(3)
  project = _messages.StringField(4, required=True)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
class StorageBucketsPatchRequest(_messages.Message):
  """A StorageBucketsPatchRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this bucket.
    PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
      default object access controls to this bucket.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to full.

  Fields:
    bucket: Name of a bucket.
    bucketResource: A Bucket resource to be passed as the request body.
    ifMetagenerationMatch: Makes the return of the bucket metadata conditional
      on whether the bucket's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the return of the bucket metadata
      conditional on whether the bucket's current metageneration does not
      match the given value.
    predefinedAcl: Apply a predefined set of access controls to this bucket.
    predefinedDefaultObjectAcl: Apply a predefined set of default object
      access controls to this bucket.
    projection: Set of properties to return. Defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this bucket.

    Values:
      authenticatedRead: Project team owners get OWNER access, and
        allAuthenticatedUsers get READER access.
      private: Project team owners get OWNER access.
      projectPrivate: Project team members get access according to their
        roles.
      publicRead: Project team owners get OWNER access, and allUsers get
        READER access.
      publicReadWrite: Project team owners get OWNER access, and allUsers get
        WRITER access.
    """

    authenticatedRead = 0
    private = 1
    projectPrivate = 2
    publicRead = 3
    publicReadWrite = 4

  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of default object access controls to this
    bucket.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """

    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit acl and defaultObjectAcl properties.
    """

    full = 0
    noAcl = 1

  bucket = _messages.StringField(1, required=True)
  bucketResource = _messages.MessageField('Bucket', 2)
  ifMetagenerationMatch = _messages.IntegerField(3)
  ifMetagenerationNotMatch = _messages.IntegerField(4)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
class StorageBucketsUpdateRequest(_messages.Message):
  """A StorageBucketsUpdateRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this bucket.
    PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
      default object access controls to this bucket.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to full.

  Fields:
    bucket: Name of a bucket.
    bucketResource: A Bucket resource to be passed as the request body.
    ifMetagenerationMatch: Makes the return of the bucket metadata conditional
      on whether the bucket's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the return of the bucket metadata
      conditional on whether the bucket's current metageneration does not
      match the given value.
    predefinedAcl: Apply a predefined set of access controls to this bucket.
    predefinedDefaultObjectAcl: Apply a predefined set of default object
      access controls to this bucket.
    projection: Set of properties to return. Defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this bucket.

    Values:
      authenticatedRead: Project team owners get OWNER access, and
        allAuthenticatedUsers get READER access.
      private: Project team owners get OWNER access.
      projectPrivate: Project team members get access according to their
        roles.
      publicRead: Project team owners get OWNER access, and allUsers get
        READER access.
      publicReadWrite: Project team owners get OWNER access, and allUsers get
        WRITER access.
    """

    authenticatedRead = 0
    private = 1
    projectPrivate = 2
    publicRead = 3
    publicReadWrite = 4

  class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of default object access controls to this
    bucket.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """

    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit acl and defaultObjectAcl properties.
    """

    full = 0
    noAcl = 1

  bucket = _messages.StringField(1, required=True)
  bucketResource = _messages.MessageField('Bucket', 2)
  ifMetagenerationMatch = _messages.IntegerField(3)
  ifMetagenerationNotMatch = _messages.IntegerField(4)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
  predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
class StorageChannelsStopResponse(_messages.Message):
  """An empty StorageChannelsStop response."""
class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message):
  """A StorageDefaultObjectAccessControlsDeleteRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message):
  """An empty StorageDefaultObjectAccessControlsDelete response."""
class StorageDefaultObjectAccessControlsGetRequest(_messages.Message):
  """A StorageDefaultObjectAccessControlsGetRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
class StorageDefaultObjectAccessControlsListRequest(_messages.Message):
  """A StorageDefaultObjectAccessControlsListRequest object.

  Fields:
    bucket: Name of a bucket.
    ifMetagenerationMatch: If present, only return default ACL listing if the
      bucket's current metageneration matches this value.
    ifMetagenerationNotMatch: If present, only return default ACL listing if
      the bucket's current metageneration does not match the given value.
  """

  bucket = _messages.StringField(1, required=True)
  ifMetagenerationMatch = _messages.IntegerField(2)
  ifMetagenerationNotMatch = _messages.IntegerField(3)
class StorageObjectAccessControlsDeleteRequest(_messages.Message):
  """A StorageObjectAccessControlsDeleteRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
  generation = _messages.IntegerField(3)
  object = _messages.StringField(4, required=True)
class StorageObjectAccessControlsDeleteResponse(_messages.Message):
  """An empty StorageObjectAccessControlsDelete response."""
class StorageObjectAccessControlsGetRequest(_messages.Message):
  """A StorageObjectAccessControlsGetRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
  generation = _messages.IntegerField(3)
  object = _messages.StringField(4, required=True)
class StorageObjectAccessControlsInsertRequest(_messages.Message):
  """A StorageObjectAccessControlsInsertRequest object.

  Fields:
    bucket: Name of a bucket.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: A ObjectAccessControl resource to be passed as the
      request body.
  """

  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  object = _messages.StringField(3, required=True)
  objectAccessControl = _messages.MessageField('ObjectAccessControl', 4)
class StorageObjectAccessControlsListRequest(_messages.Message):
  """A StorageObjectAccessControlsListRequest object.

  Fields:
    bucket: Name of a bucket.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
  """

  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  object = _messages.StringField(3, required=True)
class StorageObjectAccessControlsPatchRequest(_messages.Message):
  """A StorageObjectAccessControlsPatchRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: A ObjectAccessControl resource to be passed as the
      request body.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
  generation = _messages.IntegerField(3)
  object = _messages.StringField(4, required=True)
  objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
class StorageObjectAccessControlsUpdateRequest(_messages.Message):
  """A StorageObjectAccessControlsUpdateRequest object.

  Fields:
    bucket: Name of a bucket.
    entity: The entity holding the permission. Can be user-userId, user-
      emailAddress, group-groupId, group-emailAddress, allUsers, or
      allAuthenticatedUsers.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    objectAccessControl: A ObjectAccessControl resource to be passed as the
      request body.
  """

  bucket = _messages.StringField(1, required=True)
  entity = _messages.StringField(2, required=True)
  generation = _messages.IntegerField(3)
  object = _messages.StringField(4, required=True)
  objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
class StorageObjectsComposeRequest(_messages.Message):
  """A StorageObjectsComposeRequest object.

  Enums:
    DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
      controls to the destination object.

  Fields:
    composeRequest: A ComposeRequest resource to be passed as the request
      body.
    destinationBucket: Name of the bucket in which to store the new object.
    destinationObject: Name of the new object. For information about how to
      URL encode object names to be path safe, see Encoding URI Path Parts.
    destinationPredefinedAcl: Apply a predefined set of access controls to the
      destination object.
    ifGenerationMatch: Makes the operation conditional on whether the object's
      current generation matches the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
  """

  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to the destination object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  # Positional arguments below are protorpc field numbers; do not renumber.
  composeRequest = _messages.MessageField('ComposeRequest', 1)
  destinationBucket = _messages.StringField(2, required=True)
  destinationObject = _messages.StringField(3, required=True)
  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
  ifGenerationMatch = _messages.IntegerField(5)
  ifMetagenerationMatch = _messages.IntegerField(6)
class StorageObjectsCopyRequest(_messages.Message):
  """A StorageObjectsCopyRequest object.

  Enums:
    DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
      controls to the destination object.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
      unless the object resource specifies the acl property, when it defaults
      to full.

  Fields:
    destinationBucket: Name of the bucket in which to store the new object.
      Overrides the provided object metadata's bucket value, if any.For
      information about how to URL encode object names to be path safe, see
      Encoding URI Path Parts.
    destinationObject: Name of the new object. Required when the object
      metadata is not otherwise provided. Overrides the object metadata's name
      value, if any.
    destinationPredefinedAcl: Apply a predefined set of access controls to the
      destination object.
    ifGenerationMatch: Makes the operation conditional on whether the
      destination object's current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      destination object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      destination object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      destination object's current metageneration does not match the given
      value.
    ifSourceGenerationMatch: Makes the operation conditional on whether the
      source object's generation matches the given value.
    ifSourceGenerationNotMatch: Makes the operation conditional on whether the
      source object's generation does not match the given value.
    ifSourceMetagenerationMatch: Makes the operation conditional on whether
      the source object's current metageneration matches the given value.
    ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
      the source object's current metageneration does not match the given
      value.
    object: A Object resource to be passed as the request body.
    projection: Set of properties to return. Defaults to noAcl, unless the
      object resource specifies the acl property, when it defaults to full.
    sourceBucket: Name of the bucket in which to find the source object.
    sourceGeneration: If present, selects a specific revision of the source
      object (as opposed to the latest version, the default).
    sourceObject: Name of the source object. For information about how to URL
      encode object names to be path safe, see Encoding URI Path Parts.
  """

  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to the destination object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl, unless the object
    resource specifies the acl property, when it defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Positional arguments below are protorpc field numbers; do not renumber.
  destinationBucket = _messages.StringField(1, required=True)
  destinationObject = _messages.StringField(2, required=True)
  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
  ifGenerationMatch = _messages.IntegerField(4)
  ifGenerationNotMatch = _messages.IntegerField(5)
  ifMetagenerationMatch = _messages.IntegerField(6)
  ifMetagenerationNotMatch = _messages.IntegerField(7)
  ifSourceGenerationMatch = _messages.IntegerField(8)
  ifSourceGenerationNotMatch = _messages.IntegerField(9)
  ifSourceMetagenerationMatch = _messages.IntegerField(10)
  ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
  object = _messages.MessageField('Object', 12)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 13)
  sourceBucket = _messages.StringField(14, required=True)
  sourceGeneration = _messages.IntegerField(15)
  sourceObject = _messages.StringField(16, required=True)
class StorageObjectsDeleteRequest(_messages.Message):
  """A StorageObjectsDeleteRequest object.

  Fields:
    bucket: Name of the bucket in which the object resides.
    generation: If present, permanently deletes a specific revision of this
      object (as opposed to the latest version, the default).
    ifGenerationMatch: Makes the operation conditional on whether the object's
      current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      object's current metageneration does not match the given value.
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
  """

  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  ifGenerationMatch = _messages.IntegerField(3)
  ifGenerationNotMatch = _messages.IntegerField(4)
  ifMetagenerationMatch = _messages.IntegerField(5)
  ifMetagenerationNotMatch = _messages.IntegerField(6)
  object = _messages.StringField(7, required=True)
class StorageObjectsDeleteResponse(_messages.Message):
  """An empty StorageObjectsDelete response."""
  # Intentionally has no fields: a successful delete returns no payload.
class StorageObjectsGetRequest(_messages.Message):
  """A StorageObjectsGetRequest object.

  Enums:
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.

  Fields:
    bucket: Name of the bucket in which the object resides.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    ifGenerationMatch: Makes the operation conditional on whether the object's
      generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      object's generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      object's current metageneration does not match the given value.
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    projection: Set of properties to return. Defaults to noAcl.
  """

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  ifGenerationMatch = _messages.IntegerField(3)
  ifGenerationNotMatch = _messages.IntegerField(4)
  ifMetagenerationMatch = _messages.IntegerField(5)
  ifMetagenerationNotMatch = _messages.IntegerField(6)
  object = _messages.StringField(7, required=True)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
class StorageObjectsInsertRequest(_messages.Message):
  """A StorageObjectsInsertRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this object.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
      unless the object resource specifies the acl property, when it defaults
      to full.

  Fields:
    bucket: Name of the bucket in which to store the new object. Overrides the
      provided object metadata's bucket value, if any.
    contentEncoding: If set, sets the contentEncoding property of the final
      object to this value. Setting this parameter is equivalent to setting
      the contentEncoding metadata property. This can be useful when uploading
      an object with uploadType=media to indicate the encoding of the content
      being uploaded.
    ifGenerationMatch: Makes the operation conditional on whether the object's
      current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      object's current metageneration does not match the given value.
    name: Name of the object. Required when the object metadata is not
      otherwise provided. Overrides the object metadata's name value, if any.
      For information about how to URL encode object names to be path safe,
      see Encoding URI Path Parts.
    object: A Object resource to be passed as the request body.
    predefinedAcl: Apply a predefined set of access controls to this object.
    projection: Set of properties to return. Defaults to noAcl, unless the
      object resource specifies the acl property, when it defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl, unless the object
    resource specifies the acl property, when it defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  contentEncoding = _messages.StringField(2)
  ifGenerationMatch = _messages.IntegerField(3)
  ifGenerationNotMatch = _messages.IntegerField(4)
  ifMetagenerationMatch = _messages.IntegerField(5)
  ifMetagenerationNotMatch = _messages.IntegerField(6)
  name = _messages.StringField(7)
  object = _messages.MessageField('Object', 8)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
class StorageObjectsListRequest(_messages.Message):
  """A StorageObjectsListRequest object.

  Enums:
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.

  Fields:
    bucket: Name of the bucket in which to look for objects.
    delimiter: Returns results in a directory-like mode. items will contain
      only objects whose names, aside from the prefix, do not contain
      delimiter. Objects whose names, aside from the prefix, contain delimiter
      will have their name, truncated after the delimiter, returned in
      prefixes. Duplicate prefixes are omitted.
    maxResults: Maximum number of items plus prefixes to return. As duplicate
      prefixes are omitted, fewer total results may be returned than
      requested. The default value of this parameter is 1,000 items.
    pageToken: A previously-returned page token representing part of the
      larger set of results to view.
    prefix: Filter results to objects whose names begin with this prefix.
    projection: Set of properties to return. Defaults to noAcl.
    versions: If true, lists all versions of an object as distinct results.
      The default is false. For more information, see Object Versioning.
  """

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  delimiter = _messages.StringField(2)
  # maxResults is a UINT32 variant on the wire, unlike the other (signed)
  # integer fields in this message.
  maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(4)
  prefix = _messages.StringField(5)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 6)
  versions = _messages.BooleanField(7)
class StorageObjectsPatchRequest(_messages.Message):
  """A StorageObjectsPatchRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this object.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to full.

  Fields:
    bucket: Name of the bucket in which the object resides.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    ifGenerationMatch: Makes the operation conditional on whether the object's
      current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      object's current metageneration does not match the given value.
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    objectResource: A Object resource to be passed as the request body.
    predefinedAcl: Apply a predefined set of access controls to this object.
    projection: Set of properties to return. Defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # "object" is the object's name; "objectResource" carries the request body.
  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  ifGenerationMatch = _messages.IntegerField(3)
  ifGenerationNotMatch = _messages.IntegerField(4)
  ifMetagenerationMatch = _messages.IntegerField(5)
  ifMetagenerationNotMatch = _messages.IntegerField(6)
  object = _messages.StringField(7, required=True)
  objectResource = _messages.MessageField('Object', 8)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
class StorageObjectsRewriteRequest(_messages.Message):
  """A StorageObjectsRewriteRequest object.

  Enums:
    DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
      controls to the destination object.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
      unless the object resource specifies the acl property, when it defaults
      to full.

  Fields:
    destinationBucket: Name of the bucket in which to store the new object.
      Overrides the provided object metadata's bucket value, if any.
    destinationObject: Name of the new object. Required when the object
      metadata is not otherwise provided. Overrides the object metadata's name
      value, if any. For information about how to URL encode object names to
      be path safe, see Encoding URI Path Parts.
    destinationPredefinedAcl: Apply a predefined set of access controls to the
      destination object.
    ifGenerationMatch: Makes the operation conditional on whether the
      destination object's current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      destination object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      destination object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      destination object's current metageneration does not match the given
      value.
    ifSourceGenerationMatch: Makes the operation conditional on whether the
      source object's generation matches the given value.
    ifSourceGenerationNotMatch: Makes the operation conditional on whether the
      source object's generation does not match the given value.
    ifSourceMetagenerationMatch: Makes the operation conditional on whether
      the source object's current metageneration matches the given value.
    ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
      the source object's current metageneration does not match the given
      value.
    maxBytesRewrittenPerCall: The maximum number of bytes that will be
      rewritten per rewrite request. Most callers shouldn't need to specify
      this parameter - it is primarily in place to support testing. If
      specified the value must be an integral multiple of 1 MiB (1048576).
      Also, this only applies to requests where the source and destination
      span locations and/or storage classes. Finally, this value must not
      change across rewrite calls else you'll get an error that the
      rewriteToken is invalid.
    object: A Object resource to be passed as the request body.
    projection: Set of properties to return. Defaults to noAcl, unless the
      object resource specifies the acl property, when it defaults to full.
    rewriteToken: Include this field (from the previous rewrite response) on
      each rewrite request after the first one, until the rewrite response
      'done' flag is true. Calls that provide a rewriteToken can omit all
      other request fields, but if included those fields must match the values
      provided in the first rewrite request.
    sourceBucket: Name of the bucket in which to find the source object.
    sourceGeneration: If present, selects a specific revision of the source
      object (as opposed to the latest version, the default).
    sourceObject: Name of the source object. For information about how to URL
      encode object names to be path safe, see Encoding URI Path Parts.
  """

  class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to the destination object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl, unless the object
    resource specifies the acl property, when it defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Positional arguments below are protorpc field numbers; do not renumber.
  destinationBucket = _messages.StringField(1, required=True)
  destinationObject = _messages.StringField(2, required=True)
  destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 3)
  ifGenerationMatch = _messages.IntegerField(4)
  ifGenerationNotMatch = _messages.IntegerField(5)
  ifMetagenerationMatch = _messages.IntegerField(6)
  ifMetagenerationNotMatch = _messages.IntegerField(7)
  ifSourceGenerationMatch = _messages.IntegerField(8)
  ifSourceGenerationNotMatch = _messages.IntegerField(9)
  ifSourceMetagenerationMatch = _messages.IntegerField(10)
  ifSourceMetagenerationNotMatch = _messages.IntegerField(11)
  maxBytesRewrittenPerCall = _messages.IntegerField(12)
  object = _messages.MessageField('Object', 13)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 14)
  rewriteToken = _messages.StringField(15)
  sourceBucket = _messages.StringField(16, required=True)
  sourceGeneration = _messages.IntegerField(17)
  sourceObject = _messages.StringField(18, required=True)
class StorageObjectsUpdateRequest(_messages.Message):
  """A StorageObjectsUpdateRequest object.

  Enums:
    PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
      this object.
    ProjectionValueValuesEnum: Set of properties to return. Defaults to full.

  Fields:
    bucket: Name of the bucket in which the object resides.
    generation: If present, selects a specific revision of this object (as
      opposed to the latest version, the default).
    ifGenerationMatch: Makes the operation conditional on whether the object's
      current generation matches the given value.
    ifGenerationNotMatch: Makes the operation conditional on whether the
      object's current generation does not match the given value.
    ifMetagenerationMatch: Makes the operation conditional on whether the
      object's current metageneration matches the given value.
    ifMetagenerationNotMatch: Makes the operation conditional on whether the
      object's current metageneration does not match the given value.
    object: Name of the object. For information about how to URL encode object
      names to be path safe, see Encoding URI Path Parts.
    objectResource: A Object resource to be passed as the request body.
    predefinedAcl: Apply a predefined set of access controls to this object.
    projection: Set of properties to return. Defaults to full.
  """

  class PredefinedAclValueValuesEnum(_messages.Enum):
    """Apply a predefined set of access controls to this object.

    Values:
      authenticatedRead: Object owner gets OWNER access, and
        allAuthenticatedUsers get READER access.
      bucketOwnerFullControl: Object owner gets OWNER access, and project team
        owners get OWNER access.
      bucketOwnerRead: Object owner gets OWNER access, and project team owners
        get READER access.
      private: Object owner gets OWNER access.
      projectPrivate: Object owner gets OWNER access, and project team members
        get access according to their roles.
      publicRead: Object owner gets OWNER access, and allUsers get READER
        access.
    """
    authenticatedRead = 0
    bucketOwnerFullControl = 1
    bucketOwnerRead = 2
    private = 3
    projectPrivate = 4
    publicRead = 5

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to full.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Same field layout as StorageObjectsPatchRequest; positional numbers are
  # protorpc field tags and must stay stable.
  bucket = _messages.StringField(1, required=True)
  generation = _messages.IntegerField(2)
  ifGenerationMatch = _messages.IntegerField(3)
  ifGenerationNotMatch = _messages.IntegerField(4)
  ifMetagenerationMatch = _messages.IntegerField(5)
  ifMetagenerationNotMatch = _messages.IntegerField(6)
  object = _messages.StringField(7, required=True)
  objectResource = _messages.MessageField('Object', 8)
  predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
class StorageObjectsWatchAllRequest(_messages.Message):
  """A StorageObjectsWatchAllRequest object.

  Enums:
    ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.

  Fields:
    bucket: Name of the bucket in which to look for objects.
    channel: A Channel resource to be passed as the request body.
    delimiter: Returns results in a directory-like mode. items will contain
      only objects whose names, aside from the prefix, do not contain
      delimiter. Objects whose names, aside from the prefix, contain delimiter
      will have their name, truncated after the delimiter, returned in
      prefixes. Duplicate prefixes are omitted.
    maxResults: Maximum number of items plus prefixes to return. As duplicate
      prefixes are omitted, fewer total results may be returned than
      requested. The default value of this parameter is 1,000 items.
    pageToken: A previously-returned page token representing part of the
      larger set of results to view.
    prefix: Filter results to objects whose names begin with this prefix.
    projection: Set of properties to return. Defaults to noAcl.
    versions: If true, lists all versions of an object as distinct results.
      The default is false. For more information, see Object Versioning.
  """

  class ProjectionValueValuesEnum(_messages.Enum):
    """Set of properties to return. Defaults to noAcl.

    Values:
      full: Include all properties.
      noAcl: Omit the acl property.
    """
    full = 0
    noAcl = 1

  # Mirrors StorageObjectsListRequest plus a notification "channel" body.
  # Positional arguments below are protorpc field numbers; do not renumber.
  bucket = _messages.StringField(1, required=True)
  channel = _messages.MessageField('Channel', 2)
  delimiter = _messages.StringField(3)
  maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
  pageToken = _messages.StringField(5)
  prefix = _messages.StringField(6)
  projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
  versions = _messages.BooleanField(8)
| staslev/beam | sdks/python/apache_beam/io/gcp/internal/clients/storage/storage_v1_messages.py | Python | apache-2.0 | 78,320 | 0.004941 |
from django.conf import settings
from rest_framework import serializers
from rest_framework.decorators import api_view
from rest_framework.response import Response
from kitsune.products.models import Product
from kitsune.questions.models import Question, QuestionMappingType
from kitsune.questions.api import QuestionSerializer
from kitsune.search import es_utils
from kitsune.sumo.api_utils import GenericAPIException
from kitsune.wiki.api import DocumentDetailSerializer
from kitsune.wiki.models import Document, DocumentMappingType
def positive_integer(value):
    """Serializer validator: reject values below zero."""
    if value >= 0:
        return
    raise serializers.ValidationError('This field must be positive.')
def valid_product(value):
    """Serializer validator: a non-empty value must be an existing Product slug."""
    if value and not Product.objects.filter(slug=value).exists():
        raise serializers.ValidationError(
            'Could not find product with slug "{0}".'.format(value)
        )
def valid_locale(value):
    """Serializer validator: a non-empty value must be a supported locale.

    Known-but-unsupported locales get an error naming their fallback
    locale; unknown locales get a plain "not found" error.
    """
    if not value or value in settings.SUMO_LANGUAGES:
        return
    if value in settings.NON_SUPPORTED_LOCALES:
        fallback = (settings.NON_SUPPORTED_LOCALES[value] or
                    settings.WIKI_DEFAULT_LANGUAGE)
        raise serializers.ValidationError(
            '"{0}" is not supported, but has fallback locale "{1}".'.format(
                value, fallback))
    raise serializers.ValidationError(
        'Could not find locale "{0}".'.format(value)
    )
class SuggestSerializer(serializers.Serializer):
    """Validates the parameters accepted by the ``suggest`` endpoint."""

    # The search query; the only required parameter.
    q = serializers.CharField(required=True)
    # Locale to search in; defaults to the wiki default language.
    locale = serializers.CharField(
        required=False, default=settings.WIKI_DEFAULT_LANGUAGE,
        validators=[valid_locale])
    # Optional product slug used to narrow both result sets.
    product = serializers.CharField(
        required=False, default='',
        validators=[valid_product])
    # Per-type result caps; must be non-negative.
    max_questions = serializers.IntegerField(
        required=False, default=10,
        validators=[positive_integer])
    max_documents = serializers.IntegerField(
        required=False, default=10,
        validators=[positive_integer])
@api_view(['GET', 'POST'])
def suggest(request):
    """Return solved-question and wiki-document suggestions for a query."""
    # Parameters may arrive in the querystring or the body, but not both.
    if request.data and request.GET:
        raise GenericAPIException(
            400, 'Put all parameters either in the querystring or the HTTP request body.')

    serializer = SuggestSerializer(data=(request.data or request.GET))
    if not serializer.is_valid():
        raise GenericAPIException(400, serializer.errors)

    searcher = (
        es_utils.AnalyzerS()
        .es(urls=settings.ES_URLS)
        .indexes(es_utils.read_index('default')))

    params = serializer.validated_data
    text, locale, product = params['q'], params['locale'], params['product']
    return Response({
        'questions': _question_suggestions(
            searcher, text, locale, product, params['max_questions']),
        'documents': _document_suggestions(
            searcher, text, locale, product, params['max_documents']),
    })
def _question_suggestions(searcher, text, locale, product, max_results):
    """Return serialized solved questions matching ``text``.

    :arg searcher: an ``es_utils.AnalyzerS`` bound to the default index.
    :arg text: the search query.
    :arg locale: locale filter ('' disables it).
    :arg product: product slug filter ('' disables it).
    :arg max_results: cap on the number of results; <= 0 short-circuits.

    Results are returned in the search engine's relevance order.
    """
    if max_results <= 0:
        return []

    search_filter = es_utils.F(
        model='questions_question',
        question_is_archived=False,
        question_is_locked=False,
        question_is_solved=True)
    if product:
        search_filter &= es_utils.F(product=product)
    if locale:
        search_filter &= es_utils.F(question_locale=locale)

    searcher = _query(searcher, QuestionMappingType, search_filter, text, locale)
    question_ids = [result['id'] for result in searcher[:max_results]]
    # ``filter(id__in=...)`` does not preserve order, so map the rows by id
    # and walk ``question_ids`` to keep the engine's relevance ranking.
    questions_by_id = Question.objects.in_bulk(question_ids)
    return [
        QuestionSerializer(instance=questions_by_id[qid]).data
        for qid in question_ids
        if qid in questions_by_id
    ]
def _document_suggestions(searcher, text, locale, product, max_results):
    """Return serialized wiki documents matching ``text``.

    :arg searcher: an ``es_utils.AnalyzerS`` bound to the default index.
    :arg text: the search query.
    :arg locale: locale the documents must be written in.
    :arg product: product slug filter ('' disables it).
    :arg max_results: cap on the number of results; <= 0 short-circuits.

    Results are returned in the search engine's relevance order.
    """
    if max_results <= 0:
        return []

    search_filter = es_utils.F(
        model='wiki_document',
        document_category__in=settings.SEARCH_DEFAULT_CATEGORIES,
        document_locale=locale,
        document_is_archived=False)
    if product:
        search_filter &= es_utils.F(product=product)

    searcher = _query(searcher, DocumentMappingType, search_filter, text, locale)
    doc_ids = [result['id'] for result in searcher[:max_results]]
    # ``filter(id__in=...)`` does not preserve order, so map the rows by id
    # and walk ``doc_ids`` to keep the engine's relevance ranking.
    docs_by_id = Document.objects.in_bulk(doc_ids)
    return [
        DocumentDetailSerializer(instance=docs_by_id[did]).data
        for did in doc_ids
        if did in docs_by_id
    ]
def _query(searcher, mapping_type, search_filter, query_text, locale):
    """Build a locale-aware ES query across the mapping type's query fields."""
    # Search every query field with both exact-phrase and loose matching.
    query = {
        '{0}__{1}'.format(field, query_type): query_text
        for field in mapping_type.get_query_fields()
        for query_type in ('match', 'match_phrase')
    }
    # Transform query to be locale aware.
    query = es_utils.es_query_with_analyzer(query, locale)
    return (searcher
            .doctypes(mapping_type.get_mapping_type_name())
            .filter(search_filter)
            .query(should=True, **query))
| Osmose/kitsune | kitsune/search/api.py | Python | bsd-3-clause | 4,930 | 0.00142 |
from csacompendium.csa_practice.models import PracticeLevel
from csacompendium.utils.pagination import APILimitOffsetPagination
from csacompendium.utils.permissions import IsOwnerOrReadOnly
from csacompendium.utils.viewsutils import DetailViewUpdateDelete, CreateAPIViewHook
from rest_framework.filters import DjangoFilterBackend
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from .filters import PracticeLevelListFilter
from csacompendium.csa_practice.api.practicelevel.practicelevelserializers import practice_level_serializers
def practice_level_views():
    """
    Build and return the practice level API view classes.

    The serializer classes are resolved once and shared by the three
    view classes defined below.

    :return: Dict mapping view-class names to the view classes
    :rtype: dict
    """
    practice_level_serializer = practice_level_serializers()

    class PracticeLevelCreateAPIView(CreateAPIViewHook):
        """
        Creates a single record. Requires an authenticated user.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
        permission_classes = [IsAuthenticated]

    class PracticeLevelListAPIView(ListAPIView):
        """
        API list view. Gets all records, with filtering and
        limit/offset pagination.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelListSerializer']
        filter_backends = (DjangoFilterBackend,)
        filter_class = PracticeLevelListFilter
        pagination_class = APILimitOffsetPagination

    class PracticeLevelDetailAPIView(DetailViewUpdateDelete):
        """
        Retrieves/updates/deletes a record, looked up by slug.
        Requires an authenticated admin user.
        """
        queryset = PracticeLevel.objects.all()
        serializer_class = practice_level_serializer['PracticeLevelDetailSerializer']
        permission_classes = [IsAuthenticated, IsAdminUser]
        lookup_field = 'slug'

    return {
        'PracticeLevelListAPIView': PracticeLevelListAPIView,
        'PracticeLevelDetailAPIView': PracticeLevelDetailAPIView,
        'PracticeLevelCreateAPIView': PracticeLevelCreateAPIView
    }
| nkoech/csacompendium | csacompendium/csa_practice/api/practicelevel/practicelevelviews.py | Python | mit | 2,046 | 0.002444 |
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
def augmented_dendrogram(*args, **kwargs):
    """Draw a dendrogram with each merge annotated by its height.

    All arguments are forwarded to ``scipy.cluster.hierarchy.dendrogram``.
    Unless ``no_plot`` is truthy, each merge point is labelled (via
    matplotlib) with the distance at which the merge happened.

    Returns the dict produced by ``dendrogram``.
    """
    data = dendrogram(*args, **kwargs)
    if kwargs.get('no_plot', False):
        return data
    for xs, ys in zip(data['icoord'], data['dcoord']):
        merge_x = 0.5 * (xs[1] + xs[2])
        merge_y = ys[1]
        plt.annotate("%.3g" % merge_y, (merge_x, merge_y), xytext=(0, -8),
                     textcoords='offset points',
                     va='top', ha='center')
    return data
| toonn/capselai | clustering/augmented_dendrogram.py | Python | bsd-2-clause | 540 | 0.007407 |
__author__ = 'dako'
class SessionHelper:
    """Login/logout helpers for the application under test (Selenium)."""

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        """Open the home page and submit the login form."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_css_selector('input[type="submit"]').click()

    def logout(self):
        """Click the Logout link."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()

    def is_logged_in(self):
        """Return True when a Logout link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements_by_link_text("Logout")) > 0

    def is_logged_in_as(self, username):
        """Return True when the currently logged-in user is `username`."""
        # Unused local `wd` removed: this method only delegates.
        return self.get_logged_user() == username

    def get_logged_user(self):
        """Return the username shown in the page header."""
        wd = self.app.wd
        # The header renders the user as "(name)"; slice off the parentheses.
        return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]

    def ensure_logout(self):
        """Log out if a session is active; otherwise do nothing."""
        # Unused local `wd` removed.
        if self.is_logged_in():
            self.logout()

    def ensure_login(self, username, password):
        """Ensure `username` is logged in, re-logging if another user is active."""
        # Unused local `wd` removed.
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            else:
                self.logout()
        self.login(username, password)
from sqlalchemy import Column, String, Integer, ForeignKey, SmallInteger, TIMESTAMP, BigInteger, ForeignKeyConstraint, \
Boolean, func, select, Float, distinct
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
import structlog
log =structlog.get_logger()

from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()

# Common prefix for every table name (empty string in this configuration).
tmp=''
tab_portals = tmp+'portals'
tab_portalevolution=tmp+'portalevolution'
tab_portalsnapshot=tmp+'portalsnapshot'
tab_portalsnapshotquality=tmp+'portalsnapshotquality'
tab_portalsnapshotdynamicity=tmp+'portalsnapshotdyn'
tab_portalsnapshotfetch=tmp+'portalsnapshotfetch'
tab_formatdist= tmp+"formatdist"
tab_licensedist= tmp+"licensedist"
# NOTE(review): tab_isodist points at "licensedist", the same table as
# tab_licensedist — this looks like a copy-paste error (expected "isodist").
# Not changed here because the value may name an existing database table;
# confirm before fixing.
tab_isodist= tmp+"licensedist"
tab_datasets=tmp+'datasets'
tab_datasetsquality=tmp+'datasetsquality'
tab_datasetsdata=tmp+'datasetsdata'
tab_resources=tmp+'metaresources'
tab_resourcesinfo=tmp+'resourcesinfo'
tab_resourcescrawllog=tmp+'resourcescrawllog'
tab_organisations=tmp+'organisations'
tab_organisationssnapshot=tmp+'organisationsnapshot'
tab_resourceshistory=tmp+'resourceshistory'
tab_resourcesfreshness=tmp+'resourcesfreshness'
class Portal(Base):
    """A monitored open-data portal.

    Aggregates over its snapshots (counts, first/last snapshot, latest
    dataset/resource counts) are exposed as hybrid properties so they work
    both on instances and inside SQL queries.
    """
    __tablename__ = tab_portals

    id = Column(String, primary_key=True, index=True, nullable=False)
    uri = Column(String, nullable=False)
    apiuri = Column(String)
    software = Column(String(12), nullable=False)  # OpenDataSoft, CKAN, Socrata <13
    iso = Column(String(2), nullable=False)  # two-letter country code, presumably ISO 3166-1 — confirm
    active = Column(Boolean, default=True, nullable=False)

    snapshots = relationship("PortalSnapshot", back_populates="portal")
    snapshotsquality = relationship("PortalSnapshotQuality", back_populates="portal")

    @hybrid_property
    def snapshot_count(self):
        # Leftover debug `print` removed: it was Python-2-only syntax (broke
        # importing this module on Python 3) and wrote to stdout on every access.
        return len(self.snapshots)

    @snapshot_count.expression
    def snapshot_count(cls):
        return select([func.count(PortalSnapshot.snapshot)])\
            .where(PortalSnapshot.portalid == cls.id).label("snapshot_count")

    @hybrid_property
    def first_snapshot(self):
        # Leftover debug `print` removed here as well.
        return min([s.snapshot for s in self.snapshots])

    @first_snapshot.expression
    def first_snapshot(cls):
        return select([func.min(PortalSnapshot.snapshot)])\
            .where(PortalSnapshot.portalid == cls.id).label("first_snapshot")

    @hybrid_property
    def last_snapshot(self):
        return max([s.snapshot for s in self.snapshots])

    @last_snapshot.expression
    def last_snapshot(cls):
        return select([func.max(PortalSnapshot.snapshot)])\
            .where(PortalSnapshot.portalid == cls.id).label("last_snapshot")

    @hybrid_property
    def datasetcount(self):
        # NOTE(review): .order_by() on `snapshots` only works if the
        # relationship is configured lazy='dynamic' — confirm.
        return self.snapshots.order_by(PortalSnapshot.snapshot.desc()).one().datasetcount

    @datasetcount.expression
    def datasetcount(cls):
        # Latest snapshot's dataset count, as a correlated scalar subquery.
        q = select([PortalSnapshot.datasetcount])\
            .where(PortalSnapshot.portalid == cls.id)\
            .order_by(PortalSnapshot.snapshot.desc()).limit(1).label("datasetcount")
        return q

    @hybrid_property
    def resourcecount(self):
        # Same lazy='dynamic' caveat as datasetcount above.
        return self.snapshots.order_by(PortalSnapshot.snapshot.desc()).one().resourcecount

    @resourcecount.expression
    def resourcecount(cls):
        q = select([PortalSnapshot.resourcecount])\
            .where(PortalSnapshot.portalid == cls.id)\
            .order_by(PortalSnapshot.snapshot.desc()).limit(1).label("resourcecount")
        return q

    def __repr__(self):
        return "<Portal(id=%s, uri='%s', apiuri='%s', software='%s', iso=%s)>" % (
            self.id, self.uri, self.apiuri, self.software, self.iso)
class PortalSnapshot(Base):
    """One crawl run ("snapshot") of a portal: timing, status and counts."""
    __tablename__ = tab_portalsnapshot

    portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True, nullable=False)
    snapshot = Column(SmallInteger, primary_key=True)

    portal = relationship("Portal", back_populates="snapshots")

    # Fetch-run bookkeeping.
    start = Column(TIMESTAMP)
    end = Column(TIMESTAMP)
    status = Column(SmallInteger)  # status code; semantics are project-defined — confirm
    exc = Column(String)           # exception text if the fetch failed
    datasetcount = Column(Integer)
    datasetsfetched = Column(Integer)
    resourcecount = Column(Integer)

    @hybrid_property
    def fetchtime(self):
        # Duration of the fetch run; assumes both `start` and `end` are set.
        return self.end-self.start

    datasets = relationship("Dataset", back_populates="portalsnapshot")

    def __repr__(self):
        return "<PortalSnapshot(id=%s, snapshot=%s, start=%s, end=%s, status=%s,ds=%s,res=%s)>" % (
            self.portalid, self.snapshot, self.start, self.end, self.status, self.datasetcount, self.resourcecount)
class Serializable(object):
    """Mixin exposing a dict representation of selected attributes.

    Subclasses override ``__public__`` with the attribute names to expose;
    only truthy values make it into the resulting dict.
    """

    # Attribute names included in to_dict(); empty by default.
    __public__ = []

    def to_dict(self):
        """Return {name: value} for each truthy attribute in __public__."""
        return {
            name: getattr(self, name)
            for name in self.__public__
            if getattr(self, name)
        }
class PortalSnapshotDynamicity(Base,Serializable):
    """Dataset-churn statistics for a portal snapshot.

    Column meanings are inferred from their names (counts of added /
    deleted / updated / static datasets between snapshots, with
    `intersected` as the comparison base) — confirm against the code
    that writes this table. All ratio properties divide by `intersected`
    and return 0 when it is 0, avoiding ZeroDivisionError.
    """
    __tablename__ = tab_portalsnapshotdynamicity

    portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True, nullable=False)
    snapshot = Column(SmallInteger, primary_key=True)

    updated = Column(Integer)
    added = Column(Integer)
    deleted = Column(Integer)
    static = Column(Integer)
    intersected = Column(Integer)
    dindex = Column(Integer)
    changefrequ = Column(Float)
    size = Column(Integer)

    @hybrid_property
    def dyratio(self):
        # Share of changed (added+deleted+updated) datasets among intersected.
        return (self.added+self.deleted+self.updated)\
               /(1.0* self.intersected) if self.intersected >0 else 0

    @hybrid_property
    def adddelratio(self):
        # Net growth direction: (added - deleted) over (added + deleted).
        return (self.added-self.deleted)\
               /(1.0* (self.added+self.deleted))if ((self.added+self.deleted)) >0 else 0

    @hybrid_property
    def addRatio(self):
        return (self.added) \
               / (1.0 * self.intersected)if self.intersected >0 else 0

    @hybrid_property
    def delRatio(self):
        return (self.deleted) \
               / (1.0 * self.intersected)if self.intersected >0 else 0

    @hybrid_property
    def updatedRatio(self):
        return ( self.updated) \
               / (1.0 * self.intersected)if self.intersected >0 else 0

    @hybrid_property
    def staticRatio(self):
        return (self.static) \
               / (1.0 * self.intersected)if self.intersected >0 else 0
class PortalSnapshotQuality(Base):
    """Aggregated quality metrics for a portal snapshot.

    Each short metric column (e.g. `coli`, `exda`) stores a score and is
    paired with an `...N` column. NOTE(review): the metric abbreviations
    (the co*/ex*/op* families) and the meaning of the N counterparts are
    project-defined — presumably the number of datasets each metric was
    aggregated over; confirm with the quality-computation code.
    """
    __tablename__ = tab_portalsnapshotquality

    portalid = Column(String, ForeignKey(tab_portals+'.id'), primary_key=True, index=True, nullable=False)
    snapshot = Column(SmallInteger, primary_key=True)

    portal = relationship("Portal", back_populates="snapshotsquality")

    cocu = Column(Float)
    cocuN = Column(Integer)
    coce = Column(Float)
    coceN = Column(Integer)
    coda = Column(Float)
    codaN = Column(Integer)
    cofo = Column(Float)
    cofoN = Column(Integer)
    coli = Column(Float)
    coliN = Column(Integer)
    coac = Column(Float)
    coacN = Column(Integer)
    exda = Column(Float)
    exdaN = Column(Integer)
    exri = Column(Float)
    exriN = Column(Integer)
    expr = Column(Float)
    exprN = Column(Integer)
    exac = Column(Float)
    exacN = Column(Integer)
    exdi = Column(Float)
    exdiN = Column(Integer)
    exte = Column(Float)
    exteN = Column(Integer)
    exsp = Column(Float)
    exspN = Column(Integer)
    exco = Column(Float)
    excoN = Column(Integer)
    opfo = Column(Float)
    opfoN = Column(Integer)
    opma = Column(Float)
    opmaN = Column(Integer)
    opli = Column(Float)
    opliN = Column(Integer)
    datasets = Column(Integer)

    def __repr__(self):
        return "<PortalSnapshotQuality(id=%s, snapshot=%s, agg=%s)>" % (
            self.portalid, self.snapshot, any([self.exda,self.coac,self.coce,self.cocu]))
class Dataset(Base):
    """A dataset as observed on a portal in a specific snapshot.

    The composite foreign key (portalid, snapshot) ties the row to its
    PortalSnapshot; `md5` points at the deduplicated metadata blob in
    DatasetData.
    """
    __tablename__ = tab_datasets

    id = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True, index=True)
    portalid = Column(String, primary_key=True, index=True)
    organisation = Column(String, index=True)
    title = Column(String, index=True)
    md5 = Column(String, ForeignKey(tab_datasetsdata+'.md5'), index=True)

    __table_args__ = (ForeignKeyConstraint([portalid, snapshot],
                                           [tab_portalsnapshot+'.portalid',tab_portalsnapshot+'.snapshot']),
                      {})

    portalsnapshot = relationship("PortalSnapshot", back_populates="datasets")
    data = relationship("DatasetData", back_populates="dataset")

    def __repr__(self):
        return "<Dataset(id=%s, portalid='%s', snapshot=%s, md5=%s)>" % (
            self.id, self.portalid, self.snapshot, self.md5)
class DatasetData(Base):
    """Deduplicated dataset metadata, keyed by md5.

    NOTE(review): `md5` is presumably the hash of the raw metadata JSON
    stored in `raw` (so identical metadata is stored once and shared by
    many Dataset rows) — confirm with the ingestion code.
    """
    __tablename__ = tab_datasetsdata

    md5 = Column(String, primary_key=True, index=True, nullable=False)
    raw = Column(JSONB)

    dataset = relationship("Dataset", back_populates="data")
    resources = relationship("MetaResource", back_populates="dataset")

    modified = Column(TIMESTAMP)
    created = Column(TIMESTAMP)
    organisation = Column(String, index=True)
    license = Column(String, index=True)

    def __repr__(self):
        return "<DatasetData(md5=%s, data=%s)>" % (
            self.md5, self.raw is not None)
class DatasetQuality(Base):
    """Per-metadata-blob quality assessment, one row per DatasetData md5.

    The metric columns use the same project-defined co*/ex*/op*
    abbreviations as PortalSnapshotQuality — confirm their meanings with
    the quality-computation code.
    """
    __tablename__ = tab_datasetsquality

    md5 = Column(String, ForeignKey(DatasetData.md5), primary_key=True, index=True)
    cocu = Column(Boolean)
    coce = Column(Boolean)
    coda = Column(Float)
    cofo = Column(Float)
    coli = Column(Boolean)
    coac = Column(Boolean)
    exda = Column(Float)
    exri = Column(Float)
    expr = Column(Float)
    exac = Column(Boolean)
    exdi = Column(Float)
    exte = Column(Float)
    exsp = Column(Float)
    exco = Column(Boolean)
    opfo = Column(Float)
    opma = Column(Float)
    opli = Column(Boolean)

    # One-to-one back reference: DatasetData.quality
    data = relationship("DatasetData", backref=backref("quality", uselist=False))

    def __repr__(self):
        return "<DatasetQuality(md5=%s, assessment=%s)>" % (
            self.md5, any([self.exda,self.coac,self.coce,self.cocu]))
class MetaResource(Base):
    """A resource (download link) declared in a dataset's metadata."""
    __tablename__ = tab_resources

    uri = Column(String, primary_key=True, index=True)
    md5 = Column(String, ForeignKey(DatasetData.md5), primary_key=True, index=True)
    valid = Column(Boolean)
    format = Column(String)
    media = Column(String)
    size = Column(BigInteger)
    created = Column(TIMESTAMP)
    modified = Column(TIMESTAMP)

    dataset = relationship("DatasetData", back_populates="resources")

    # NOTE(review): this looks like it was meant to be a @classmethod — the
    # decorator is missing, and it references cls.snapshot even though
    # MetaResource defines no `snapshot` column, so calling it would raise
    # AttributeError. Confirm intent before relying on it.
    def info(cls):
        return select([ResourceInfo]).where(cls.uri == ResourceInfo.uri).where(cls.snapshot== ResourceInfo.snapshot)

    def __repr__(self):
        return "<Resource(uri=%s, dataset=%s)>" % (
            self.uri, self.md5)
class ResourceInfo(Base):
    """Response metadata recorded when a resource URI was checked at a snapshot."""
    __tablename__ = tab_resourcesinfo

    uri = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True)
    timestamp = Column(TIMESTAMP)
    status = Column(SmallInteger)  # presumably an HTTP status code — confirm
    exc = Column(String)           # exception text if the check failed
    header = Column(JSONB)
    mime = Column(String)
    size = Column(BigInteger)
class ResourceCrawlLog(Base):
    """One crawl attempt of a resource URI: response info plus crawl bookkeeping."""
    __tablename__ = tab_resourcescrawllog

    uri = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True)
    timestamp = Column(TIMESTAMP, primary_key=True)
    status = Column(SmallInteger, index=True)
    exc = Column(String)
    header = Column(JSONB)
    mime = Column(String)
    size = Column(BigInteger)
    crawltime = Column(BigInteger)      # units are not visible here — confirm (ms vs s)
    referrer = Column(String)
    disklocation = Column(String)       # where the downloaded content was stored
    digest = Column(String)
    contentchanged = Column(Integer)    # presumably a changed-since-last-crawl flag — confirm
    domain = Column(String, index=True)
class ResourceHistory(Base):
    """Links a resource URI at a snapshot to the dataset metadata (md5) citing it.

    NOTE(review): `source` is part of the primary key; its semantics
    (presumably where the `modified` date was obtained from) are not
    visible here — confirm.
    """
    __tablename__ = tab_resourceshistory

    uri = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True)
    md5 = Column(String, ForeignKey(DatasetData.md5), primary_key=True)
    modified = Column(TIMESTAMP)
    source = Column(String, primary_key=True)
class ResourceFreshness(Base):
    """Freshness estimates for a resource, from headers and from metadata.

    NOTE(review): estimator names are inferred from the columns — the
    a_cho_* columns presumably hold Cho & Garcia-Molina-style change-rate
    estimators (naive / improved), a_emp_dist_* an empirical-distribution
    estimate, and mark1/mark2 two further variants. Each family is computed
    once from HTTP header dates (*_header) and once from dataset metadata
    dates (*_metadata). Confirm with the freshness-computation code.
    """
    __tablename__ = tab_resourcesfreshness

    uri = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True)
    md5 = Column(String, ForeignKey(DatasetData.md5), primary_key=True)

    a_cho_naive_header = Column(Float)
    a_cho_impr_header = Column(Float)
    a_emp_dist_header = Column(Float)
    mark1_header = Column(Float)
    mark2_header = Column(Float)

    a_cho_naive_metadata = Column(Float)
    a_cho_impr_metadata = Column(Float)
    a_emp_dist_metadata = Column(Float)
    mark1_metadata = Column(Float)
    mark2_metadata = Column(Float)
class FormatDist(Base):
    """Distribution of resource formats: count per (format, snapshot, grouping)."""
    __tablename__ = tab_formatdist

    format = Column(String, primary_key=True)
    snapshot = Column(SmallInteger, primary_key=True)
    grouping = Column(String, primary_key=True)  # grouping key semantics not visible here — confirm
    count = Column(Integer)
| ADEQUATeDQ/portalmonitor | odpw/core/model.py | Python | gpl-3.0 | 12,953 | 0.012738 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Stc(AutotoolsPackage):
    """STC: The Swift-Turbine Compiler"""

    homepage = 'http://swift-lang.org/Swift-T'
    url = 'http://swift-lang.github.io/swift-t-downloads/stc-0.7.3.tar.gz'

    version('0.7.3', '6bf769f406f6c33d1c134521373718d3')

    # Build/run dependencies: STC compiles with Java/ant and targets the
    # Turbine runtime; zsh is needed at run time.
    depends_on('java')
    depends_on('ant')
    depends_on('turbine')
    depends_on('zsh', type='run')

    def configure_args(self):
        """Point configure at the Turbine installation prefix."""
        turbine_prefix = self.spec['turbine'].prefix
        return ['--with-turbine=' + turbine_prefix]
| EmreAtes/spack | var/spack/repos/builtin/packages/stc/package.py | Python | lgpl-2.1 | 1,737 | 0.000576 |
# Testing the line trace facility.
from test import test_support
import unittest
import sys
import difflib
# A very basic example. If this fails, we're in deep trouble.
def basic():
return 1
basic.events = [(0, 'call'),
(1, 'line'),
(1, 'return')]
# Many of the tests below are tricky because they involve pass statements.
# If there is implicit control flow around a pass statement (in an except
# clause or else caluse) under what conditions do you set a line number
# following that clause?
# The entire "while 0:" statement is optimized away. No code
# exists for it, so the line numbers skip directly from "del x"
# to "x = 1".
def arigo_example():
x = 1
del x
while 0:
pass
x = 1
arigo_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(5, 'line'),
(5, 'return')]
# check that lines consisting of just one instruction get traced:
def one_instr_line():
x = 1
del x
x = 1
one_instr_line.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(3, 'return')]
def no_pop_tops(): # 0
x = 1 # 1
for a in range(2): # 2
if a: # 3
x = 1 # 4
else: # 5
x = 1 # 6
no_pop_tops.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(6, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(2, 'line'),
(2, 'return')]
def no_pop_blocks():
y = 1
while not y:
bla
x = 1
no_pop_blocks.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(4, 'line'),
(4, 'return')]
def called(): # line -3
x = 1
def call(): # line 0
called()
call.events = [(0, 'call'),
(1, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'return'),
(1, 'return')]
def raises():
raise Exception
def test_raise():
try:
raises()
except Exception, exc:
x = 1
test_raise.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(-3, 'call'),
(-2, 'line'),
(-2, 'exception'),
(-2, 'return'),
(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
def _settrace_and_return(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
def settrace_and_return(tracefunc):
_settrace_and_return(tracefunc)
settrace_and_return.events = [(1, 'return')]
def _settrace_and_raise(tracefunc):
sys.settrace(tracefunc)
sys._getframe().f_back.f_trace = tracefunc
raise RuntimeError
def settrace_and_raise(tracefunc):
try:
_settrace_and_raise(tracefunc)
except RuntimeError, exc:
pass
settrace_and_raise.events = [(2, 'exception'),
(3, 'line'),
(4, 'line'),
(4, 'return')]
# implicit return example
# This test is interesting because of the else: pass
# part of the code. The code generate for the true
# part of the if contains a jump past the else branch.
# The compiler then generates an implicit "return None"
# Internally, the compiler visits the pass statement
# and stores its line number for use on the next instruction.
# The next instruction is the implicit return None.
def ireturn_example():
a = 5
b = 5
if a == b:
b = a+1
else:
pass
ireturn_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(6, 'line'),
(6, 'return')]
# Tight loop with while(1) example (SF #765624)
def tightloop_example():
items = range(0, 3)
try:
i = 0
while 1:
b = items[i]; i+=1
except IndexError:
pass
tightloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'line'),
(5, 'exception'),
(6, 'line'),
(7, 'line'),
(7, 'return')]
def tighterloop_example():
items = range(1, 4)
try:
i = 0
while 1: i = items[i]
except IndexError:
pass
tighterloop_example.events = [(0, 'call'),
(1, 'line'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'line'),
(4, 'exception'),
(5, 'line'),
(6, 'line'),
(6, 'return')]
def generator_function():
try:
yield True
"continued"
finally:
"finally"
def generator_example():
# any() will leave the generator before its end
x = any(generator_function())
# the following lines were not traced
for x in range(10):
y = x
generator_example.events = ([(0, 'call'),
(2, 'line'),
(-6, 'call'),
(-5, 'line'),
(-4, 'line'),
(-4, 'return'),
(-4, 'call'),
(-4, 'exception'),
(-1, 'line'),
(-1, 'return')] +
[(5, 'line'), (6, 'line')] * 10 +
[(5, 'line'), (5, 'return')])
class Tracer:
    """Collects (lineno, event) pairs seen by a sys.settrace callback."""
    def __init__(self):
        self.events = []

    def trace(self, frame, event, arg):
        # Standard trace function: record the event and keep tracing.
        self.events.append((frame.f_lineno, event))
        return self.trace

    def traceWithGenexp(self, frame, event, arg):
        # Variant that runs a generator expression inside the trace function
        # (regression helper for issue1265, see test_13_genexp). It returns
        # self.trace, so only the top-level call uses the genexp variant.
        (o for o in [1])
        self.events.append((frame.f_lineno, event))
        return self.trace
class TraceTestCase(unittest.TestCase):
def compare_events(self, line_offset, events, expected_events):
events = [(l - line_offset, e) for (l, e) in events]
if events != expected_events:
self.fail(
"events did not match expectation:\n" +
"\n".join(difflib.ndiff([str(x) for x in expected_events],
[str(x) for x in events])))
def run_and_compare(self, func, events):
tracer = Tracer()
sys.settrace(tracer.trace)
func()
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, events)
def run_test(self, func):
self.run_and_compare(func, func.events)
def run_test2(self, func):
tracer = Tracer()
func(tracer.trace)
sys.settrace(None)
self.compare_events(func.func_code.co_firstlineno,
tracer.events, func.events)
def test_01_basic(self):
self.run_test(basic)
def test_02_arigo(self):
self.run_test(arigo_example)
def test_03_one_instr(self):
self.run_test(one_instr_line)
def test_04_no_pop_blocks(self):
self.run_test(no_pop_blocks)
def test_05_no_pop_tops(self):
self.run_test(no_pop_tops)
def test_06_call(self):
self.run_test(call)
def test_07_raise(self):
self.run_test(test_raise)
def test_08_settrace_and_return(self):
self.run_test2(settrace_and_return)
def test_09_settrace_and_raise(self):
self.run_test2(settrace_and_raise)
def test_10_ireturn(self):
self.run_test(ireturn_example)
def test_11_tightloop(self):
self.run_test(tightloop_example)
def test_12_tighterloop(self):
self.run_test(tighterloop_example)
def test_13_genexp(self):
self.run_test(generator_example)
# issue1265: if the trace function contains a generator,
# and if the traced function contains another generator
# that is not completely exhausted, the trace stopped.
# Worse: the 'finally' clause was not invoked.
tracer = Tracer()
sys.settrace(tracer.traceWithGenexp)
generator_example()
sys.settrace(None)
self.compare_events(generator_example.func_code.co_firstlineno,
tracer.events, generator_example.events)
def test_14_onliner_if(self):
def onliners():
if True: False
else: True
return 0
self.run_and_compare(
onliners,
[(0, 'call'),
(1, 'line'),
(3, 'line'),
(3, 'return')])
def test_15_loops(self):
# issue1750076: "while" expression is skipped by debugger
def for_example():
for x in range(2):
pass
self.run_and_compare(
for_example,
[(0, 'call'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(2, 'line'),
(1, 'line'),
(1, 'return')])
def while_example():
# While expression should be traced on every loop
x = 2
while x > 0:
x -= 1
self.run_and_compare(
while_example,
[(0, 'call'),
(2, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(4, 'line'),
(3, 'line'),
(3, 'return')])
def test_16_blank_lines(self):
exec("def f():\n" + "\n" * 256 + " pass")
self.run_and_compare(
f,
[(0, 'call'),
(257, 'line'),
(257, 'return')])
class RaisingTraceFuncTestCase(unittest.TestCase):
def trace(self, frame, event, arg):
"""A trace function that raises an exception in response to a
specific trace event."""
if event == self.raiseOnEvent:
raise ValueError # just something that isn't RuntimeError
else:
return self.trace
def f(self):
"""The function to trace; raises an exception if that's the case
we're testing, so that the 'exception' trace event fires."""
if self.raiseOnEvent == 'exception':
x = 0
y = 1/x
else:
return 1
def run_test_for_event(self, event):
"""Tests that an exception raised in response to the given event is
handled OK."""
self.raiseOnEvent = event
try:
for i in xrange(sys.getrecursionlimit() + 1):
sys.settrace(self.trace)
try:
self.f()
except ValueError:
pass
else:
self.fail("exception not thrown!")
except RuntimeError:
self.fail("recursion counter not reset")
# Test the handling of exceptions raised by each kind of trace event.
def test_call(self):
self.run_test_for_event('call')
def test_line(self):
self.run_test_for_event('line')
def test_return(self):
self.run_test_for_event('return')
def test_exception(self):
self.run_test_for_event('exception')
def test_trash_stack(self):
def f():
for i in range(5):
print i # line tracing will raise an exception at this line
def g(frame, why, extra):
if (why == 'line' and
frame.f_lineno == f.func_code.co_firstlineno + 2):
raise RuntimeError, "i am crashing"
return g
sys.settrace(g)
try:
f()
except RuntimeError:
# the test is really that this doesn't segfault:
import gc
gc.collect()
else:
self.fail("exception not propagated")
# 'Jump' tests: assigning to frame.f_lineno within a trace function
# moves the execution position - it's how debuggers implement a Jump
# command (aka. "Set next statement").
class JumpTracer:
    """Defines a trace function that jumps from one place to another,
    with the source and destination lines of the jump being defined by
    the 'jump' property of the function under test."""
    def __init__(self, function):
        self.function = function
        # jump is (from_offset, to_offset), relative to the function's
        # first line; see the jump_* / no_jump_* fixtures above.
        self.jumpFrom = function.jump[0]
        self.jumpTo = function.jump[1]
        self.done = False

    def trace(self, frame, event, arg):
        # Perform the jump at most once, and only in the target function.
        if not self.done and frame.f_code == self.function.func_code:
            firstLine = frame.f_code.co_firstlineno
            if frame.f_lineno == firstLine + self.jumpFrom:
                # Cope with non-integer self.jumpTo (because of
                # no_jump_to_non_integers below).
                try:
                    frame.f_lineno = firstLine + self.jumpTo
                except TypeError:
                    frame.f_lineno = self.jumpTo
                self.done = True
        return self.trace
# The first set of 'jump' tests are for things that are allowed:
def jump_simple_forwards(output):
output.append(1)
output.append(2)
output.append(3)
jump_simple_forwards.jump = (1, 3)
jump_simple_forwards.output = [3]
def jump_simple_backwards(output):
output.append(1)
output.append(2)
jump_simple_backwards.jump = (2, 1)
jump_simple_backwards.output = [1, 1, 2]
def jump_out_of_block_forwards(output):
for i in 1, 2:
output.append(2)
for j in [3]: # Also tests jumping over a block
output.append(4)
output.append(5)
jump_out_of_block_forwards.jump = (3, 5)
jump_out_of_block_forwards.output = [2, 5]
def jump_out_of_block_backwards(output):
output.append(1)
for i in [1]:
output.append(3)
for j in [2]: # Also tests jumping over a block
output.append(5)
output.append(6)
output.append(7)
jump_out_of_block_backwards.jump = (6, 1)
jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7]
def jump_to_codeless_line(output):
output.append(1)
# Jumping to this line should skip to the next one.
output.append(3)
jump_to_codeless_line.jump = (1, 2)
jump_to_codeless_line.output = [3]
def jump_to_same_line(output):
output.append(1)
output.append(2)
output.append(3)
jump_to_same_line.jump = (2, 2)
jump_to_same_line.output = [1, 2, 3]
# Tests jumping within a finally block, and over one.
def jump_in_nested_finally(output):
try:
output.append(2)
finally:
output.append(4)
try:
output.append(6)
finally:
output.append(8)
output.append(9)
jump_in_nested_finally.jump = (4, 9)
jump_in_nested_finally.output = [2, 9]
# The second set of 'jump' tests are for things that are not allowed:
def no_jump_too_far_forwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('after' in str(e))
no_jump_too_far_forwards.jump = (3, 6)
no_jump_too_far_forwards.output = [2, True]
def no_jump_too_far_backwards(output):
try:
output.append(2)
output.append(3)
except ValueError, e:
output.append('before' in str(e))
no_jump_too_far_backwards.jump = (3, -1)
no_jump_too_far_backwards.output = [2, True]
# Test each kind of 'except' line.
def no_jump_to_except_1(output):
try:
output.append(2)
except:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_1.jump = (2, 3)
no_jump_to_except_1.output = [True]
def no_jump_to_except_2(output):
try:
output.append(2)
except ValueError:
e = sys.exc_info()[1]
output.append('except' in str(e))
no_jump_to_except_2.jump = (2, 3)
no_jump_to_except_2.output = [True]
def no_jump_to_except_3(output):
try:
output.append(2)
except ValueError, e:
output.append('except' in str(e))
no_jump_to_except_3.jump = (2, 3)
no_jump_to_except_3.output = [True]
def no_jump_to_except_4(output):
try:
output.append(2)
except (ValueError, RuntimeError), e:
output.append('except' in str(e))
no_jump_to_except_4.jump = (2, 3)
no_jump_to_except_4.output = [True]
def no_jump_forwards_into_block(output):
try:
output.append(2)
for i in 1, 2:
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_forwards_into_block.jump = (2, 4)
no_jump_forwards_into_block.output = [True]
def no_jump_backwards_into_block(output):
try:
for i in 1, 2:
output.append(3)
output.append(4)
except ValueError, e:
output.append('into' in str(e))
no_jump_backwards_into_block.jump = (4, 3)
no_jump_backwards_into_block.output = [3, 3, True]
def no_jump_into_finally_block(output):
try:
try:
output.append(3)
x = 1
finally:
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_into_finally_block.jump = (4, 6)
no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs
def no_jump_out_of_finally_block(output):
try:
try:
output.append(3)
finally:
output.append(5)
output.append(6)
except ValueError, e:
output.append('finally' in str(e))
no_jump_out_of_finally_block.jump = (5, 1)
no_jump_out_of_finally_block.output = [3, True]
# This verifies the line-numbers-must-be-integers rule.
def no_jump_to_non_integers(output):
try:
output.append(2)
except ValueError, e:
output.append('integer' in str(e))
no_jump_to_non_integers.jump = (2, "Spam")
no_jump_to_non_integers.output = [True]
# This verifies that you can't set f_lineno via _getframe or similar
# trickery.
def no_jump_without_trace_function():
try:
previous_frame = sys._getframe().f_back
previous_frame.f_lineno = previous_frame.f_lineno
except ValueError, e:
# This is the exception we wanted; make sure the error message
# talks about trace functions.
if 'trace' not in str(e):
raise
else:
# Something's wrong - the expected exception wasn't raised.
raise RuntimeError, "Trace-function-less jump failed to fail"
class JumpTestCase(unittest.TestCase):
def compare_jump_output(self, expected, received):
if received != expected:
self.fail( "Outputs don't match:\n" +
"Expected: " + repr(expected) + "\n" +
"Received: " + repr(received))
def run_test(self, func):
tracer = JumpTracer(func)
sys.settrace(tracer.trace)
output = []
func(output)
sys.settrace(None)
self.compare_jump_output(func.output, output)
def test_01_jump_simple_forwards(self):
self.run_test(jump_simple_forwards)
def test_02_jump_simple_backwards(self):
self.run_test(jump_simple_backwards)
def test_03_jump_out_of_block_forwards(self):
self.run_test(jump_out_of_block_forwards)
def test_04_jump_out_of_block_backwards(self):
self.run_test(jump_out_of_block_backwards)
def test_05_jump_to_codeless_line(self):
self.run_test(jump_to_codeless_line)
def test_06_jump_to_same_line(self):
self.run_test(jump_to_same_line)
def test_07_jump_in_nested_finally(self):
self.run_test(jump_in_nested_finally)
def test_08_no_jump_too_far_forwards(self):
self.run_test(no_jump_too_far_forwards)
def test_09_no_jump_too_far_backwards(self):
self.run_test(no_jump_too_far_backwards)
def test_10_no_jump_to_except_1(self):
self.run_test(no_jump_to_except_1)
def test_11_no_jump_to_except_2(self):
self.run_test(no_jump_to_except_2)
def test_12_no_jump_to_except_3(self):
self.run_test(no_jump_to_except_3)
def test_13_no_jump_to_except_4(self):
self.run_test(no_jump_to_except_4)
def test_14_no_jump_forwards_into_block(self):
self.run_test(no_jump_forwards_into_block)
def test_15_no_jump_backwards_into_block(self):
self.run_test(no_jump_backwards_into_block)
def test_16_no_jump_into_finally_block(self):
self.run_test(no_jump_into_finally_block)
def test_17_no_jump_out_of_finally_block(self):
self.run_test(no_jump_out_of_finally_block)
def test_18_no_jump_to_non_integers(self):
self.run_test(no_jump_to_non_integers)
def test_19_no_jump_without_trace_function(self):
no_jump_without_trace_function()
def test_main():
    """Entry point for regrtest: run every trace/jump test case."""
    cases = (
        TraceTestCase,
        RaisingTraceFuncTestCase,
        JumpTestCase,
    )
    test_support.run_unittest(*cases)
# Allow running this test module directly (regrtest normally imports it).
if __name__ == "__main__":
    test_main()
| babble/babble | include/jython/Lib/test/test_trace.py | Python | apache-2.0 | 21,728 | 0.005339 |
from copy import copy
import datetime
import time
import urllib2
from nose.tools import assert_equals
from nose.plugins.skip import SkipTest
from autoscalebot import TOO_LOW, JUST_RIGHT, TOO_HIGH
from autoscalebot.conf import AutoscaleSettings
from autoscalebot.models import HerokuAutoscaler
class TestSettings(AutoscaleSettings):
    """Plain settings subclass so tests can attach attributes freely."""
    pass
# Shared fixture: a fake app scaling between 1 and 3 web dynos, one dyno
# at a time.  Scale up after 3 consecutive fails, down after 5 passes.
test_settings = TestSettings()
test_settings.HEROKU_APP_NAME = "test-app"
test_settings.HEROKU_API_KEY = "1234567"
test_settings.HEARTBEAT_INTERVAL_IN_SECONDS = 30
test_settings.HEARTBEAT_URL = 'http://www.google.com'
# Acceptable response-time window in milliseconds.
test_settings.MAX_RESPONSE_TIME_IN_MS = 1000
test_settings.MIN_RESPONSE_TIME_IN_MS = 400
test_settings.NUMBER_OF_FAILS_TO_SCALE_UP_AFTER = 3
test_settings.NUMBER_OF_PASSES_TO_SCALE_DOWN_AFTER = 5
test_settings.MAX_DYNOS = 3
test_settings.MIN_DYNOS = 1
test_settings.INCREMENT = 1
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_THRESHOLD = None
test_settings.NOTIFY_IF_SCALE_DIFF_EXCEEDS_PERIOD_IN_MINUTES = None
test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = True
test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = True
# In-memory backend: collects notification messages for assertions.
test_settings.NOTIFICATION_BACKENDS = ["autoscalebot.backends.notification.TestBackend", ]
class MockHerokuProcesses:
    """In-memory stand-in for a Heroku process list.

    Starts with a single process and supports scale() plus the Python 2
    iterator protocol (``next``) so callers can count processes by
    iterating.
    """

    def __init__(self):
        self.current = 0
        self._processes = [1, ]

    @property
    def processes(self):
        # Create the backing list lazily so bare instances still work.
        if not hasattr(self, "_processes"):
            self._processes = [1, ]
        return self._processes

    def scale(self, new_num):
        # Replace the process list with ids 1..new_num.
        self._processes = list(range(1, new_num + 1))

    def __iter__(self):
        return self

    def next(self):
        # Python 2 style iterator step over self.processes.
        position = self.current
        self.current = position + 1
        if position >= len(self.processes):
            raise StopIteration
        return self.processes[position]
class MockBrokenHerokuProcesses(MockHerokuProcesses):
    """Process-list double whose scale() always fails.

    Bug fix: the override used to be ``def scale(self)`` with no
    ``new_num`` parameter, so callers invoking ``scale(n)`` (as the real
    scaler does) got a TypeError from the call signature instead of the
    intended simulated API failure.  Accept the parent's argument (with a
    default so any old zero-argument calls keep working) and raise
    explicitly.
    """

    def scale(self, new_num=None):
        # Simulate the Heroku API failing regardless of the requested size.
        raise Exception
class MockHerokuApp:
    """Heroku app double exposing a single 'web' process list."""

    def __init__(self, *args, **kwargs):
        # Touch the property so the process dict exists immediately.
        self.processes

    @property
    def processes(self):
        try:
            return self._processes
        except AttributeError:
            self._processes = {'web': MockHerokuProcesses()}
            return self._processes
class MockBrokenHerokuApp(MockHerokuApp):
    """App double whose 'web' process list always fails to scale."""

    @property
    def processes(self):
        try:
            return self._processes
        except AttributeError:
            self._processes = {'web': MockBrokenHerokuProcesses()}
            return self._processes
class MockHerokuAutoscaler(HerokuAutoscaler):
    """HerokuAutoscaler wired to the in-memory MockHerokuApp."""

    def __init__(self, *args, **kwargs):
        super(MockHerokuAutoscaler, self).__init__(*args, **kwargs)
        # Touch the property so the mock app exists up front.
        self.heroku_app

    @property
    def heroku_app(self):
        try:
            return self._heroku_app
        except AttributeError:
            self._heroku_app = MockHerokuApp()
            return self._heroku_app

    def out_of_band_heroku_scale(self, num_dynos):
        # Ugly mock of someone resizing the app outside this scaler:
        # force four processes directly, bypassing heroku_scale().
        web = self.heroku_app.processes["web"]
        web._processes = [1, 2, 3, 4]
        self._num_dynos = len(web._processes)
class MockValidResponse:
    """Minimal urllib2-response double whose body is always "A"."""

    def read(self, *args, **kwargs):
        # A successful body read.
        return "A"
class Mock500Response:
    """Response double that blows up when its body is read."""

    def read(self, *args, **kwargs):
        # Simulate a server-side failure surfacing during the read.
        raise Exception
def mock_valid_urlopen(self, *args, **kwargs):
    """urllib2.urlopen replacement: ~0.5 s latency, then a good response.

    Note: tests assign this to ``urllib2.urlopen``, so the first
    positional argument (historically named ``self`` here) receives
    whatever the caller passes as the URL.
    """
    # 500 ms sits between MIN (400 ms) and MAX (1000 ms) response times.
    time.sleep(0.5)
    return MockValidResponse()
def mock_invalid_urlopen(self, *args, **kwargs):
    """urllib2.urlopen replacement whose response read() raises."""
    return Mock500Response()
def mock_fast_urlopen(self, *args, **kwargs):
    """urllib2.urlopen replacement that answers instantly (no latency)."""
    return MockValidResponse()
def mock_slow_urlopen(self, *args, **kwargs):
    """urllib2.urlopen replacement taking ~2 s (over the 1 s maximum)."""
    time.sleep(2)
    return MockValidResponse()
class TestHerokuAutoscaler:
    """Nose tests for HerokuAutoscaler using the in-memory Heroku mocks.

    The scaler under test is configured by the module-level
    ``test_settings`` (1-3 dynos, increment 1, scale up after 3 fails,
    scale down after 5 passes).  Tests that need different settings copy
    ``test_settings`` and rebuild ``self._test_scaler`` directly.
    """
    def setUp(self):
        # Touch the lazy property so every test starts with a scaler.
        self.test_scaler
    @property
    def test_scaler(self):
        # Lazily build one MockHerokuAutoscaler per test instance.
        if not hasattr(self, "_test_scaler"):
            self._test_scaler = MockHerokuAutoscaler(test_settings)
        return self._test_scaler
    def test_heroku_scale(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.heroku_scale(3)
        assert_equals(self.test_scaler.num_dynos, 3)
        # A request above MAX_DYNOS (3) is expected to be clamped.
        self.test_scaler.heroku_scale(5)
        assert_equals(self.test_scaler.num_dynos, 3)
        self.test_scaler.heroku_scale(2)
        assert_equals(self.test_scaler.num_dynos, 2)
    def test_num_dynos(self):
        self.test_scaler.heroku_scale(3)
        assert_equals(len([i for i in self.test_scaler.heroku_app.processes['web']]), 3)
    def test_add_to_history(self):
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(JUST_RIGHT)
        assert_equals(self.test_scaler.results, [TOO_LOW, TOO_HIGH, JUST_RIGHT])
    def test_add_to_history_caps_length(self):
        # Seven entries in, only the most recent five are kept.
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        assert_equals(self.test_scaler.results, [TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW, TOO_LOW])
    def test_needs_scale_up_works(self):
        self.test_scaler.add_to_history(TOO_LOW)
        assert_equals(self.test_scaler.needs_scale_up, False)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        assert_equals(self.test_scaler.needs_scale_up, True)
    def test_needs_scale_down_works(self):
        self.test_scaler.add_to_history(TOO_HIGH)
        assert_equals(self.test_scaler.needs_scale_down, False)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        assert_equals(self.test_scaler.needs_scale_down, True)
    def test_scale_up(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.scale_up()
        assert_equals(self.test_scaler.num_dynos, 2)
    def test_scale_up_stops_at_limit(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        assert_equals(self.test_scaler.num_dynos, 3)
    def test_scale_down(self):
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        assert_equals(self.test_scaler.num_dynos, 3)
        self.test_scaler.scale_down()
        assert_equals(self.test_scaler.num_dynos, 2)
    def test_scale_down_stops_at_limit(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_down()
        self.test_scaler.scale_down()
        self.test_scaler.scale_down()
        self.test_scaler.scale_down()
        self.test_scaler.scale_down()
        self.test_scaler.scale_down()
        assert_equals(self.test_scaler.num_dynos, 1)
    def test_do_autoscale_up_works(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 3)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 3)
    def test_do_autoscale_down_works(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        assert_equals(self.test_scaler.num_dynos, 3)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        assert_equals(self.test_scaler.num_dynos, 3)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 1)
    def test_max_dynos_from_time_based_settings_works(self):
        # MAX_DYNOS may be a {"HH:MM": n} schedule; the value in effect
        # is the one for the most recent schedule boundary.
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.MAX_DYNOS = {
            "0:00": 2,
            "9:00": 5,
            "17:00": 3
        }
        now_time = datetime.datetime.now()
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
        mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
        evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
        morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
        morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
        assert_equals(self.test_scaler.max_num_dynos(when=early_morning), 2)
        assert_equals(self.test_scaler.max_num_dynos(when=mid_day), 5)
        assert_equals(self.test_scaler.max_num_dynos(when=evening), 3)
        assert_equals(self.test_scaler.max_num_dynos(when=morning_off_by_minutes), 5)
        assert_equals(self.test_scaler.max_num_dynos(when=morning_exact), 5)
    def test_min_dynos_from_time_based_settings_works(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.MIN_DYNOS = {
            "0:00": 2,
            "9:00": 5,
            "17:00": 3
        }
        now_time = datetime.datetime.now()
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        early_morning = datetime.datetime(now_time.year, now_time.month, now_time.day, 1, 0)
        mid_day = datetime.datetime(now_time.year, now_time.month, now_time.day, 12, 0)
        evening = datetime.datetime(now_time.year, now_time.month, now_time.day, 18, 0)
        morning_off_by_minutes = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 5)
        morning_exact = datetime.datetime(now_time.year, now_time.month, now_time.day, 9, 0)
        assert_equals(self.test_scaler.min_num_dynos(when=early_morning), 2)
        assert_equals(self.test_scaler.min_num_dynos(when=mid_day), 5)
        assert_equals(self.test_scaler.min_num_dynos(when=evening), 3)
        assert_equals(self.test_scaler.min_num_dynos(when=morning_off_by_minutes), 5)
        assert_equals(self.test_scaler.min_num_dynos(when=morning_exact), 5)
    def test_custom_increments_work(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.INCREMENT = 2
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 3)
    def test_if_min_is_changed_to_higher_than_current_scaling_works(self):
        self.test_scaler.heroku_scale(1)
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.MIN_DYNOS = 2
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
    def test_if_max_is_changed_to_lower_than_current_scaling_works(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.MAX_DYNOS = 2
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.out_of_band_heroku_scale(4)
        assert_equals(self.test_scaler.num_dynos, 4)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
    def test_scaling_clears_the_results_queue(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
        assert_equals(self.test_scaler.results, [])
    def test_a_mixed_stack_of_low_high_scales_to_the_min_needed_for_the_condition(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.do_autoscale()
        assert_equals(self.test_scaler.num_dynos, 2)
    # The ping tests monkeypatch urllib2.urlopen with the module-level
    # mock_*_urlopen helpers to simulate different response latencies.
    def test_ping_and_store_for_valid_url(self):
        urllib2.urlopen = mock_valid_urlopen
        assert_equals(self.test_scaler.results, [])
        self.test_scaler.ping_and_store()
        assert_equals(self.test_scaler.results, [JUST_RIGHT])
    def test_ping_and_store_for_invalid_url(self):
        urllib2.urlopen = mock_invalid_urlopen
        assert_equals(self.test_scaler.results, [])
        self.test_scaler.ping_and_store()
        assert_equals(self.test_scaler.results, [TOO_HIGH])
    def test_ping_and_store_for_slow_url(self):
        urllib2.urlopen = mock_slow_urlopen
        assert_equals(self.test_scaler.results, [])
        self.test_scaler.ping_and_store()
        assert_equals(self.test_scaler.results, [TOO_HIGH])
    def test_ping_and_store_for_fast_url(self):
        urllib2.urlopen = mock_fast_urlopen
        assert_equals(self.test_scaler.results, [])
        self.test_scaler.ping_and_store()
        assert_equals(self.test_scaler.results, [TOO_LOW])
    # NOTE: the next two tests are placeholders; the feature was never
    # written (Python 2 print statements below).
    def test_notify_if_scale_diff_exceeds_threshold_works(self):
        assert_equals(self.test_scaler.num_dynos, 1)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        assert_equals(self.test_scaler.num_dynos, 3)
        print "Feature not written"
        raise SkipTest
    def test_notify_if_scale_diff_exceeds_period_in_minutes_works(self):
        print "Feature not written"
        raise SkipTest
    # The notification tests read messages collected by the in-memory
    # TestBackend (self.test_scaler.backends[0]).
    def test_notify_if_needs_exceed_max_works(self):
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.backends[0].clear_messages()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        assert_equals(len(self.test_scaler.backends[0].messages), 1)
        assert "max" in self.test_scaler.backends[0].messages[0]
    def test_notify_if_needs_below_min_does_not_notify_on_one_dyno_works(self):
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.backends[0].clear_messages()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
    def test_notify_if_needs_below_min_works(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.MIN_DYNOS = 2
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.backends[0].clear_messages()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        assert_equals(len(self.test_scaler.backends[0].messages), 1)
        assert "min" in self.test_scaler.backends[0].messages[0]
    def test_notify_if_needs_exceed_max_disabled_works(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.NOTIFY_IF_NEEDS_EXCEED_MAX = False
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.scale_up()
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.add_to_history(TOO_HIGH)
        self.test_scaler.backends[0].clear_messages()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
    def test_notify_if_needs_below_min_disabled_works(self):
        # NOTE(review): one_off_test_settings is built but never used to
        # rebuild the scaler here — the default settings remain in effect.
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.NOTIFY_IF_NEEDS_BELOW_MIN = False
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.add_to_history(TOO_LOW)
        self.test_scaler.backends[0].clear_messages()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.do_autoscale()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
    def test_notify_on_scale_fails_works(self):
        self.test_scaler._heroku_app = MockBrokenHerokuApp()
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.scale_up()
        assert_equals(len(self.test_scaler.backends[0].messages), 1)
        assert "fail" in self.test_scaler.backends[0].messages[0]
    def test_notify_on_every_scale_works(self):
        assert_equals(len(self.test_scaler.backends[0].messages), 0)
        self.test_scaler.scale_up()
        assert_equals(len(self.test_scaler.backends[0].messages), 1)
    def test_all_backends_are_called_on_notification(self):
        one_off_test_settings = copy(test_settings)
        one_off_test_settings.NOTIFICATION_BACKENDS = [
            "autoscalebot.backends.notification.TestBackend",
            "autoscalebot.backends.notification.TestBackend"
        ]
        self._test_scaler = MockHerokuAutoscaler(one_off_test_settings)
        assert_equals([len(b.messages) for b in self.test_scaler.backends], [0, 0])
        self.test_scaler.scale_up()
        assert_equals([len(b.messages) for b in self.test_scaler.backends], [1, 1])
# TODO: django tests
| wieden-kennedy/autoscalebot | autoscalebot/tests.py | Python | bsd-3-clause | 21,789 | 0.001056 |
import trafaret as t
from server.core.passwords import generate_password, check_password
from server.core.forms import TrafaretForm, TrafaretError
class RegistrationForm(TrafaretForm):
    """Sign-up form: validates the payload and creates the user document."""

    fields = t.Dict({
        t.Key('email'): t.Email(),
        t.Key('password'): t.String(max_length=255),
        t.Key('confirm'): t.String(max_length=255),
        t.Key('accept_tos'): t.StrBool(),
    })

    async def extra_validation(self):
        """Cross-field checks: password confirmation and email uniqueness."""
        problems = {}
        if self.data['confirm'] != self.data['password']:
            problems['confirm'] = 'Passwords should match.'
        existing = await self.db.users.find_one({'email': self.data['email']})
        if existing:
            problems['email'] = 'User with this email is already registered.'
        if problems:
            raise TrafaretError(problems)

    async def save(self):
        """Insert the new user (password hashed) and return the document."""
        document = {
            'email': self.data['email'],
            'password': generate_password(self.data['password']),
        }
        outcome = await self.db.users.insert_one(document)
        document['_id'] = outcome.inserted_id
        return document
class LoginForm(TrafaretForm):
    """Login form: checks credentials against the users collection.

    After a successful ``extra_validation`` the matched user document is
    available via ``get_user()`` (None when no user matched).
    """

    user = None
    fields = t.Dict({
        t.Key('email'): t.Email(),
        t.Key('password'): t.String(max_length=255),
    })

    async def extra_validation(self):
        """Look up the user by email and verify the password hash.

        Raises:
            TrafaretError: with per-field messages on any failure.
        """
        errors = {}
        user = await self.db.users.find_one({'email': self.data['email']})
        if not user:
            errors['email'] = 'User not found'
        else:
            # Bug fix: find_one returns a plain mapping (the documents are
            # stored as dicts, see RegistrationForm.save), so the stored
            # hash lives under the 'password' key; the old attribute
            # access ``user.password`` raised AttributeError on every
            # login attempt.
            if not check_password(self.data['password'], user['password']):
                errors['password'] = 'Password is not correct'
        self.user = user
        if errors:
            raise TrafaretError(errors)

    def get_user(self):
        """Return the user document loaded during validation (or None)."""
        return self.user
| vgamula/sp | server/accounts/forms.py | Python | mit | 1,763 | 0 |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
"""Main gpaw module."""
import os
import sys
try:
    from distutils.util import get_platform
except ImportError:
    # distutils is unavailable: fall back to the architecture string
    # supplied by the build via the GPAW_GET_PLATFORM environment variable.
    modulepath = os.environ.get('GPAW_GET_PLATFORM')
    if modulepath is None:
        errmsg = ('Error: Could not get platform from distutils. '
                  'Set the GPAW_GET_PLATFORM environment variable to '
                  'the architecture string printed during build.')
        raise ImportError(errmsg)
    def get_platform():
        # Mimic distutils.util.get_platform() using the env-provided value.
        return modulepath
from glob import glob
from os.path import join, isfile
import numpy as np
# Refuse to run with numpy 1.6.0 (that exact release is rejected here).
assert not np.version.version.startswith('1.6.0')
# Public API re-exported from the subpackages imported below.
__all__ = ['GPAW', 'Calculator',
           'Mixer', 'MixerSum', 'MixerDif', 'MixerSum2',
           'CG', 'Davidson', 'RMM_DIIS', 'LCAO',
           'PoissonSolver',
           'FermiDirac', 'MethfesselPaxton',
           'restart']
class ConvergenceError(Exception):
    """Base class for convergence failures in GPAW calculations."""
    pass
class KohnShamConvergenceError(ConvergenceError):
    """Raised when the Kohn-Sham SCF cycle fails to converge."""
    pass
class PoissonConvergenceError(ConvergenceError):
    """Raised when the Poisson solver fails to converge."""
    pass
# Check for special command line arguments:
# Module-level option globals, overridden by the --... flags parsed below.
debug = False
trace = False
dry_run = 0
memory_estimate_depth = 2
# Domain/band parallelization requests (None = decide automatically).
parsize_domain = None
parsize_bands = None
# ScaLAPACK layout requests: [nprow, npcol, mb] lists or None.
sl_default = None
sl_diagonalize = None
sl_inverse_cholesky = None
sl_lcao = None
sl_lrtddft = None
buffer_size = None
extra_parameters = {}
profile = False
def parse_sl_layout(value):
    """Parse a ScaLAPACK layout option value of the form 'nprow,npcol,mb'.

    Each of the three comma-separated fields is either a positive integer
    or the letter 'd' (meaning: use the built-in default for that field).
    The single word 'default' is shorthand for 'd,d,d'.  (Older comments
    also mentioned a cpus_per_node field, but the code accepts exactly
    three fields.)

    Returns a three-element list mixing ints and 'd' strings.
    """
    sl_args = value.split(',')
    if len(sl_args) == 1:
        assert sl_args[0] == 'default'
        return ['d'] * 3
    assert len(sl_args) == 3
    layout = []
    for sl_arg in sl_args:
        assert sl_arg is not None
        # Bug fix: this used to be ``sl_arg is not 'd'`` — an identity
        # comparison with a string literal that only worked because
        # CPython interns 1-character strings (and warns in 3.8+).
        if sl_arg != 'd':
            assert int(sl_arg) > 0
            layout.append(int(sl_arg))
        else:
            layout.append(sl_arg)
    return layout

# Consume gpaw-specific command line arguments, leaving the rest in
# sys.argv for the calling script.
i = 1
while len(sys.argv) > i:
    arg = sys.argv[i]
    if arg.startswith('--gpaw-'):
        # Found old-style gpaw command line argument:
        arg = '--' + arg[7:]
        raise RuntimeError('Warning: Use %s instead of %s.' %
                           (arg, sys.argv[i]))
    if arg == '--trace':
        trace = True
    elif arg == '--debug':
        debug = True
    elif arg.startswith('--dry-run'):
        # --dry-run or --dry-run=N
        dry_run = 1
        if len(arg.split('=')) == 2:
            dry_run = int(arg.split('=')[1])
    elif arg.startswith('--memory-estimate-depth'):
        # --memory-estimate-depth or --memory-estimate-depth=N
        memory_estimate_depth = -1
        if len(arg.split('=')) == 2:
            memory_estimate_depth = int(arg.split('=')[1])
    elif arg.startswith('--domain-decomposition='):
        parsize_domain = [int(n) for n in arg.split('=')[1].split(',')]
        if len(parsize_domain) == 1:
            parsize_domain = parsize_domain[0]
        else:
            assert len(parsize_domain) == 3
    elif arg.startswith('--state-parallelization='):
        parsize_bands = int(arg.split('=')[1])
    elif arg.startswith('--sl_default='):
        sl_default = parse_sl_layout(arg.split('=')[1])
    elif arg.startswith('--sl_diagonalize='):
        sl_diagonalize = parse_sl_layout(arg.split('=')[1])
    elif arg.startswith('--sl_inverse_cholesky='):
        sl_inverse_cholesky = parse_sl_layout(arg.split('=')[1])
    elif arg.startswith('--sl_lcao='):
        sl_lcao = parse_sl_layout(arg.split('=')[1])
    elif arg.startswith('--sl_lrtddft='):
        sl_lrtddft = parse_sl_layout(arg.split('=')[1])
    elif arg.startswith('--buffer_size='):
        # Buffer size for MatrixOperator in MB
        buffer_size = int(arg.split('=')[1])
    elif arg.startswith('--gpaw='):
        extra_parameters = eval('dict(%s)' % arg[7:])
    elif arg == '--gpaw':
        # Value is the next argument; pop it so it is consumed as well.
        extra_parameters = eval('dict(%s)' % sys.argv.pop(i + 1))
    elif arg.startswith('--profile='):
        profile = arg.split('=')[1]
    else:
        # Not a gpaw option: leave it for the caller and move on.
        i += 1
        continue
    # Delete used command line argument:
    del sys.argv[i]
if debug:
    # Debug mode: make floating point problems fatal and poison
    # uninitialized arrays so reads of unset memory are caught early.
    np.seterr(over='raise', divide='raise', invalid='raise', under='ignore')
    oldempty = np.empty
    def empty(*args, **kwargs):
        # Replacement for np.empty that fills new arrays with NaN
        # (or -1000000 for dtypes that cannot hold NaN).
        a = oldempty(*args, **kwargs)
        try:
            a.fill(np.nan)
        except ValueError:
            a.fill(-1000000)
        return a
    # Monkeypatch numpy globally; affects every np.empty call in-process.
    np.empty = empty
# Location of the distutils build tree relative to this package.
build_path = join(__path__[0], '..', 'build')
# Platform/Python-version tag used by distutils build directories.
arch = '%s-%s' % (get_platform(), sys.version[0:3])
# If we are running the code from the source directory, then we will
# want to use the extension from the distutils build directory:
sys.path.insert(0, join(build_path, 'lib.' + arch))
def get_gpaw_python_path():
    """Return the directory containing the 'gpaw-python' executable.

    The distutils build 'bin' directory is searched first, then every
    entry of $PATH.  Raises RuntimeError when the binary is not found.
    """
    candidates = [join(build_path, 'bin.' + arch)]
    candidates.extend(os.environ['PATH'].split(os.pathsep))
    for candidate in candidates:
        if isfile(join(candidate, 'gpaw-python')):
            return candidate
    raise RuntimeError('Could not find gpaw-python!')
# Directories searched for PAW setup files: taken from GPAW_SETUP_PATH,
# otherwise platform defaults (';' as path separator implies Windows).
try:
    setup_paths = os.environ['GPAW_SETUP_PATH'].split(os.pathsep)
except KeyError:
    if os.pathsep == ';':
        setup_paths = [r'C:\gpaw-setups']
    else:
        setup_paths = ['/usr/local/share/gpaw-setups',
                       '/usr/share/gpaw-setups']
from gpaw.aseinterface import GPAW
from gpaw.mixer import Mixer, MixerSum, MixerDif, MixerSum2
from gpaw.eigensolvers import Davidson, RMM_DIIS, CG, LCAO
from gpaw.poisson import PoissonSolver
from gpaw.occupations import FermiDirac, MethfesselPaxton
from gpaw.wavefunctions.pw import PW
class Calculator(GPAW):
    """Deprecated alias for GPAW kept for backwards compatibility.

    Warns on stderr and then behaves exactly like GPAW.
    """
    def __init__(self, *args, **kwargs):
        sys.stderr.write('Please start using GPAW instead of Calculator!\n')
        GPAW.__init__(self, *args, **kwargs)
def restart(filename, Class=GPAW, **kwargs):
    """Reload a calculation from *filename*.

    Returns an (atoms, calculator) pair; *Class* selects the calculator
    type (GPAW by default) and **kwargs are forwarded to its constructor.
    """
    calc = Class(filename, **kwargs)
    return calc.get_atoms(), calc
if trace:
    # --trace: print an indented call tree of every gpaw-internal call
    # via sys.setprofile.  The indent string is widened on 'call' events
    # and narrowed on 'return' events.
    indent = '    '
    path = __path__[0]
    from gpaw.mpi import parallel, rank
    if parallel:
        # Prefix each line with the MPI rank when running in parallel.
        indent = 'CPU%d    ' % rank
    def f(frame, event, arg):
        global indent
        f = frame.f_code.co_filename
        # Only trace frames that live inside the gpaw package.
        if not f.startswith(path):
            return
        if event == 'call':
            print('%s%s:%d(%s)' % (indent, f[len(path):], frame.f_lineno,
                                   frame.f_code.co_name))
            indent += '| '
        elif event == 'return':
            indent = indent[:-2]
    sys.setprofile(f)
if profile:
    # --profile=FILENAME: profile the whole run with cProfile and dump
    # the stats at interpreter exit ('-' prints to stdout instead).
    from cProfile import Profile
    import atexit
    prof = Profile()
    def f(prof, filename):
        # atexit hook: stop profiling and write/print the statistics.
        prof.disable()
        from gpaw.mpi import rank
        if filename == '-':
            prof.print_stats('time')
        else:
            # One stats file per MPI rank.
            prof.dump_stats(filename + '.%04d' % rank)
    atexit.register(f, prof, profile)
    prof.enable()
# Deliberate hook: execute arbitrary startup code from the GPAWSTARTUP
# environment variable.  Note this runs whatever the variable contains.
command = os.environ.get('GPAWSTARTUP')
if command is not None:
    exec(command)
def is_parallel_environment():
    """Check if we are running in a parallel environment.

    This function can be redefined in ~/.gpaw/rc.py.  Example::

        def is_parallel_environment():
            import os
            return 'PBS_NODEFILE' in os.environ
    """
    return False
# Execute the per-user configuration file ~/.gpaw/rc.py if present.
# Note: execfile is a Python 2 builtin (removed in Python 3).
home = os.environ.get('HOME')
if home is not None:
    rc = os.path.join(home, '.gpaw', 'rc.py')
    if os.path.isfile(rc):
        # Read file in ~/.gpaw/rc.py
        execfile(rc)
| robwarm/gpaw-symm | gpaw/__init__.py | Python | gpl-3.0 | 10,531 | 0.001045 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Sends notification to search-server that it should update server index
#
import xmlrpclib
from spacewalk.common.rhnLog import log_error
class SearchNotify:
    """Thin XML-RPC client that asks the search server to rebuild an index.

    Python 2 module (uses ``except Exception, e`` syntax).
    """
    def __init__(self, host="127.0.0.1", port="2828"):
        # XML-RPC endpoint of the search service.
        self.addr = "http://%s:%s" % (host, port)
    def notify(self, indexName="server"):
        """Request an index update; return the server's result, or False
        (after logging) when the service cannot be reached."""
        try:
            client = xmlrpclib.ServerProxy(self.addr)
            result = client.admin.updateIndex(indexName)
        except Exception, e:
            log_error("Failed to notify search service located at %s to update %s indexes"
                      % (self.addr, indexName), e)
            return False
        return result
# Manual smoke test: trigger a notification and print the result
# (Python 2 print statement).
if __name__ == "__main__":
    search = SearchNotify()
    result = search.notify()
    print "search.notify() = %s" % (result)
| xkollar/spacewalk | backend/server/rhnServer/search_notify.py | Python | gpl-2.0 | 1,394 | 0.001435 |
#
# Copyright (c) 2016, AnyWi Technologies BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# UBNT airView alternative mockup for the Java client, with added functionality
# of storing data for later analytics
#
# Rick van der Zwet <rick.vanderzwet@anywi.com>
#
import requests
import telnetlib
import time
import sys
import numpy as np
import matplotlib
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.ticker as plticker
# Latest firmware versions use HTTPS self-signed certificates by default
requests.packages.urllib3.disable_warnings()
#requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
#try:
#    requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'
#except AttributeError:
#    # no pyopenssl support used / needed / available
#    pass
# Device credentials and endpoints (factory defaults for UBNT gear).
USERNAME = 'ubnt'
PASSWORD = 'ubnt'
HOST = "192.168.1.20"
PORT = 18888            # airView telnet control port on the device
TIMEOUT = 10            # seconds: connect timeout and settle delay
FRAME_SPEED = 1         # frames consumed per animation tick (raised for replay)
BASE_URI = 'https://' + HOST + ':443'
def usage():
    """Print command-line usage to stdout and terminate with exit code 128."""
    prog = sys.argv[0]
    for text in (
        "Usage:" + prog + " <live|replay FILENAME>",
        "",
        "Options:",
        "\tlive \t=\tProcess live data from device " + HOST,
        "\treplay FILENAME \t=\tReplay FILENAME",
        "\trecord FILENAME \t=\tMake movie of FILENAME",
    ):
        print(text)
    exit(128)
# Select the run mode from the command line; replay/record modes fast-forward
# through stored data (FRAME_SPEED = 50), live mode keeps real time.
if len(sys.argv) == 2 and sys.argv[1] == 'live':
    ACTION='live'
    FILENAME = None
elif len(sys.argv) == 3 and sys.argv[1] == 'replay':
    ACTION='replay'
    FILENAME = sys.argv[2] # Stored data processing
    FRAME_SPEED = 50
elif len(sys.argv) == 3 and sys.argv[1] == 'record':
    ACTION='record'
    FILENAME = sys.argv[2] # Stored data processing
    FRAME_SPEED = 50
else:
    usage()
def parse_get_frame_resp(line):
    """Parse a "FRAME: n,v1,v2,..." response line.

    Returns a tuple ``(frame_number, [bin_values])`` with everything as ints.
    Raises ValueError if the line does not contain exactly one ':'.
    """
    _, payload = line.split(':')
    numbers = [int(token) for token in payload.split(',')]
    return (numbers[0], numbers[1:])
#TODO: Make me dynamic parse from 'SCAN RANGE' response
# Default frequency span in Hz (2.402 - 2.497 GHz); replay mode overwrites
# these from the recorded 'SCAN RANGE' line.
scan_range_begin = 2402000000
scan_range_end = 2497000000
if not FILENAME:
    # ---- live mode: log in over HTTPS, enable airView, talk telnet ----
    print(("Enabling Ubiquiti airView at %s:%s@%s..." %(USERNAME, PASSWORD, HOST)))
    # Request session cookie
    s = requests.session()
    s.get(BASE_URI + '/login.cgi', verify=False)
    # Authenticate
    r = s.post(BASE_URI + '/login.cgi',
            {"username": USERNAME, "password": PASSWORD}, verify=False)
    r.raise_for_status()
    if 'Invalid credentials.' in r.text:
        print("# CRIT: Username/password invalid!")
        sys.exit(1)
    # Enable airView
    r = s.post(BASE_URI + '/airview.cgi',
            {"start": 1}, verify=False)
    r.raise_for_status()
    print("Waiting for device to enter airView modus...")
    # Allow device a few moments to settle
    time.sleep(TIMEOUT)
    print("Start scanning...")
    tn = telnetlib.Telnet(HOST, PORT, timeout=TIMEOUT)
    #tn.set_debuglevel(99)
    # Storage on unique files
    outfile = 'output-%s.dat' % int(time.time())
    print(("Storing output at '%s'" % outfile))
    fh = open(outfile, 'a')
    def writeline(cmd):
        """ Write line to device"""
        # Every exchange is mirrored to the log file as "timestamp\001payload"
        # so a later 'replay' run can reproduce the session.
        ts = time.time()
        tn.write(cmd.encode('ascii'))
        # NOTE(review): print() receives "%s" and the value as two separate
        # arguments, so the format string is never interpolated -- looks like
        # a leftover logging-style call; confirm before changing output format.
        print("Sending: %s", cmd.strip())
        fh.write("%s\001%s" % (ts, cmd))
        return ts
    def getline():
        """Read line from device"""
        line = tn.read_until(b"\n")
        # NOTE(review): same uninterpolated "%s" print() pattern as writeline().
        print('Received: %s', line.decode('ascii').strip())
        fh.write("%s\001%s" % (time.time(), line.decode('ascii')))
        return line.decode('ascii')
    # Commands needs to have a trailing space if no arguments specified
    writeline("CONNECT: \n")
    getline()
    #writeline("REQUEST RANGE: 2402000000,2407000000\n") # 5 MHz
    #writeline("REQUEST RANGE: 2402000000,2412000000\n") # 10 MHz
    #writeline("REQUEST RANGE: 2402000000,2417000000\n") # 15 MHz
    #writeline("REQUEST RANGE: 2402000000,2422000000\n") # 20 Mhz
    #writeline("REQUEST RANGE: 2402000000,2477000000\n") # (ch 1-11 - US allocation)
    #writeline("REQUEST RANGE: 2402000000,2487000000\n") # (ch 1-13 - UK allocation)
    #writeline("REQUEST RANGE: 2402000000,2497000000\n") # (ch 1-14)
    writeline("REQUEST RANGE: 5150000000,5250000000\n") # 5.150-5.250 (U-NII Lower Band)
    #writeline("REQUEST RANGE: 5250000000,5350000000\n") # 5.250-5.350 (U-NII Middle Band)
    #writeline("REQUEST RANGE: 5470000000,5725000000\n") # 5.470-5.725 (U-NII Worldwide)
    #writeline("REQUEST RANGE: 5150000000,5725000000\n") # (U-NII wide-spectrum)
    getline()
    writeline("START SCAN: \n")
    getline()
    print("Waiting for scan to start...")
    time.sleep(2)
    def get_frame(frame):
        """ Get frame from device airView """
        # TODO: Receiving frames in order, sometimes yield of empty responses. Already flush out maybe?
        #writeline("GET FRAME: %s\n" % frame)
        ts = writeline("GET FRAME: \n")
        line = getline()
        return((ts,) + parse_get_frame_resp(line))
else:
    # ---- replay mode: re-read a previously recorded "ts\001payload" log ----
    # No need for logic since we are processing stored data
    sh = open(FILENAME, 'r')
    def get_frame(frame):
        global scan_range_begin, scan_range_end
        """ Perform replay data processing """
        # Scans the log for the next FRAME line; SCAN RANGE lines update the
        # module-level frequency span on the fly. Returns (None, None, None)
        # at end of file, which the animation callback treats as "stop".
        while True:
            line = sh.readline()
            if not line:
                return(None, None, None)
            ts_raw, a = line.split('\001', 1)
            ts = float(ts_raw)
            cmd, ret = a.split(':', 1)
            if cmd == 'FRAME':
                return((ts,) + parse_get_frame_resp(a))
            elif cmd == 'SCAN RANGE':
                scan_range_begin, scan_range_end = map(int, ret.split(','))
def kHz(x):
    """Convert a value in Hz to kHz as a float."""
    return float(x) / 1000

def MHz(x):
    """Convert a value in Hz to MHz as a float."""
    return kHz(x) / 1000

def GHz(x):
    """Convert a value in Hz to GHz as a float."""
    return MHz(x) / 1000
# Get innitial frame number and bins sizes
# One probe frame tells us how many FFT bins the device reports, which fixes
# the width of the waterfall matrix and the kHz-per-bin resolution.
_, frame_nr, vals = get_frame(None)
bin_size = len(vals)
bin_sample_khz = kHz(scan_range_end - scan_range_begin) / bin_size
print(("Bin size: %s" % bin_size))
print('Scan range: %s - %s MHz (delta: %s MHz)' % (MHz(scan_range_begin), MHz(scan_range_end), MHz(scan_range_end - scan_range_begin)))
# Start making picture
fig, ax = plt.subplots(figsize=(20,11))
fig.canvas.set_window_title('UBNT airView Client')
ax.set_ylabel('100ms units elapsed')
ax.set_xlabel('Frequency (sampled with bins of %s kHz)' % bin_sample_khz)
# Plotting 2.4GHz channels
#a = [2402,2412,2417,2422,2427,2432,2437,2442,2447,2452,2457,2462,2467,2472,2484,2497]
#channels = (np.array(a,dtype='float32') - 2402) / (bin_sample_khz / 1000)
# Plotting 5GHz channels
channels = list(range(32,68,4)) + list(range(100,148,4)) + list(range(149,169,4))
# X axis: one tick per channel, labelled "freq_MHz (channel)".
xticks = []
xticklabels = []
for channel in channels:
    freq_mhz = 5000 + (channel * 5)
    xtick = freq_mhz - MHz(scan_range_begin)
    xticklabel = "%i (%s)" % (freq_mhz, channel)
    xticks.append(xtick)
    xticklabels.append(xticklabel)
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
plt.xticks(rotation=45)
# Plotting of 2.4 GHz channels
# Plot channel description
#for i in range(1,15):
#    width_20mhz = 20000.0 / bin_sample_khz
#    if i in [1,6,11,14]:
#        pac = mpatches.Arc([channels[i], 0], width_20mhz, 300,
#            theta2=180, linestyle='solid', linewidth=2, color='black')
#    else:
#        pac = mpatches.Arc([channels[i], 0], width_20mhz, 300,
#            theta2=180, linestyle='dashed', linewidth=2, color='black')
#    ax.add_patch(pac)
#ax.get_xaxis().set_major_formatter(
#    plticker.FuncFormatter(lambda x, p: format(int((x * bin_sample_khz) + 5000), ',')))
#    #    plticker.FuncFormatter(lambda x, p: format(int((x * bin_sample_khz / 1000) + 5000), ',')))
# Plotting 5GHz 20MHz-width channels
for channel in channels:
    freq_mhz = 5000 + (channel * 5)
    # NOTE(review): the next assignment is immediately overwritten and unused.
    xtick = (freq_mhz - 10) - MHz(scan_range_begin)
    xtick = freq_mhz - MHz(scan_range_begin)
    # Trapezoid marking the nominal 20 MHz channel mask.
    pac = mpatches.Polygon((
        (((freq_mhz - 10) - MHz(scan_range_begin)), 0),
        (((freq_mhz - 7.5) - MHz(scan_range_begin)), 20),
        (((freq_mhz + 7.5) - MHz(scan_range_begin)), 20),
        (((freq_mhz + 10) - MHz(scan_range_begin)), 0),
        ), linestyle='solid', linewidth=0, color='grey', alpha=0.4)
    ax.add_patch(pac)
plt.grid(linewidth=2,linestyle='solid',color='black')
plt.tight_layout()
# NOTE(review): this bbox/width/height computation is duplicated verbatim a
# few lines below; one copy is redundant.
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width*fig.dpi, bbox.height*fig.dpi
# Initial data and history of amount of pixels of the screen, since it is
# important that all lines are draw on the screen.
bbox = fig.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
width, height = bbox.width*fig.dpi, bbox.height*fig.dpi
# Waterfall history: one row per frame, pre-filled with NaN so unused rows
# render blank; values are dBm (colour scale -122 .. -30).
matrix = np.empty([int(height),bin_size]) * np.nan
pcm = ax.pcolorfast(matrix, vmin=-122, vmax=-30)
if ACTION == 'record':
    # Set up formatting for the movie files
    Writer = animation.writers['ffmpeg']
    writer = Writer(fps=15, metadata=dict(artist='AnyWi UBNT airViewer'), bitrate=1800)
def update(data):
    """Animation callback: pull the next frame(s) and scroll the waterfall.

    Consumes FRAME_SPEED frames per tick (fast-forward for replay), pushes the
    newest row onto the top of the waterfall matrix and refreshes the title.
    Returns early (stopping updates) when the replay file is exhausted.
    """
    global frame_nr, matrix
    # Fast forwarding in time
    for i in range(FRAME_SPEED):
        frame_nr_next = -1
        # The same frame (duplicated), we are too fast
        while frame_nr_next <= frame_nr:
            ts, frame_nr_next, row = get_frame(frame_nr + 1)
            # We are on the end of the file
            if not ts and not frame_nr_next and not row:
                return
        frame_nr = frame_nr_next
    #matrix = np.vstack([row, pcm.get_array()[:-1]])
    matrix = np.vstack([row, matrix[:-1]])
    pcm.set_array(matrix)
    ax.set_title('Frame %s at %s' % (frame_nr, time.asctime(time.localtime(ts))))
    #fig.canvas.draw()
# Drive the waterfall: one update() call every 100 ms.
ani = animation.FuncAnimation(fig, update, interval=100)
# Dual display and recording data does not seems to work, use a screencast
# program like gtk-recordmydesktop for that matter
if ACTION == 'record':
    ani.save('live.mp4' if not FILENAME else FILENAME.rsplit('.',1)[0] + '.mp4', writer=writer)
else:
    plt.show()
#
# Takes some time (10 seconds) for device to return to an active state
#
| AnyWi/py-ubnt-airviewer | airviewer.py | Python | bsd-2-clause | 11,342 | 0.0082 |
# coding: utf-8
from __future__ import unicode_literals
# created by: Han Feng (https://github.com/hanx11)
import collections
import hashlib
import logging
import requests
from wxpy.api.messages import Message
from wxpy.ext.talk_bot_utils import get_context_user_id, next_topic
from wxpy.utils.misc import get_text_without_at_bot
from wxpy.utils import enhance_connection
logger = logging.getLogger(__name__)
from wxpy.compatible import *
class XiaoI(object):
    """
    XiaoI chat bot, deeply integrated with wxpy
    """
    # noinspection SpellCheckingInspection
    def __init__(self, key, secret):
        """
        | A key and secret are required, obtained through registration
        | Free registration: http://cloud.xiaoi.com/

        :param key: the key you applied for
        :param secret: the secret you applied for
        """
        self.key = key
        self.secret = secret
        self.realm = "xiaoi.com"
        self.http_method = "POST"
        self.uri = "/ask.do"
        self.url = "http://nlp.xiaoi.com/ask.do?platform=custom"
        xauth = self._make_http_header_xauth()
        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept": "text/plain",
        }
        headers.update(xauth)
        self.session = requests.Session()
        self.session.headers.update(headers)
        enhance_connection(self.session)
    def _make_signature(self):
        """
        Generate the request signature (sha1 over key/realm/secret, nonce and
        method/uri, per the xiaoi.com X-Auth scheme)
        """
        # 40 random digit characters
        # nonce = "".join([str(randint(0, 9)) for _ in range(40)])
        # NOTE(review): the nonce is hard-coded instead of generated randomly;
        # presumably the API accepts any value -- confirm before reuse.
        nonce = "4103657107305326101203516108016101205331"
        sha1 = "{0}:{1}:{2}".format(self.key, self.realm, self.secret).encode("utf-8")
        sha1 = hashlib.sha1(sha1).hexdigest()
        sha2 = "{0}:{1}".format(self.http_method, self.uri).encode("utf-8")
        sha2 = hashlib.sha1(sha2).hexdigest()
        signature = "{0}:{1}:{2}".format(sha1, nonce, sha2).encode("utf-8")
        signature = hashlib.sha1(signature).hexdigest()
        # NOTE(review): attributes are assigned on the namedtuple *class*, not
        # an instance -- works for attribute access but is unusual.
        ret = collections.namedtuple("signature_return", "signature nonce")
        ret.signature = signature
        ret.nonce = nonce
        return ret
    def _make_http_header_xauth(self):
        """
        Build the X-Auth request authentication header
        """
        sign = self._make_signature()
        ret = {
            "X-Auth": "app_key=\"{0}\",nonce=\"{1}\",signature=\"{2}\"".format(
                self.key, sign.nonce, sign.signature)
        }
        return ret
    def do_reply(self, msg):
        """
        Reply to the message, and return the reply text

        :param msg: Message object
        :return: the reply text
        """
        ret = self.reply_text(msg)
        msg.reply(ret)
        return ret
    def reply_text(self, msg):
        """
        Return the reply text only

        :param msg: Message object, or message text
        :return: the reply text
        """
        # Canned server response meaning "no reply configured for this topic";
        # when detected we switch to a fresh topic instead of relaying it.
        error_response = (
            "主人还没给我设置这类话题的回复",
        )
        if isinstance(msg, Message):
            user_id = get_context_user_id(msg)
            question = get_text_without_at_bot(msg)
        else:
            user_id = "abc"
            question = msg or ""
        params = {
            "question": question,
            "format": "json",
            "platform": "custom",
            "userId": user_id,
        }
        resp = self.session.post(self.url, data=params)
        text = resp.text
        for err in error_response:
            if err in text:
                return next_topic()
        return text
| youfou/wxpy | wxpy/ext/xiaoi.py | Python | mit | 3,546 | 0.000897 |
import os
import warnings
import numpy as np
from pyrates.utility.genetic_algorithm import CGSGeneticAlgorithm
from pandas import DataFrame, read_hdf
from copy import deepcopy
class CustomGOA(CGSGeneticAlgorithm):
    """Genetic optimization of an STN-GPe-striatum circuit model.

    Specializes CGSGeneticAlgorithm by evaluating each candidate parameter set
    on a cluster grid search across several pharmacological blockade
    conditions and converting the per-gene loss into a fitness value.
    """
    def eval_fitness(self, target: list, **kwargs):
        """Run cluster simulations for the current population and fill in the
        'fitness' and 'results' columns of self.pop.

        :param target: per-condition target firing rates passed to the workers
        :param kwargs: merged into the grid search init_kwargs
        """
        # define simulation conditions
        worker_file = self.cgs_config['worker_file'] if 'worker_file' in self.cgs_config else None
        # Gene columns only: strip bookkeeping columns before dispatch.
        param_grid = self.pop.drop(['fitness', 'sigma', 'results'], axis=1)
        result_vars = ['r_e', 'r_p', 'r_a', 'r_m', 'r_f']
        freq_targets = [0.0, np.nan, np.nan, np.nan, np.nan]
        #param_grid, invalid_params = eval_params(param_grid)
        # Each dict scales specific coupling constants to emulate a drug
        # condition; an empty dict is the unmodified (healthy) circuit.
        conditions = [{},  # healthy control
                      {'k_pe': 0.2, 'k_ae': 0.2},  # AMPA blockade in GPe
                      {'k_pe': 0.2, 'k_ae': 0.2, 'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
                       'k_am': 0.2},  # AMPA blockade and GABAA blockade in GPe
                      {'k_pp': 0.2, 'k_pa': 0.2, 'k_pm': 0.2, 'k_aa': 0.2, 'k_ap': 0.2,
                       'k_am': 0.2},  # GABAA blockade in GPe
                      {'k_pe': 0.0, 'k_ae': 0.0},  # STN blockade
                      {'k_ep': 0.2},  # GABAA blocker in STN
                      ]
        # (param, reference, exponent) triples applied by the workers to
        # derive dependent parameters from the sampled genes.
        param_scalings = [
            ('delta_e', 'tau_e', 2.0),
            ('delta_p', 'tau_p', 2.0),
            ('delta_a', 'tau_a', 2.0),
            ('delta_m', 'tau_m', 2.0),
            ('delta_f', 'tau_f', 2.0),
            ('k_ee', 'delta_e', 0.5),
            ('k_ep', 'delta_e', 0.5),
            ('k_pe', 'delta_p', 0.5),
            ('k_pp', 'delta_p', 0.5),
            ('k_pa', 'tau_p', 0.5),
            ('k_pm', 'tau_p', 0.5),
            ('k_ae', 'tau_a', 0.5),
            ('k_ap', 'tau_a', 0.5),
            ('k_aa', 'tau_a', 0.5),
            ('k_am', 'tau_a', 0.5),
            ('k_mf', 'delta_m', 0.5),
            ('k_mm', 'delta_m', 0.5),
            ('k_fa', 'delta_f', 0.5),
            ('k_ff', 'delta_f', 0.5),
            ('eta_e', 'delta_e', 1.0),
            ('eta_p', 'delta_p', 1.0),
            ('eta_a', 'delta_a', 1.0),
            ('eta_m', 'delta_m', 1.0),
            ('eta_f', 'delta_f', 1.0),
        ]
        # Per-node batch sizes; order must match the cgs_config 'nodes' list.
        chunk_size = [
            60,   # carpenters
            100,  # osttimor
            60,   # spanien
            100,  # animals
            60,   # kongo
            60,   # tschad
            #100,  # uganda
            # 50,  # tiber
            #50,  # giraffe
            40,  # lech
            20,  # rilke
            12,  # dinkel
            #10,  # rosmarin
            #10,  # mosambik
            # 50,   # compute servers
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
            # 30,
            # 20,
            # 10,
            # 50,
            # 40,
        ]
        # perform simulations
        if len(param_grid) > 0:
            self.gs_config['init_kwargs'].update(kwargs)
            res_file = self.cgs.run(
                circuit_template=self.gs_config['circuit_template'],
                param_grid=deepcopy(param_grid),
                param_map=self.gs_config['param_map'],
                simulation_time=self.gs_config['simulation_time'],
                dt=self.gs_config['step_size'],
                inputs=self.gs_config['inputs'],
                outputs=self.gs_config['outputs'],
                sampling_step_size=self.gs_config['sampling_step_size'],
                permute=False,
                chunk_size=chunk_size,
                worker_file=worker_file,
                worker_env=self.cgs_config['worker_env'],
                gs_kwargs={'init_kwargs': self.gs_config['init_kwargs'], 'conditions': conditions,
                           'param_scalings': param_scalings},
                worker_kwargs={'y': target, 'time_lim': 7200.0, 'freq_targets': freq_targets},
                result_concat_axis=0)
            results_tmp = read_hdf(res_file, key=f'Results/results')
            # calculate fitness
            # Workers report a loss; fitness is its reciprocal (higher = better).
            for gene_id in param_grid.index:
                self.pop.at[gene_id, 'fitness'] = 1.0 / results_tmp.at[gene_id, 'fitness']
                self.pop.at[gene_id, 'results'] = [results_tmp.at[gene_id, v] for v in result_vars]
        # set fitness of invalid parametrizations
        #for gene_id in invalid_params.index:
        #    self.pop.at[gene_id, 'fitness'] = 0.0
        #    self.pop.at[gene_id, 'results'] = [0. for _ in result_vars]
def fitness(y, t):
    """Weighted L1 distance between an observation vector and a target vector.

    NaN entries in the target are ignored (zero error contribution); every
    remaining component is weighted by 1/|target| so that relative deviations
    are comparable across differently scaled quantities.
    """
    obs = np.asarray(y).flatten()
    goal = np.asarray(t).flatten()
    deltas = []
    for observed, wanted in zip(obs, goal):
        deltas.append(0.0 if np.isnan(wanted) else observed - wanted)
    deltas = np.asarray(deltas)
    goal[np.isnan(goal)] = 1.0
    goal[goal == 0] = 1.0
    return (1 / np.abs(goal)) @ np.abs(deltas)
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    # Gene pool: per-parameter sampling spec (bounds, mutation sigma, and the
    # loc/scale of the normal distribution used for initial sampling).
    pop_size = 1024
    pop_genes = {
        'k_ee': {'min': 0, 'max': 15, 'size': pop_size, 'sigma': 0.1, 'loc': 1.0, 'scale': 0.5},
        'k_ae': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_pe': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_pp': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_ep': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_ap': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_aa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_pa': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_fa': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_mm': {'min': 0, 'max': 50, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'k_am': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.8, 'loc': 40.0, 'scale': 4.0},
        'k_pm': {'min': 0, 'max': 200, 'size': pop_size, 'sigma': 0.5, 'loc': 5.0, 'scale': 1.0},
        'k_mf': {'min': 0, 'max': 150, 'size': pop_size, 'sigma': 0.5, 'loc': 20.0, 'scale': 2.0},
        'k_ff': {'min': 0, 'max': 100, 'size': pop_size, 'sigma': 0.5, 'loc': 10.0, 'scale': 1.0},
        'eta_e': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_p': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_a': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'eta_m': {'min': -10, 'max': 0, 'size': pop_size, 'sigma': 0.2, 'loc': -3.0, 'scale': 0.5},
        'eta_f': {'min': -5, 'max': 5, 'size': pop_size, 'sigma': 0.2, 'loc': 0.0, 'scale': 0.5},
        'delta_e': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.1, 'scale': 0.1},
        'delta_p': {'min': 0.01, 'max': 1.0, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        'delta_a': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.4, 'scale': 0.1},
        'delta_m': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        'delta_f': {'min': 0.01, 'max': 1.5, 'size': pop_size, 'sigma': 0.05, 'loc': 0.2, 'scale': 0.1},
        # Time constants are fixed (min == max, sigma == 0): not optimized.
        'tau_e': {'min': 12, 'max': 12, 'size': pop_size, 'sigma': 0.0, 'loc': 12.0, 'scale': 0.0},
        'tau_p': {'min': 24, 'max': 24, 'size': pop_size, 'sigma': 0.0, 'loc': 24.0, 'scale': 0.0},
        'tau_a': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        'tau_m': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        'tau_f': {'min': 20, 'max': 20, 'size': pop_size, 'sigma': 0.0, 'loc': 20.0, 'scale': 0.0},
        #'tau_ee_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 0.5, 'scale': 0.1},
        # 'tau_ei': {'min': 3.0, 'max': 5.0, 'size': 1, 'sigma': 0.1, 'loc': 4.0, 'scale': 0.1},
        #'tau_ei_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 1.0, 'scale': 0.2},
        # 'tau_ie': {'min': 2.0, 'max': 4.0, 'size': 1, 'sigma': 0.1, 'loc': 3.0, 'scale': 0.1},
        #'tau_ie_v': {'min': 0.8, 'max': 1.6, 'size': 2, 'sigma': 0.1, 'loc': 0.7, 'scale': 0.1},
        #'tau_ii_v': {'min': 0.5, 'max': 1.0, 'size': 2, 'sigma': 0.1, 'loc': 0.5, 'scale': 0.1},
    }
param_map = {
'k_ee': {'vars': ['weight'], 'edges': [('stn', 'stn')]},
'k_ae': {'vars': ['weight'], 'edges': [('stn', 'gpe_a')]},
'k_pe': {'vars': ['weight'], 'edges': [('stn', 'gpe_p')]},
'k_pp': {'vars': ['weight'], 'edges': [('gpe_p', 'gpe_p')]},
'k_ep': {'vars': ['weight'], 'edges': [('gpe_p', 'stn')]},
'k_ap': {'vars': ['weight'], 'edges': [('gpe_p', 'gpe_a')]},
'k_aa': {'vars': ['weight'], 'edges': [('gpe_a', 'gpe_a')]},
'k_pa': {'vars': ['weight'], 'edges': [('gpe_a', 'gpe_p')]},
'k_fa': {'vars': ['weight'], 'edges': [('gpe_a', 'fsi')]},
'k_mm': {'vars': ['weight'], 'edges': [('msn', 'msn')]},
'k_am': {'vars': ['weight'], 'edges': [('msn', 'gpe_a')]},
'k_pm': {'vars': ['weight'], 'edges': [('msn', 'gpe_p')]},
'k_ff': {'vars': ['weight'], 'edges': [('fsi', 'fsi')]},
'k_mf': {'vars': ['weight'], 'edges': [('fsi', 'msn')]},
'eta_e': {'vars': ['stn_op/eta_e'], 'nodes': ['stn']},
'eta_p': {'vars': ['gpe_proto_op/eta_i'], 'nodes': ['gpe_p']},
'eta_a': {'vars': ['gpe_arky_op/eta_a'], 'nodes': ['gpe_a']},
'eta_m': {'vars': ['str_msn_op/eta_s'], 'nodes': ['msn']},
'eta_f': {'vars': ['str_fsi_op/eta_f'], 'nodes': ['fsi']},
'delta_e': {'vars': ['stn_op/delta_e'], 'nodes': ['stn']},
'delta_p': {'vars': ['gpe_proto_op/delta_i'], 'nodes': ['gpe_p']},
'delta_a': {'vars': ['gpe_arky_op/delta_a'], 'nodes': ['gpe_a']},
'delta_m': {'vars': ['str_msn_op/delta_s'], 'nodes': ['msn']},
'delta_f': {'vars': ['str_fsi_op/delta_f'], 'nodes': ['fsi']},
'tau_e': {'vars': ['stn_op/tau_e'], 'nodes': ['stn']},
'tau_p': {'vars': ['gpe_proto_op/tau_i'], 'nodes': ['gpe_p']},
'tau_a': {'vars': ['gpe_arky_op/tau_a'], 'nodes': ['gpe_a']},
'tau_m': {'vars': ['str_msn_op/tau_s'], 'nodes': ['msn']},
'tau_f': {'vars': ['str_fsi_op/tau_f'], 'nodes': ['fsi']},
}
T = 2000.
dt = 1e-2
dts = 1e-1
compute_dir = f"{os.getcwd()}/stn_gpe_str_opt"
# perform genetic optimization
ga = CustomGOA(fitness_measure=fitness,
gs_config={
'circuit_template': f"{os.getcwd()}/config/stn_gpe/stn_gpe_str",
'permute_grid': True,
'param_map': param_map,
'simulation_time': T,
'step_size': dt,
'sampling_step_size': dts,
'inputs': {},
'outputs': {'r_e': "stn/stn_op/R_e", 'r_p': 'gpe_p/gpe_proto_op/R_i',
'r_a': 'gpe_a/gpe_arky_op/R_a', 'r_m': 'msn/str_msn_op/R_s',
'r_f': 'fsi/str_fsi_op/R_f'},
'init_kwargs': {'backend': 'numpy', 'solver': 'scipy', 'step_size': dt},
},
cgs_config={'nodes': [
'carpenters',
'osttimor',
'spanien',
'animals',
'kongo',
'tschad',
#'uganda',
# 'tiber',
#'giraffe',
'lech',
'rilke',
'dinkel',
#'rosmarin',
#'mosambik',
# 'comps06h01',
# 'comps06h02',
# 'comps06h03',
# 'comps06h04',
# 'comps06h05',
# 'comps06h06',
# 'comps06h07',
# 'comps06h08',
# 'comps06h09',
# 'comps06h10',
# 'comps06h11',
# 'comps06h12',
# 'comps06h13',
# 'comps06h14',
# 'scorpions',
# 'spliff',
# 'supertramp',
# 'ufo'
],
'compute_dir': compute_dir,
'worker_file': f'{os.getcwd()}/stn_gpe_str_worker.py',
'worker_env': "/data/u_rgast_software/anaconda3/envs/pyrates/bin/python3",
})
drop_save_dir = f'{compute_dir}/PopulationDrops/'
os.makedirs(drop_save_dir, exist_ok=True)
winner = ga.run(
initial_gene_pool=pop_genes,
gene_sampling_func=np.random.normal,
new_member_sampling_func=np.random.normal,
target=[[20, 60, 20, 2, 20], # healthy control
[np.nan, 2/3, np.nan, np.nan, np.nan], # ampa blockade in GPe
[np.nan, 1, np.nan, np.nan, np.nan], # ampa and gabaa blockade in GPe
[np.nan, 2, np.nan, np.nan, np.nan], # GABAA blockade in GPe
[np.nan, 1/2, np.nan, np.nan, np.nan], # STN blockade
[2, 2, np.nan, np.nan, np.nan], # GABAA blockade in STN
],
max_iter=100,
enforce_max_iter=True,
min_fit=1.0,
n_winners=10,
n_parent_pairs=40,
n_new=62,
sigma_adapt=0.05,
candidate_save=f'{compute_dir}/GeneticCGSCandidatestn.h5',
drop_save=drop_save_dir,
new_pop_on_drop=True,
pop_save=f'{drop_save_dir}/pop_summary',
permute=False
)
# winner.to_hdf(f'{drop_save_dir}/winner.h5', key='data')
| Richert/BrainNetworks | BasalGanglia/stn_gpe_str_opt.py | Python | apache-2.0 | 14,060 | 0.004481 |
# coding=utf-8
class _Webhooks:
def __init__(self, client=None):
self.client = client
def create_webhook(self, params=None, **options):
"""Establish a webhook
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks"
return self.client.post(path, params, **options)
def delete_webhook(self, webhook_gid, params=None, **options):
"""Delete a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.delete(path, params, **options)
def get_webhook(self, webhook_gid, params=None, **options):
"""Get a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.get(path, params, **options)
def get_webhooks(self, params=None, **options):
"""Get multiple webhooks
:param Object params: Parameters for the request
- workspace {str}: (required) The workspace to query for webhooks in.
- resource {str}: Only return webhooks for the given resource.
:param **options
- offset {str}: Offset token. An offset to the next page returned by the API. A pagination request will return an offset token, which can be used as an input parameter to the next request. If an offset is not passed in, the API will return the first page of results. 'Note: You can only pass in an offset that was returned to you via a previously paginated request.'
- limit {int}: Results per page. The number of objects to return per page. The value must be between 1 and 100.
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks"
return self.client.get_collection(path, params, **options)
def update_webhook(self, webhook_gid, params=None, **options):
"""Update a webhook
:param str webhook_gid: (required) Globally unique identifier for the webhook.
:param Object params: Parameters for the request
:param **options
- opt_fields {list[str]}: Defines fields to return. Some requests return *compact* representations of objects in order to conserve resources and complete the request more efficiently. Other times requests return more information than you may need. This option allows you to list the exact set of fields that the API should be sure to return for the objects. The field names should be provided as paths, described below. The id of included objects will always be returned, regardless of the field options.
- opt_pretty {bool}: Provides “pretty” output. Provides the response in a “pretty” format. In the case of JSON this means doing proper line breaking and indentation to make it readable. This will take extra time and increase the response size so it is advisable only to use this during debugging.
:return: Object
"""
if params is None:
params = {}
path = "/webhooks/{webhook_gid}".replace("{webhook_gid}", webhook_gid)
return self.client.put(path, params, **options)
| Asana/python-asana | asana/resources/gen/webhooks.py | Python | mit | 7,089 | 0.00227 |
"""Test safe_exec.py"""
from cStringIO import StringIO
import os.path
import textwrap
import unittest
import zipfile
from nose.plugins.skip import SkipTest
from codejail import safe_exec
class SafeExecTests(unittest.TestCase):
    """The tests for `safe_exec`, to be mixed into specific test classes."""
    # SafeExecTests is a TestCase so pylint understands the methods it can
    # call, but it's abstract, so stop nose from running the tests.
    __test__ = False
    def safe_exec(self, *args, **kwargs):
        """The function under test.

        This class will be mixed into subclasses that implement `safe_exec` to
        give the tests something to test.
        """
        raise NotImplementedError        # pragma: no cover
    def test_set_values(self):
        # Globals assigned inside the sandbox must be visible afterwards.
        globs = {}
        self.safe_exec("a = 17", globs)
        self.assertEqual(globs['a'], 17)
    def test_files_are_copied(self):
        globs = {}
        self.safe_exec(
            "a = 'Look: ' + open('hello.txt').read()", globs,
            files=[os.path.dirname(__file__) + "/hello.txt"]
        )
        self.assertEqual(globs['a'], 'Look: Hello there.\n')
    def test_python_path(self):
        globs = {}
        self.safe_exec(
            "import module; a = module.const", globs,
            python_path=[os.path.dirname(__file__) + "/pylib"]
        )
        self.assertEqual(globs['a'], 42)
    def test_functions_calling_each_other(self):
        globs = {}
        self.safe_exec(textwrap.dedent("""\
            def f():
                return 1723
            def g():
                return f()
            x = g()
            """), globs)
        self.assertEqual(globs['x'], 1723)
    def test_printing_stuff_when_you_shouldnt(self):
        # Stray stdout from the sandboxed code must not corrupt the result.
        globs = {}
        self.safe_exec("a = 17; print 'hi!'", globs)
        self.assertEqual(globs['a'], 17)
    def test_importing_lots_of_crap(self):
        globs = {}
        self.safe_exec(textwrap.dedent("""\
            from numpy import *
            a = 1723
            """), globs)
        self.assertEqual(globs['a'], 1723)
    def test_raising_exceptions(self):
        # Exceptions inside the sandbox surface as SafeExecException with the
        # original traceback text embedded in the message.
        globs = {}
        with self.assertRaises(safe_exec.SafeExecException) as what_happened:
            self.safe_exec(textwrap.dedent("""\
                raise ValueError("That's not how you pour soup!")
                """), globs)
        msg = str(what_happened.exception)
        # The result may be repr'd or not, so the backslash needs to be
        # optional in this match.
        self.assertRegexpMatches(
            msg,
            r"ValueError: That\\?'s not how you pour soup!"
        )
    def test_extra_files(self):
        # extra_files supplies (name, content) pairs materialized in the
        # sandbox working directory, including binary content.
        globs = {}
        extras = [
            ("extra.txt", "I'm extra!\n"),
            ("also.dat", "\x01\xff\x02\xfe"),
        ]
        self.safe_exec(textwrap.dedent("""\
            with open("extra.txt") as f:
                extra = f.read()
            with open("also.dat") as f:
                also = f.read().encode("hex")
            """), globs, extra_files=extras)
        self.assertEqual(globs['extra'], "I'm extra!\n")
        self.assertEqual(globs['also'], "01ff02fe")
    def test_extra_files_as_pythonpath_zipfile(self):
        # A zip shipped via extra_files can be put on python_path and its
        # modules imported from inside the sandbox.
        zipstring = StringIO()
        zipf = zipfile.ZipFile(zipstring, "w")
        zipf.writestr("zipped_module1.py", textwrap.dedent("""\
            def func1(x):
                return 2*x + 3
            """))
        zipf.writestr("zipped_module2.py", textwrap.dedent("""\
            def func2(s):
                return "X" + s + s + "X"
            """))
        zipf.close()
        globs = {}
        extras = [("code.zip", zipstring.getvalue())]
        self.safe_exec(textwrap.dedent("""\
            import zipped_module1 as zm1
            import zipped_module2 as zm2
            a = zm1.func1(10)
            b = zm2.func2("hello")
            """), globs, python_path=["code.zip"], extra_files=extras)
        self.assertEqual(globs['a'], 23)
        self.assertEqual(globs['b'], "XhellohelloX")
class TestSafeExec(SafeExecTests, unittest.TestCase):
    """Run SafeExecTests, with the real safe_exec."""
    __test__ = True
    def safe_exec(self, *args, **kwargs):
        # Delegate to the real sandboxed implementation.
        safe_exec.safe_exec(*args, **kwargs)
class TestNotSafeExec(SafeExecTests, unittest.TestCase):
    """Run SafeExecTests, with not_safe_exec."""
    # Collection flag for the test runner: run the shared tests in this
    # concrete subclass.
    __test__ = True
    def setUp(self):
        # If safe_exec is actually an alias to not_safe_exec, then there's no
        # point running these tests.
        if safe_exec.UNSAFE: # pragma: no cover
            raise SkipTest
    def safe_exec(self, *args, **kwargs):
        # Delegate to the unsandboxed fallback implementation.
        safe_exec.not_safe_exec(*args, **kwargs)
| GbalsaC/bitnamiP | venv/src/codejail/codejail/tests/test_safe_exec.py | Python | agpl-3.0 | 4,712 | 0 |
'''
Created on Dec 3, 2014
@author: gearsad
'''
import sys
from roverpylot import rover
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
import lcm
# Try to start OpenCV for video
try:
import cv
except:
cv = None
class LCMRover(rover.Rover):
'''
A rover using LCM for control and camera feed upstream
'''
def Initialize(self, botname):
'''
Init the rover and store the name
'''
self.__botname = botname
self.__lcm = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__controlSubscription = self.__lcm.subscribe("ARNerve_Bot_Control_" + self.__botname, self.UpdateBotControlHandler)
self.__lightsOn = 0
self.__infraredOn = 0
def processVideo(self, jpegbytes):
#try:
camUpdate = bot_update_t()
camUpdate.name = self.__botname
camUpdate.numBytes_cameraFrameJpeg = len(jpegbytes)
camUpdate.cameraFrameJpeg = jpegbytes
# Get the battery health as well
battery = self.getBatteryPercentage()
camUpdate.batteryPercentage = battery
self.__lcm.publish("ARNerve_Bot_Update_" + self.__botname, camUpdate.encode())
#except:
# print "Exception", sys.exc_info()[0]
# pass
def Update(self):
'''
Update the LCM
'''
self.__lcm.handle()
def Disconnect(self):
self.lc.unsubscribe(self.__controlSubscription)
def UpdateBotControlHandler(self, channel, data):
'''
Get the updated bot parameters and send them to the bot.
'''
controlParams = bot_control_command_t.decode(data)
# Check if it is the right bot.
if self.__botname != controlParams.name:
return
self.setTreads(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
print "Setting the treads to {0}, {1}".format(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
if self.__lightsOn != controlParams.isLightsOn:
if controlParams.isLightsOn != 0:
self.turnLightsOn()
else:
self.turnLightsOff()
self.__lightsOn = controlParams.isLightsOn
if self.__infraredOn != controlParams.isInfraredOn:
if controlParams.isInfraredOn != 0:
self.turnInfraredOn()
else:
self.turnInfraredOff()
self.__infraredOn = controlParams.isInfraredOn
| GearsAD/semisorted_arnerve | arnerve_bot/arnerve_bot/LCMRover.py | Python | mit | 2,720 | 0.009926 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio-Previewer-ISPY
# Copyright (C) 2014 CERN
#
# Invenio-Previewer-ISPY is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio-Previewer-ISPY is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio-Previewer-ISPY; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio-Previewer-ISPY testsuite."""
| tpmccauley/invenio-previewer-ispy | tests/__init__.py | Python | gpl-2.0 | 871 | 0.001148 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Stackdriver Logging API"""
import traceback
import google.cloud.logging.client
import six
class HTTPContext(object):
    """Parameters for the httpRequest section of an Error Reporting report.

    Every field is optional and defaults to ``None``.

    :type method: str
    :param method: The HTTP verb of the request (GET, POST, etc.).

    :type url: str
    :param url: The URL of the request.

    :type user_agent: str
    :param user_agent: The user agent string supplied with the request.

    :type referrer: str
    :param referrer: The referrer supplied with the request.

    :type response_status_code: int
    :param response_status_code: The HTTP status code of the response.

    :type remote_ip: str
    :param remote_ip: Origin IP of the request (IPv4, IPv6, or a token
                      derived from the address).
    """

    def __init__(self, method=None, url=None,
                 user_agent=None, referrer=None,
                 response_status_code=None, remote_ip=None):
        # Attribute names are intentionally camel-cased to mirror the JSON
        # field names the Error Reporting API expects, so that __dict__ can
        # be serialized directly.
        # pylint: disable=invalid-name
        self.method = method
        self.url = url
        self.userAgent = user_agent
        self.referrer = referrer
        self.responseStatusCode = response_status_code
        self.remoteIp = remote_ip
class Client(object):
    """Error Reporting client. Currently Error Reporting is done by creating
    a Logging client.

    :type project: str
    :param project: the project which the client acts on behalf of. If not
                    passed falls back to the default inferred from the
                    environment.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
                       :class:`NoneType`
    :param credentials: The OAuth2 Credentials to use for the connection
                        owned by this client. If not passed (and if no ``http``
                        object is passed), falls back to the default inferred
                        from the environment.

    :type http: :class:`httplib2.Http` or class that defines ``request()``.
    :param http: An optional HTTP object to make requests. If not passed, an
                 ``http`` object is created that is bound to the
                 ``credentials`` for the current object.

    :type service: str
    :param service: An identifier of the service, such as the name of the
                    executable, job, or Google App Engine service name. This
                    field is expected to have a low number of values that are
                    relatively stable over time, as opposed to version,
                    which can be changed whenever new code is deployed.

    :type version: str
    :param version: Represents the source code version that the developer
                    provided, which could represent a version label or a Git
                    SHA-1 hash, for example. If the developer did not provide
                    a version, the value is set to default.

    :raises: :class:`ValueError` if the project is neither passed in nor
             set in the environment.
    """
    def __init__(self, project=None,
                 credentials=None,
                 http=None,
                 service=None,
                 version=None):
        # Reports are delivered by writing structured entries through the
        # Logging API, so all transport concerns live in the Logging client.
        self.logging_client = google.cloud.logging.client.Client(
            project, credentials, http)
        self.service = service if service else self.DEFAULT_SERVICE
        self.version = version

    # Fallback value for ``service`` when the caller does not supply one.
    DEFAULT_SERVICE = 'python'

    def _send_error_report(self, message,
                           report_location=None, http_context=None, user=None):
        """Makes the call to the Error Reporting API via the log stream.

        This is the lower-level interface to build the payload, generally
        users will use either report() or report_exception() to automatically
        gather the parameters for this method.

        Currently this method sends the Error Report by formatting a structured
        log message according to

        https://cloud.google.com/error-reporting/docs/formatting-error-messages

        :type message: str
        :param message: The stack trace that was reported or logged by the
                    service.

        :type report_location: dict
        :param report_location:  The location in the source code where the
            decision was made to report the error, usually the place
            where it was logged. For a logged exception this would be the
            source line where the exception is logged, usually close to
            the place where it was caught.

            This should be a Python dict that contains the keys 'filePath',
            'lineNumber', and 'functionName'

        :type http_context: :class`google.cloud.error_reporting.HTTPContext`
        :param http_context: The HTTP request which was processed when the
                             error was triggered.

        :type user: str
        :param user: The user who caused or was affected by the crash. This can
                     be a user ID, an email address, or an arbitrary token that
                     uniquely identifies the user. When sending an error
                     report, leave this field empty if the user was not
                     logged in. In this case the Error Reporting system will
                     use other data, such as remote IP address,
                     to distinguish affected users.
        """
        # The payload shape must match the structured-log format that the
        # Error Reporting backend parses; see the URL in the docstring.
        payload = {
            'serviceContext': {
                'service': self.service,
            },
            'message': '{0}'.format(message)
        }

        if self.version:
            payload['serviceContext']['version'] = self.version

        # 'context' is only attached when there is something to put in it.
        if report_location or http_context or user:
            payload['context'] = {}

        if report_location:
            payload['context']['reportLocation'] = report_location

        if http_context:
            http_context_dict = http_context.__dict__
            # strip out None values
            payload['context']['httpContext'] = {
                key: value for key, value in six.iteritems(http_context_dict)
                if value is not None
            }

        if user:
            payload['context']['user'] = user

        logger = self.logging_client.logger('errors')
        logger.log_struct(payload)

    def report(self, message, http_context=None, user=None):
        """ Reports a message to Stackdriver Error Reporting
        https://cloud.google.com/error-reporting/docs/formatting-error-messages

        :type message: str
        :param message: A user-supplied message to report

        :type http_context: :class`google.cloud.error_reporting.HTTPContext`
        :param http_context: The HTTP request which was processed when the
                             error was triggered.

        :type user: str
        :param user: The user who caused or was affected by the crash. This
                     can be a user ID, an email address, or an arbitrary
                     token that uniquely identifies the user. When sending
                     an error report, leave this field empty if the user
                     was not logged in. In this case the Error Reporting
                     system will use other data, such as remote IP address,
                     to distinguish affected users.

        Example:

        .. code-block:: python

             >>> client.report("Something went wrong!")
        """
        # stack[-1] is this frame; stack[-2] is the caller of report(), which
        # is the most useful "report location" to attach to the payload.
        stack = traceback.extract_stack()
        last_call = stack[-2]
        file_path = last_call[0]
        line_number = last_call[1]
        function_name = last_call[2]
        report_location = {
            'filePath': file_path,
            'lineNumber': line_number,
            'functionName': function_name
        }

        self._send_error_report(message,
                                http_context=http_context,
                                user=user,
                                report_location=report_location)

    def report_exception(self, http_context=None, user=None):
        """ Reports the details of the latest exceptions to Stackdriver Error
            Reporting.

        :type http_context: :class`google.cloud.error_reporting.HTTPContext`
        :param http_context: The HTTP request which was processed when the
                             error was triggered.

        :type user: str
        :param user: The user who caused or was affected by the crash. This
                     can be a user ID, an email address, or an arbitrary
                     token that uniquely identifies the user. When sending an
                     error report, leave this field empty if the user was
                     not logged in. In this case the Error Reporting system
                     will use other data, such as remote IP address,
                     to distinguish affected users.

         Example::

            >>>     try:
            >>>         raise NameError
            >>>     except Exception:
            >>>         client.report_exception()
        """
        # Must be called from inside an ``except`` block so that
        # traceback.format_exc() has an active exception to format.
        self._send_error_report(traceback.format_exc(),
                                http_context=http_context,
                                user=user)
| jgeewax/gcloud-python | error_reporting/google/cloud/error_reporting/client.py | Python | apache-2.0 | 10,193 | 0 |
#!/usr/bin/env python3
import urllib
import codecs
from bs4 import BeautifulSoup
from sys import argv
import re,time
class Translate:
    """Look up a word on dict.baidu.com and print the cleaned definition."""
    def start(self):
        """Fetch, extract, strip and print the dictionary entry."""
        self._get_html_sourse()
        self._get_content("enc")
        self._remove_tag()
        self.print_result()
    def _get_html_sourse(self):
        """Download the result page for argv[1]; the page is GB2312-encoded."""
        # BUG FIX: the original used urllib.unicode / urllib.urlopen, neither
        # of which exists on Python 3 (the shebang requests python3).
        import urllib.request
        word = argv[1] if len(argv) > 1 else ''
        url = "http://dict.baidu.com/s?wd=%s&tn=dict" % word
        raw = urllib.request.urlopen(url).read()
        self.htmlsourse = raw.decode("gb2312", "ignore")
    def _get_content(self, div_id):
        """Keep only the <div> with the given id from the page source."""
        soup = BeautifulSoup(self.htmlsourse, "lxml")
        self.data = str(soup.find("div", {"id": div_id}))
    def _remove_tag(self):
        """Flatten self.data to plain text by keeping only string nodes."""
        soup = BeautifulSoup(self.data, "lxml")
        # BUG FIX: `unicode` is undefined on Python 3; text nodes are str.
        self.outtext = ''.join(
            element for element in soup.recursiveChildGenerator()
            if isinstance(element, str))
    def print_result(self):
        """Insert line breaks before numbered senses and print the text."""
        for item in range(1, 10):
            self.outtext = self.outtext.replace(str(item), "\n%s" % str(item))
        self.outtext = self.outtext.replace(" ", "\n")
        print(self.outtext)
# from outofmemory.cn
if __name__ == "__main__":
    # BUG FIX: the class defines start(), not Start(); the original call
    # raised AttributeError before doing any work.
    Translate().start()
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import uuid
import fixtures
import mock
import oslo_config.fixture
from oslo_db.sqlalchemy import migration
from oslo_log import log
from six.moves import configparser
from six.moves import range
from testtools import matchers
from keystone.auth import controllers
from keystone.cmd import cli
from keystone.cmd.doctor import caching
from keystone.cmd.doctor import credential
from keystone.cmd.doctor import database as doc_database
from keystone.cmd.doctor import debug
from keystone.cmd.doctor import federation
from keystone.cmd.doctor import ldap
from keystone.cmd.doctor import security_compliance
from keystone.cmd.doctor import tokens
from keystone.cmd.doctor import tokens_fernet
from keystone.common import dependency
from keystone.common.sql import upgrades
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone.identity.mapping_backends import mapping as identity_mapping
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit.ksfixtures import ldapdb
CONF = keystone.conf.CONF
class CliTestCase(unit.SQLDriverOverrides, unit.TestCase):
    """Smoke-test `keystone-manage token_flush` against a SQL backend."""
    def config_files(self):
        """Append the SQL backend config to the inherited config files."""
        files = super(CliTestCase, self).config_files()
        files.append(unit.dirs.tests_conf('backend_sql.conf'))
        return files
    def test_token_flush(self):
        """token_flush should run cleanly against a fresh database."""
        self.useFixture(database.Database())
        self.load_backends()
        cli.TokenFlush.main()
class CliNoConfigTestCase(unit.BaseTestCase):
    """keystone-manage should warn (not fail) when no config file exists."""
    def setUp(self):
        self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
        self.config_fixture.register_cli_opt(cli.command_opt)
        # Simulate a system with no keystone config files installed.
        self.useFixture(fixtures.MockPatch(
            'oslo_config.cfg.find_config_files', return_value=[]))
        super(CliNoConfigTestCase, self).setUp()

        # NOTE(crinkle): the command call doesn't have to actually work,
        # that's what the other unit tests are for. So just mock it out.
        class FakeConfCommand(object):
            def __init__(self):
                self.cmd_class = mock.Mock()
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', FakeConfCommand()))

        self.logging = self.useFixture(fixtures.FakeLogger(level=log.WARN))

    def test_cli(self):
        # The warning is emitted on the logger; the command itself is mocked.
        expected_msg = 'Config file not found, using default configs.'
        cli.main(argv=['keystone-manage', 'db_sync'])
        self.assertThat(self.logging.output, matchers.Contains(expected_msg))
class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase):
    """End-to-end tests for `keystone-manage bootstrap`."""
    def setUp(self):
        self.useFixture(database.Database())
        super(CliBootStrapTestCase, self).setUp()

    def config_files(self):
        """Register the CLI option and add the SQL backend config."""
        self.config_fixture.register_cli_opt(cli.command_opt)
        config_files = super(CliBootStrapTestCase, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
        return config_files

    def config(self, config_files):
        # A random bootstrap password is passed on the CLI for each run.
        CONF(args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex],
             project='keystone',
             default_config_files=config_files)

    def test_bootstrap(self):
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)

    def _do_test_bootstrap(self, bootstrap):
        """Run bootstrap and verify every resource it is expected to create."""
        bootstrap.do_bootstrap()
        project = bootstrap.resource_manager.get_project_by_name(
            bootstrap.project_name,
            'default')
        user = bootstrap.identity_manager.get_user_by_name(
            bootstrap.username,
            'default')
        role = bootstrap.role_manager.get_role(bootstrap.role_id)
        role_list = (
            bootstrap.assignment_manager.get_roles_for_user_and_project(
                user['id'],
                project['id']))
        # NOTE(review): assertIs on ints relies on CPython small-int caching;
        # assertEqual would be clearer here.
        self.assertIs(1, len(role_list))
        self.assertEqual(role_list[0], role['id'])
        # NOTE(morganfainberg): Pass an empty context, it isn't used by
        # `authenticate` method.
        bootstrap.identity_manager.authenticate(
            self.make_request(),
            user['id'],
            bootstrap.password)

        # Region/service/endpoints are only created when the corresponding
        # bootstrap arguments were supplied (see the environment subclass).
        if bootstrap.region_id:
            region = bootstrap.catalog_manager.get_region(bootstrap.region_id)
            self.assertEqual(self.region_id, region['id'])

        if bootstrap.service_id:
            svc = bootstrap.catalog_manager.get_service(bootstrap.service_id)
            self.assertEqual(self.service_name, svc['name'])

            self.assertEqual(set(['admin', 'public', 'internal']),
                             set(bootstrap.endpoints))

            urls = {'public': self.public_url,
                    'internal': self.internal_url,
                    'admin': self.admin_url}

            for interface, url in urls.items():
                endpoint_id = bootstrap.endpoints[interface]
                endpoint = bootstrap.catalog_manager.get_endpoint(endpoint_id)

                self.assertEqual(self.region_id, endpoint['region_id'])
                self.assertEqual(url, endpoint['url'])
                self.assertEqual(svc['id'], endpoint['service_id'])
                self.assertEqual(interface, endpoint['interface'])

    def test_bootstrap_is_idempotent_when_password_does_not_change(self):
        # NOTE(morganfainberg): Ensure we can run bootstrap with the same
        # configuration multiple times without erroring.
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)
        v3_token_controller = controllers.Auth()
        v3_password_data = {
            'identity': {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": bootstrap.username,
                        "password": bootstrap.password,
                        "domain": {
                            "id": CONF.identity.default_domain_id
                        }
                    }
                }
            }
        }
        auth_response = v3_token_controller.authenticate_for_token(
            self.make_request(), v3_password_data)
        token = auth_response.headers['X-Subject-Token']
        self._do_test_bootstrap(bootstrap)
        # build validation request
        request = self.make_request(is_admin=True)
        request.context_dict['subject_token_id'] = token
        # Make sure the token we authenticate for is still valid.
        v3_token_controller.validate_token(request)

    def test_bootstrap_is_not_idempotent_when_password_does_change(self):
        # NOTE(lbragstad): Ensure bootstrap isn't idempotent when run with
        # different arguments or configuration values.
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)
        v3_token_controller = controllers.Auth()
        v3_password_data = {
            'identity': {
                "methods": ["password"],
                "password": {
                    "user": {
                        "name": bootstrap.username,
                        "password": bootstrap.password,
                        "domain": {
                            "id": CONF.identity.default_domain_id
                        }
                    }
                }
            }
        }
        auth_response = v3_token_controller.authenticate_for_token(
            self.make_request(), v3_password_data)
        token = auth_response.headers['X-Subject-Token']
        # Re-running bootstrap with a different password (via the env var)
        # must update the account and revoke existing tokens.
        os.environ['OS_BOOTSTRAP_PASSWORD'] = uuid.uuid4().hex
        self._do_test_bootstrap(bootstrap)
        # build validation request
        request = self.make_request(is_admin=True)
        request.context_dict['subject_token_id'] = token
        # Since the user account was recovered with a different password, we
        # shouldn't be able to validate this token. Bootstrap should have
        # persisted a revocation event because the user's password was updated.
        # Since this token was obtained using the original password, it should
        # now be invalid.
        self.assertRaises(
            exception.TokenNotFound,
            v3_token_controller.validate_token,
            request
        )

    def test_bootstrap_recovers_user(self):
        bootstrap = cli.BootStrap()
        self._do_test_bootstrap(bootstrap)

        # Completely lock the user out.
        user_id = bootstrap.identity_manager.get_user_by_name(
            bootstrap.username,
            'default')['id']
        bootstrap.identity_manager.update_user(
            user_id,
            {'enabled': False,
             'password': uuid.uuid4().hex})

        # The second bootstrap run will recover the account.
        self._do_test_bootstrap(bootstrap)

        # Sanity check that the original password works again.
        bootstrap.identity_manager.authenticate(
            self.make_request(),
            user_id,
            bootstrap.password)

    def test_bootstrap_creates_default_role(self):
        bootstrap = cli.BootStrap()
        # The member role must not pre-exist; bootstrap is what creates it.
        try:
            role = bootstrap.role_manager.get_role(CONF.member_role_id)
            self.fail('Member Role is created and should not be.')
        except exception.RoleNotFound:
            pass

        self._do_test_bootstrap(bootstrap)
        role = bootstrap.role_manager.get_role(CONF.member_role_id)
        self.assertEqual(role['name'], CONF.member_role_name)
        self.assertEqual(role['id'], CONF.member_role_id)
class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase):
    """Re-run the bootstrap tests with all inputs taken from OS_BOOTSTRAP_*
    environment variables instead of CLI arguments."""
    def config(self, config_files):
        # No CLI arguments here - everything comes from the environment.
        CONF(args=['bootstrap'], project='keystone',
             default_config_files=config_files)

    def setUp(self):
        super(CliBootStrapTestCaseWithEnvironment, self).setUp()
        # Random values for every bootstrap input; each is exported through
        # an EnvironmentVariable fixture below.
        self.password = uuid.uuid4().hex
        self.username = uuid.uuid4().hex
        self.project_name = uuid.uuid4().hex
        self.role_name = uuid.uuid4().hex
        self.service_name = uuid.uuid4().hex
        self.public_url = uuid.uuid4().hex
        self.internal_url = uuid.uuid4().hex
        self.admin_url = uuid.uuid4().hex
        self.region_id = uuid.uuid4().hex
        self.default_domain = {
            'id': CONF.identity.default_domain_id,
            'name': 'Default',
        }
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PASSWORD',
                                         newvalue=self.password))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_USERNAME',
                                         newvalue=self.username))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PROJECT_NAME',
                                         newvalue=self.project_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_ROLE_NAME',
                                         newvalue=self.role_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_SERVICE_NAME',
                                         newvalue=self.service_name))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_PUBLIC_URL',
                                         newvalue=self.public_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_INTERNAL_URL',
                                         newvalue=self.internal_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_ADMIN_URL',
                                         newvalue=self.admin_url))
        self.useFixture(
            fixtures.EnvironmentVariable('OS_BOOTSTRAP_REGION_ID',
                                         newvalue=self.region_id))

    def test_assignment_created_with_user_exists(self):
        # test assignment can be created if user already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        user_ref = unit.new_user_ref(self.default_domain['id'],
                                     name=self.username,
                                     password=self.password)
        bootstrap.identity_manager.create_user(user_ref)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_project_exists(self):
        # test assignment can be created if project already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        project_ref = unit.new_project_ref(self.default_domain['id'],
                                           name=self.project_name)
        bootstrap.resource_manager.create_project(project_ref['id'],
                                                  project_ref)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_role_exists(self):
        # test assignment can be created if role already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        role = unit.new_role_ref(name=self.role_name)
        bootstrap.role_manager.create_role(role['id'], role)
        self._do_test_bootstrap(bootstrap)

    def test_assignment_created_with_region_exists(self):
        # test assignment can be created if region already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        region = unit.new_region_ref(id=self.region_id)
        bootstrap.catalog_manager.create_region(region)
        self._do_test_bootstrap(bootstrap)

    def test_endpoints_created_with_service_exists(self):
        # test assignment can be created if service already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        service = unit.new_service_ref(name=self.service_name)
        bootstrap.catalog_manager.create_service(service['id'], service)
        self._do_test_bootstrap(bootstrap)

    def test_endpoints_created_with_endpoint_exists(self):
        # test assignment can be created if endpoint already exists.
        bootstrap = cli.BootStrap()
        bootstrap.resource_manager.create_domain(self.default_domain['id'],
                                                 self.default_domain)
        service = unit.new_service_ref(name=self.service_name)
        bootstrap.catalog_manager.create_service(service['id'], service)

        region = unit.new_region_ref(id=self.region_id)
        bootstrap.catalog_manager.create_region(region)

        endpoint = unit.new_endpoint_ref(interface='public',
                                         service_id=service['id'],
                                         url=self.public_url,
                                         region_id=self.region_id)
        bootstrap.catalog_manager.create_endpoint(endpoint['id'], endpoint)

        self._do_test_bootstrap(bootstrap)
class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase):
    """Tests for `keystone-manage domain_config_upload --all`."""
    def setUp(self):
        self.useFixture(database.Database())
        super(CliDomainConfigAllTestCase, self).setUp()
        self.load_backends()
        # Point at the canned multi-LDAP per-domain config files used by
        # the assertions in test_config_upload.
        self.config_fixture.config(
            group='identity',
            domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap')
        self.domain_count = 3
        self.setup_initial_domains()
        self.logging = self.useFixture(
            fixtures.FakeLogger(level=logging.INFO))

    def config_files(self):
        self.config_fixture.register_cli_opt(cli.command_opt)
        config_files = super(CliDomainConfigAllTestCase, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
        return config_files

    def cleanup_domains(self):
        """Disable and delete the test domains created in setup."""
        for domain in self.domains:
            if domain == 'domain_default':
                # Not allowed to delete the default domain, but should at least
                # delete any domain-specific config for it.
                self.domain_config_api.delete_config(
                    CONF.identity.default_domain_id)
                continue
            this_domain = self.domains[domain]
            this_domain['enabled'] = False
            self.resource_api.update_domain(this_domain['id'], this_domain)
            self.resource_api.delete_domain(this_domain['id'])
        self.domains = {}

    def config(self, config_files):
        CONF(args=['domain_config_upload', '--all'], project='keystone',
             default_config_files=config_files)

    def setup_initial_domains(self):
        """Create domain1..domainN plus the default domain."""

        def create_domain(domain):
            return self.resource_api.create_domain(domain['id'], domain)

        self.domains = {}
        self.addCleanup(self.cleanup_domains)
        for x in range(1, self.domain_count):
            domain = 'domain%s' % x
            self.domains[domain] = create_domain(
                {'id': uuid.uuid4().hex, 'name': domain})
        self.default_domain = unit.new_domain_ref(
            description=u'The default domain',
            id=CONF.identity.default_domain_id,
            name=u'Default')
        self.domains['domain_default'] = create_domain(self.default_domain)

    def test_config_upload(self):
        # The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
        default_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap'}
        }
        domain1_config = {
            'ldap': {'url': 'fake://memory1',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap',
                         'list_limit': '101'}
        }
        domain2_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=myroot,cn=com',
                     'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org',
                     'user_tree_dn': 'ou=Users,dc=myroot,dc=org'},
            'identity': {'driver': 'ldap'}
        }

        # Clear backend dependencies, since cli loads these manually
        dependency.reset()
        cli.DomainConfigUpload.main()

        res = self.domain_config_api.get_config_with_sensitive_info(
            CONF.identity.default_domain_id)
        self.assertEqual(default_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain1']['id'])
        self.assertEqual(domain1_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain2']['id'])
        self.assertEqual(domain2_config, res)
class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase):
    """domain_config_upload with --domain-name uploads only that domain."""
    def config(self, config_files):
        CONF(args=['domain_config_upload', '--domain-name', 'Default'],
             project='keystone', default_config_files=config_files)

    def test_config_upload(self):
        # The values below are the same as in the domain_configs_multi_ldap
        # directory of test config_files.
        default_config = {
            'ldap': {'url': 'fake://memory',
                     'user': 'cn=Admin',
                     'password': 'password',
                     'suffix': 'cn=example,cn=com'},
            'identity': {'driver': 'ldap'}
        }

        # Clear backend dependencies, since cli loads these manually
        dependency.reset()
        cli.DomainConfigUpload.main()

        # Only the default domain should have been uploaded; the others
        # must remain empty.
        res = self.domain_config_api.get_config_with_sensitive_info(
            CONF.identity.default_domain_id)
        self.assertEqual(default_config, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain1']['id'])
        self.assertEqual({}, res)
        res = self.domain_config_api.get_config_with_sensitive_info(
            self.domains['domain2']['id'])
        self.assertEqual({}, res)

    def test_no_overwrite_config(self):
        # Create a config for the default domain
        default_config = {
            'ldap': {'url': uuid.uuid4().hex},
            'identity': {'driver': 'ldap'}
        }
        self.domain_config_api.create_config(
            CONF.identity.default_domain_id, default_config)

        # Now try and upload the settings in the configuration file for the
        # default domain
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            file_name = ('keystone.%s.conf' % self.default_domain['name'])
            error_msg = _(
                'Domain: %(domain)s already has a configuration defined - '
                'ignoring file: %(file)s.') % {
                    'domain': self.default_domain['name'],
                    'file': os.path.join(CONF.identity.domain_config_dir,
                                         file_name)}
            mock_print.assert_has_calls([mock.call(error_msg)])

        res = self.domain_config_api.get_config(
            CONF.identity.default_domain_id)
        # The initial config should not have been overwritten
        self.assertEqual(default_config, res)
class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase):
    """domain_config_upload without --all or --domain-name is rejected."""
    def config(self, config_files):
        CONF(args=['domain_config_upload'],
             project='keystone', default_config_files=config_files)
    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as fake_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
        expected = _('At least one option must be provided, use either '
                     '--all or --domain-name')
        fake_print.assert_has_calls([mock.call(expected)])
class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase):
    """Passing both --all and --domain-name together is rejected."""
    def config(self, config_files):
        CONF(args=['domain_config_upload', '--all', '--domain-name',
                   'Default'],
             project='keystone', default_config_files=config_files)
    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as fake_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
        expected = _('The --all option cannot be used with '
                     'the --domain-name option')
        fake_print.assert_has_calls([mock.call(expected)])
class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase):
    """A config file naming a nonexistent domain is reported and skipped."""
    def config(self, config_files):
        # Use a random name that cannot match any existing domain.
        self.invalid_domain_name = uuid.uuid4().hex
        CONF(args=['domain_config_upload', '--domain-name',
                   self.invalid_domain_name],
             project='keystone', default_config_files=config_files)

    def test_config_upload(self):
        dependency.reset()
        with mock.patch('six.moves.builtins.print') as mock_print:
            self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main)
            file_name = 'keystone.%s.conf' % self.invalid_domain_name
            error_msg = (_(
                'Invalid domain name: %(domain)s found in config file name: '
                '%(file)s - ignoring this file.') % {
                    'domain': self.invalid_domain_name,
                    'file': os.path.join(CONF.identity.domain_config_dir,
                                         file_name)})
            mock_print.assert_has_calls([mock.call(error_msg)])
class TestDomainConfigFinder(unit.BaseTestCase):
    """Tests for the _domain_config_finder directory scanner."""
    def setUp(self):
        super(TestDomainConfigFinder, self).setUp()
        self.logging = self.useFixture(fixtures.LoggerFixture())

    @mock.patch('os.walk')
    def test_finder_ignores_files(self, mock_walk):
        # Only keystone.<domain>.conf files should be yielded; other file
        # names are skipped with a log message.
        mock_walk.return_value = [
            ['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']],
        ]

        domain_configs = list(cli._domain_config_finder('.'))

        expected_domain_configs = [('./keystone.domain0.conf', 'domain0')]
        self.assertThat(domain_configs,
                        matchers.Equals(expected_domain_configs))

        expected_msg_template = ('Ignoring file (%s) while scanning '
                                 'domain config directory')
        self.assertThat(
            self.logging.output,
            matchers.Contains(expected_msg_template % 'file.txt'))
        self.assertThat(
            self.logging.output,
            matchers.Contains(expected_msg_template % 'keystone.conf'))
class CliDBSyncTestCase(unit.BaseTestCase):
    """Check that `keystone-manage db_sync` dispatches to the right phase."""

    class FakeConfCommand(object):
        # Stand-in for CONF.command that mirrors the parent's flag attributes.
        def __init__(self, parent):
            self.extension = False
            self.check = parent.command_check
            self.expand = parent.command_expand
            self.migrate = parent.command_migrate
            self.contract = parent.command_contract
            self.version = None

    def setUp(self):
        super(CliDBSyncTestCase, self).setUp()
        self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
        self.config_fixture.register_cli_opt(cli.command_opt)
        # Replace every migration entry point with a mock so the tests can
        # assert which single phase DbSync.main() selected.
        upgrades.offline_sync_database_to_version = mock.Mock()
        upgrades.expand_schema = mock.Mock()
        upgrades.migrate_data = mock.Mock()
        upgrades.contract_schema = mock.Mock()
        self.command_check = False
        self.command_expand = False
        self.command_migrate = False
        self.command_contract = False

    def _assert_correct_call(self, mocked_function):
        # Exactly one of the four phase functions must have been invoked.
        for func in [upgrades.offline_sync_database_to_version,
                     upgrades.expand_schema,
                     upgrades.migrate_data,
                     upgrades.contract_schema]:
            if func == mocked_function:
                self.assertTrue(func.called)
            else:
                self.assertFalse(func.called)

    def test_db_sync(self):
        # No flags set: the default offline sync path is taken.
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(
            upgrades.offline_sync_database_to_version)

    def test_db_sync_expand(self):
        self.command_expand = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.expand_schema)

    def test_db_sync_migrate(self):
        self.command_migrate = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.migrate_data)

    def test_db_sync_contract(self):
        self.command_contract = True
        self.useFixture(fixtures.MockPatchObject(
            CONF, 'command', self.FakeConfCommand(self)))
        cli.DbSync.main()
        self._assert_correct_call(upgrades.contract_schema)

    @mock.patch('keystone.cmd.cli.upgrades.get_db_version')
    def test_db_sync_check_when_database_is_empty(self, mocked_get_db_version):
        e = migration.exception.DbMigrationError("Invalid version")
        mocked_get_db_version.side_effect = e

        checker = cli.DbSync()
        log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
        status = checker.check_db_sync_status()
        # Status 2 signals "database not under version control" to callers.
        self.assertIn("not currently under version control", log_info.output)
        self.assertEqual(status, 2)
class TestMappingPopulate(unit.SQLDriverOverrides, unit.TestCase):
    """Check `keystone-manage mapping_populate` creates LDAP id mappings."""

    def setUp(self):
        sqldb = self.useFixture(database.Database())
        super(TestMappingPopulate, self).setUp()
        self.ldapdb = self.useFixture(ldapdb.LDAPDatabase())
        self.ldapdb.clear()

        self.load_backends()

        sqldb.recreate()
        self.load_fixtures(default_fixtures)

    def config_files(self):
        self.config_fixture.register_cli_opt(cli.command_opt)
        config_files = super(TestMappingPopulate, self).config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf'))
        return config_files

    def config_overrides(self):
        super(TestMappingPopulate, self).config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        # Force real mapped public ids rather than pass-through local ids.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

    def config(self, config_files):
        CONF(args=['mapping_populate', '--domain-name', 'Default'],
             project='keystone',
             default_config_files=config_files)

    def test_mapping_populate(self):
        # mapping_populate should create id mappings. Test plan:
        # 0. Purge mappings
        # 1. Fetch user list directly via backend. It will not create any
        #    mappings because it bypasses identity manager
        # 2. Verify that users have no public_id yet
        # 3. Execute mapping_populate. It should create id mappings
        # 4. For the same users verify that they have public_id now
        purge_filter = {}
        self.id_mapping_api.purge_mappings(purge_filter)
        hints = None
        users = self.identity_api.driver.list_users(hints)
        for user in users:
            local_entity = {
                'domain_id': CONF.identity.default_domain_id,
                'local_id': user['id'],
                'entity_type': identity_mapping.EntityType.USER}
            self.assertIsNone(self.id_mapping_api.get_public_id(local_entity))
        dependency.reset()  # backends are loaded again in the command handler
        cli.MappingPopulate.main()
        for user in users:
            local_entity = {
                'domain_id': CONF.identity.default_domain_id,
                'local_id': user['id'],
                'entity_type': identity_mapping.EntityType.USER}
            self.assertIsNotNone(
                self.id_mapping_api.get_public_id(local_entity))

    def test_bad_domain_name(self):
        CONF(args=['mapping_populate', '--domain-name', uuid.uuid4().hex],
             project='keystone')
        dependency.reset()  # backends are loaded again in the command handler
        # NOTE: assertEqual is used on purpose. assertFalse passes with None.
        self.assertEqual(False, cli.MappingPopulate.main())
class CliDomainConfigUploadNothing(unit.BaseTestCase):
    """Uploading --all from an empty config directory is a logged no-op."""

    def setUp(self):
        super(CliDomainConfigUploadNothing, self).setUp()

        config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
        config_fixture.register_cli_opt(cli.command_opt)

        # NOTE(dstanek): since this is not testing any database
        # functionality there is no need to go through the motions and
        # setup a test database.
        def fake_load_backends(self):
            self.resource_manager = mock.Mock()
        self.useFixture(fixtures.MockPatchObject(
            cli.DomainConfigUploadFiles, 'load_backends', fake_load_backends))

        # Point the domain config dir at a fresh (empty) temp directory.
        tempdir = self.useFixture(fixtures.TempDir())
        config_fixture.config(group='identity', domain_config_dir=tempdir.path)

        self.logging = self.useFixture(
            fixtures.FakeLogger(level=logging.DEBUG))

    def test_uploading_all_from_an_empty_directory(self):
        CONF(args=['domain_config_upload', '--all'], project='keystone',
             default_config_files=[])
        cli.DomainConfigUpload.main()

        expected_msg = ('No domain configs uploaded from %r' %
                        CONF.identity.domain_config_dir)
        self.assertThat(self.logging.output,
                        matchers.Contains(expected_msg))
class CachingDoctorTests(unit.TestCase):
    """Checks for the doctor symptoms covering the [cache] configuration."""

    def test_symptom_caching_disabled(self):
        # Symptom Detected: Caching disabled
        self.config_fixture.config(group='cache', enabled=False)
        self.assertTrue(caching.symptom_caching_disabled())

        # No Symptom Detected: Caching is enabled
        self.config_fixture.config(group='cache', enabled=True)
        self.assertFalse(caching.symptom_caching_disabled())

    def test_caching_symptom_caching_enabled_without_a_backend(self):
        # Symptom Detected: caching is enabled but only the no-op
        # 'dogpile.cache.null' backend is configured.
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(group='cache', backend='dogpile.cache.null')
        self.assertTrue(caching.symptom_caching_enabled_without_a_backend())

        # No Symptom: caching disabled, so the null backend is irrelevant.
        self.config_fixture.config(group='cache', enabled=False)
        self.config_fixture.config(group='cache', backend='dogpile.cache.null')
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())

        # No Symptom: caching disabled even with a real backend configured.
        self.config_fixture.config(group='cache', enabled=False)
        self.config_fixture.config(group='cache',
                                   backend='dogpile.cache.memory')
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())

        # No Symptom: caching enabled with a real backend configured.
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(group='cache',
                                   backend='dogpile.cache.memory')
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())
class CredentialDoctorTests(unit.TestCase):
    """Checks for doctor symptoms around the credential fernet key setup."""

    def test_credential_and_fernet_key_repositories_match(self):
        # Symptom Detected: Key repository paths are not unique
        directory = self.useFixture(fixtures.TempDir()).path
        self.config_fixture.config(group='credential',
                                   key_repository=directory)
        self.config_fixture.config(group='fernet_tokens',
                                   key_repository=directory)
        self.assertTrue(credential.symptom_unique_key_repositories())

    def test_credential_and_fernet_key_repositories_are_unique(self):
        # No Symptom Detected: Key repository paths are unique
        self.config_fixture.config(group='credential',
                                   key_repository='/etc/keystone/cred-repo')
        self.config_fixture.config(group='fernet_tokens',
                                   key_repository='/etc/keystone/fernet-repo')
        self.assertFalse(credential.symptom_unique_key_repositories())

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_usability_of_cred_fernet_key_repo_raised(self, mock_utils):
        # Symptom Detected: credential fernet key repository is world readable
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = False
        self.assertTrue(
            credential.symptom_usability_of_credential_fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_usability_of_cred_fernet_key_repo_not_raised(self, mock_utils):
        # No Symptom Detected: Custom driver is used
        self.config_fixture.config(group='credential', provider='my-driver')
        mock_utils.FernetUtils().validate_key_repository.return_value = True
        self.assertFalse(
            credential.symptom_usability_of_credential_fernet_key_repository())

        # No Symptom Detected: key repository is not world readable
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = True
        self.assertFalse(
            credential.symptom_usability_of_credential_fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_keys_in_credential_fernet_key_repository_raised(self, mock_utils):
        # Symptom Detected: Key repo is empty
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = False
        self.assertTrue(
            credential.symptom_keys_in_credential_fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_keys_in_credential_fernet_key_repository_not_raised(
            self, mock_utils):
        # No Symptom Detected: Custom driver is used
        self.config_fixture.config(group='credential', provider='my-driver')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            credential.symptom_keys_in_credential_fernet_key_repository())

        # No Symptom Detected: Key repo is not empty, fernet is current driver
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            credential.symptom_keys_in_credential_fernet_key_repository())
class DatabaseDoctorTests(unit.TestCase):
    """Checks for the doctor symptom that flags a SQLite database backend."""

    def test_symptom_is_raised_if_database_connection_is_SQLite(self):
        """The symptom fires for a sqlite URL and stays quiet for MySQL."""
        sqlite_url = 'sqlite:///mydb'
        mysql_url = 'mysql+mysqlconnector://admin:secret@localhost/mydb'

        # A sqlite connection string must trigger the symptom.
        self.config_fixture.config(group='database', connection=sqlite_url)
        self.assertTrue(
            doc_database.symptom_database_connection_is_not_SQLite())

        # Any non-sqlite backend (here MySQL) must not.
        self.config_fixture.config(group='database', connection=mysql_url)
        self.assertFalse(
            doc_database.symptom_database_connection_is_not_SQLite())
class DebugDoctorTests(unit.TestCase):
    """Checks for the doctor symptom that flags debug mode."""

    def test_symptom_debug_mode_is_enabled(self):
        """The symptom mirrors the global ``debug`` flag exactly."""
        # Symptom raised when debug is on; silent when it is off.
        for enabled, check in ((True, self.assertTrue),
                               (False, self.assertFalse)):
            self.config_fixture.config(debug=enabled)
            check(debug.symptom_debug_mode_is_enabled())
class FederationDoctorTests(unit.TestCase):
    """Checks for doctor symptoms around SAML certificate/key file paths."""

    def test_symptom_comma_in_SAML_public_certificate_path(self):
        # Symptom Detected: There is a comma in path to public cert file
        self.config_fixture.config(group='saml', certfile='file,cert.pem')
        self.assertTrue(
            federation.symptom_comma_in_SAML_public_certificate_path())

        # No Symptom Detected: There is no comma in the path
        self.config_fixture.config(group='saml', certfile='signing_cert.pem')
        self.assertFalse(
            federation.symptom_comma_in_SAML_public_certificate_path())

    def test_symptom_comma_in_SAML_private_key_file_path(self):
        # Symptom Detected: There is a comma in path to private key file
        self.config_fixture.config(group='saml', keyfile='file,key.pem')
        self.assertTrue(
            federation.symptom_comma_in_SAML_private_key_file_path())

        # No Symptom Detected: There is no comma in the path
        self.config_fixture.config(group='saml', keyfile='signing_key.pem')
        self.assertFalse(
            federation.symptom_comma_in_SAML_private_key_file_path())
class LdapDoctorTests(unit.TestCase):
    """Checks for doctor symptoms covering [ldap] and domain-specific configs."""

    def test_user_enabled_emulation_dn_ignored_raised(self):
        # Symptom when user_enabled_emulation_dn is being ignored because the
        # user did not enable the user_enabled_emulation
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com')
        self.assertTrue(
            ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())

    def test_user_enabled_emulation_dn_ignored_not_raised(self):
        # No symptom when configuration set properly
        self.config_fixture.config(group='ldap', user_enabled_emulation=True)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com')
        self.assertFalse(
            ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())
        # No symptom when both configurations disabled
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation_dn=None)
        self.assertFalse(
            ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())

    def test_user_enabled_emulation_use_group_config_ignored_raised(self):
        # Symptom when user enabled emulation isn't enabled but group_config is
        # enabled
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_use_group_config=True)
        self.assertTrue(
            ldap.
            symptom_LDAP_user_enabled_emulation_use_group_config_ignored())

    def test_user_enabled_emulation_use_group_config_ignored_not_raised(self):
        # No symptom when configuration deactivated
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_use_group_config=False)
        self.assertFalse(
            ldap.
            symptom_LDAP_user_enabled_emulation_use_group_config_ignored())
        # No symptom when configurations set properly
        self.config_fixture.config(group='ldap', user_enabled_emulation=True)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_use_group_config=True)
        self.assertFalse(
            ldap.
            symptom_LDAP_user_enabled_emulation_use_group_config_ignored())

    def test_group_members_are_ids_disabled_raised(self):
        # Symptom when objectclass is set to posixGroup but members_are_ids are
        # not enabled
        self.config_fixture.config(group='ldap',
                                   group_objectclass='posixGroup')
        self.config_fixture.config(group='ldap',
                                   group_members_are_ids=False)
        self.assertTrue(ldap.symptom_LDAP_group_members_are_ids_disabled())

    def test_group_members_are_ids_disabled_not_raised(self):
        # No symptom when the configurations are set properly
        self.config_fixture.config(group='ldap',
                                   group_objectclass='posixGroup')
        self.config_fixture.config(group='ldap',
                                   group_members_are_ids=True)
        self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled())
        # No symptom when configuration deactivated
        self.config_fixture.config(group='ldap',
                                   group_objectclass='groupOfNames')
        self.config_fixture.config(group='ldap',
                                   group_members_are_ids=False)
        self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled())

    @mock.patch('os.listdir')
    @mock.patch('os.path.isdir')
    def test_file_based_domain_specific_configs_raised(self, mocked_isdir,
                                                       mocked_listdir):
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=True)
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=False)

        # Symptom if there is no existing directory
        mocked_isdir.return_value = False
        self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs())

        # Symptom if there is an invalid filename inside the domain directory
        mocked_isdir.return_value = True
        mocked_listdir.return_value = ['openstack.domains.conf']
        self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs())

    @mock.patch('os.listdir')
    @mock.patch('os.path.isdir')
    def test_file_based_domain_specific_configs_not_raised(self, mocked_isdir,
                                                           mocked_listdir):
        # No symptom if both configurations deactivated
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=False)
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=False)
        self.assertFalse(
            ldap.symptom_LDAP_file_based_domain_specific_configs())

        # No symptom if directory exists with no invalid filenames
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=True)
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=False)
        mocked_isdir.return_value = True
        mocked_listdir.return_value = ['keystone.domains.conf']
        self.assertFalse(
            ldap.symptom_LDAP_file_based_domain_specific_configs())

    @mock.patch('os.listdir')
    @mock.patch('os.path.isdir')
    @mock.patch('keystone.cmd.doctor.ldap.configparser.ConfigParser')
    def test_file_based_domain_specific_configs_formatted_correctly_raised(
            self, mocked_parser, mocked_isdir, mocked_listdir):
        symptom = ('symptom_LDAP_file_based_domain_specific_configs'
                   '_formatted_correctly')
        # Symptom Detected: Ldap domain specific configuration files are not
        # formatted correctly
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=True)
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=False)
        mocked_isdir.return_value = True

        mocked_listdir.return_value = ['keystone.domains.conf']
        # Make the parser raise to simulate a malformed config file.
        mock_instance = mock.MagicMock()
        mock_instance.read.side_effect = configparser.Error('No Section')
        mocked_parser.return_value = mock_instance

        self.assertTrue(getattr(ldap, symptom)())

    @mock.patch('os.listdir')
    @mock.patch('os.path.isdir')
    def test_file_based_domain_specific_configs_formatted_correctly_not_raised(
            self, mocked_isdir, mocked_listdir):
        symptom = ('symptom_LDAP_file_based_domain_specific_configs'
                   '_formatted_correctly')
        # No Symptom Detected: Domain_specific drivers is not enabled
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=False)
        self.assertFalse(getattr(ldap, symptom)())

        # No Symptom Detected: Domain configuration from database is enabled
        self.config_fixture.config(
            group='identity',
            domain_specific_drivers_enabled=True)
        self.assertFalse(getattr(ldap, symptom)())
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=True)
        self.assertFalse(getattr(ldap, symptom)())

        # No Symptom Detected: The directory in domain_config_dir doesn't exist
        mocked_isdir.return_value = False
        self.assertFalse(getattr(ldap, symptom)())

        # No Symptom Detected: domain specific drivers are enabled, domain
        # configurations from database are disabled, directory exists, and no
        # exceptions found.
        self.config_fixture.config(
            group='identity',
            domain_configurations_from_database=False)
        mocked_isdir.return_value = True
        # An empty directory should not raise this symptom
        self.assertFalse(getattr(ldap, symptom)())

        # Test again with a file inside the directory
        mocked_listdir.return_value = ['keystone.domains.conf']
        self.assertFalse(getattr(ldap, symptom)())
class SecurityComplianceDoctorTests(unit.TestCase):
    """Checks for doctor symptoms covering [security_compliance] options.

    The password regexes below are raw string literals so their backslash
    sequences are not treated as invalid string escapes (a
    DeprecationWarning on Python 3.6+); the string values are unchanged.
    """

    def test_minimum_password_age_greater_than_password_expires_days(self):
        # Symptom Detected: Minimum password age is greater than the password
        # expires days. Both values are positive integers greater than zero.
        self.config_fixture.config(group='security_compliance',
                                   minimum_password_age=2)
        self.config_fixture.config(group='security_compliance',
                                   password_expires_days=1)
        self.assertTrue(
            security_compliance.
            symptom_minimum_password_age_greater_than_expires_days())

    def test_minimum_password_age_equal_to_password_expires_days(self):
        # Symptom Detected: Minimum password age is equal to the password
        # expires days. Both values are positive integers greater than zero.
        self.config_fixture.config(group='security_compliance',
                                   minimum_password_age=1)
        self.config_fixture.config(group='security_compliance',
                                   password_expires_days=1)
        self.assertTrue(
            security_compliance.
            symptom_minimum_password_age_greater_than_expires_days())

    def test_minimum_password_age_less_than_password_expires_days(self):
        # No Symptom Detected: Minimum password age is less than password
        # expires days. Both values are positive integers greater than zero.
        self.config_fixture.config(group='security_compliance',
                                   minimum_password_age=1)
        self.config_fixture.config(group='security_compliance',
                                   password_expires_days=2)
        self.assertFalse(
            security_compliance.
            symptom_minimum_password_age_greater_than_expires_days())

    def test_minimum_password_age_and_password_expires_days_deactivated(self):
        # No Symptom Detected: when minimum_password_age's default value is 0
        # and password_expires_days' default value is None
        self.assertFalse(
            security_compliance.
            symptom_minimum_password_age_greater_than_expires_days())

    def test_invalid_password_regular_expression(self):
        # Symptom Detected: Regular expression is invalid
        self.config_fixture.config(
            group='security_compliance',
            password_regex=r'^^(??=.*\d)$')
        self.assertTrue(
            security_compliance.symptom_invalid_password_regular_expression())

    def test_valid_password_regular_expression(self):
        # No Symptom Detected: Regular expression is valid
        self.config_fixture.config(
            group='security_compliance',
            password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
        self.assertFalse(
            security_compliance.symptom_invalid_password_regular_expression())

    def test_password_regular_expression_deactivated(self):
        # No Symptom Detected: Regular expression deactivated to None
        self.config_fixture.config(
            group='security_compliance',
            password_regex=None)
        self.assertFalse(
            security_compliance.symptom_invalid_password_regular_expression())

    def test_password_regular_expression_description_not_set(self):
        # Symptom Detected: Regular expression is set but description is not
        self.config_fixture.config(
            group='security_compliance',
            password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
        self.config_fixture.config(
            group='security_compliance',
            password_regex_description=None)
        self.assertTrue(
            security_compliance.
            symptom_password_regular_expression_description_not_set())

    def test_password_regular_expression_description_set(self):
        # No Symptom Detected: Regular expression and description are set
        desc = '1 letter, 1 digit, and a minimum length of 7 is required'
        self.config_fixture.config(
            group='security_compliance',
            password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$')
        self.config_fixture.config(
            group='security_compliance',
            password_regex_description=desc)
        self.assertFalse(
            security_compliance.
            symptom_password_regular_expression_description_not_set())

    def test_password_regular_expression_description_deactivated(self):
        # No Symptom Detected: Regular expression and description are
        # deactivated to None
        self.config_fixture.config(
            group='security_compliance', password_regex=None)
        self.config_fixture.config(
            group='security_compliance', password_regex_description=None)
        self.assertFalse(
            security_compliance.
            symptom_password_regular_expression_description_not_set())
class TokensDoctorTests(unit.TestCase):
    """Checks for the doctor symptom validating max_token_size per provider."""

    def test_unreasonable_max_token_size_raised(self):
        # Symptom Detected: the max_token_size for uuid is not 32
        self.config_fixture.config(group='token', provider='uuid')
        self.config_fixture.config(max_token_size=33)
        self.assertTrue(tokens.symptom_unreasonable_max_token_size())

        # Symptom Detected: the max_token_size for fernet is greater than 255
        self.config_fixture.config(group='token', provider='fernet')
        self.config_fixture.config(max_token_size=256)
        self.assertTrue(tokens.symptom_unreasonable_max_token_size())

    def test_unreasonable_max_token_size_not_raised(self):
        # No Symptom Detected: the max_token_size for uuid is 32
        self.config_fixture.config(group='token', provider='uuid')
        self.config_fixture.config(max_token_size=32)
        self.assertFalse(tokens.symptom_unreasonable_max_token_size())

        # No Symptom Detected: the max_token_size for fernet is 255 or less
        self.config_fixture.config(group='token', provider='fernet')
        self.config_fixture.config(max_token_size=255)
        self.assertFalse(tokens.symptom_unreasonable_max_token_size())
class TokenFernetDoctorTests(unit.TestCase):
    """Checks for doctor symptoms covering the Fernet token key repository."""

    @mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
    def test_usability_of_Fernet_key_repository_raised(self, mock_utils):
        # Symptom Detected: Fernet key repo is world readable
        self.config_fixture.config(group='token', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = False
        self.assertTrue(
            tokens_fernet.symptom_usability_of_Fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
    def test_usability_of_Fernet_key_repository_not_raised(self, mock_utils):
        # No Symptom Detected: UUID is used instead of fernet
        self.config_fixture.config(group='token', provider='uuid')
        mock_utils.FernetUtils().validate_key_repository.return_value = False
        self.assertFalse(
            tokens_fernet.symptom_usability_of_Fernet_key_repository())

        # No Symptom Detected: configs set properly, key repo is not world
        # readable but is user readable
        self.config_fixture.config(group='token', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = True
        self.assertFalse(
            tokens_fernet.symptom_usability_of_Fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
    def test_keys_in_Fernet_key_repository_raised(self, mock_utils):
        # Symptom Detected: Fernet key repository is empty
        self.config_fixture.config(group='token', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = False
        self.assertTrue(
            tokens_fernet.symptom_keys_in_Fernet_key_repository())

    @mock.patch('keystone.cmd.doctor.tokens_fernet.utils')
    def test_keys_in_Fernet_key_repository_not_raised(self, mock_utils):
        # NOTE: this test previously asserted
        # symptom_usability_of_Fernet_key_repository (a copy-paste slip), so
        # the keys_in symptom was never exercised on its negative path.
        # No Symptom Detected: UUID is used instead of fernet
        self.config_fixture.config(group='token', provider='uuid')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            tokens_fernet.symptom_keys_in_Fernet_key_repository())

        # No Symptom Detected: configs set properly, key repo has been
        # populated with keys
        self.config_fixture.config(group='token', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            tokens_fernet.symptom_keys_in_Fernet_key_repository())
| rajalokan/keystone | keystone/tests/unit/test_cli.py | Python | apache-2.0 | 57,490 | 0.00007 |
import tornado
import tornado.gen
from req import Service
from req import ApiRequestHandler
class Index(ApiRequestHandler):
    """Handler for the index route."""

    @tornado.gen.coroutine
    def get(self):
        # Delegates to ApiRequestHandler.render with no explicit template —
        # presumably the base class supplies a default (see req.py); confirm.
        self.render()
| Tocknicsu/nctuoj_contest | backend/handler/index.py | Python | apache-2.0 | 195 | 0.010256 |
#!/usr/bin/python
# FRANKEN CIPHER
# WRITTEN FOR ACADEMIC PURPOSES
#
# AUTHORED BY: Dan C and james@forscience.xyz
#
# THIS SCRIPT IS WRITTEN TO DEMONSTRATE A UNIQUE ENCRYPTION ALGORITHM THAT IS INSPIRED BY A NUMBER
# OF EXISTING ALGORITHMS.
# THE SCRIPT IS WRITTEN ENTIRELY FOR ACADEMIC PURPOSES. NO WARRANTY OR GUARANTEES ARE
# OFFERED BY THE AUTHORS IN RELATION TO THE USE OF THIS SCRIPT.
#
# Usage: franken.py <"-v" (verbose)> <"-d" (decrypt)> <"-k" (key phrase)> <"-m" (string to encrypt/decrypt)>
#
# indentation: TABS!
import sys
import getopt
import collections
import binascii
import hashlib
import itertools
# GLOBALS
# define -v and -d as false (-d defaults to encrypt mode)
verbose_opt = False          # -v: print step-by-step progress
decrypt_opt = False          # -d: decrypt instead of encrypt
key_phrase = ''              # clear text key phrase
key_hashed = ''              # hashed key phrase (set by keyGenerate)
clear_text = ''              # starting message input
pigpen_message = ''          # message after pigpen stage
encrypted_message = ''       # the encrypted message
decrypted_message = ''       # the decrypted message
# GLOBALS
# pigpen dictionaries: each maps one plaintext character to a unique
# three-letter code; the three tables differ only in which character is
# bound to which code (selected by selectDict() from the key phrase).
pigpen_A = {'A':'ETL', 'B':'ETM', 'C':'ETR', 'D':'EML', 'E':'EMM', 'F':'EMR', 'G':'EBL', 'H':'EBM', 'I':'EBR', 'J':'DTL',
'K':'DTM', 'L':'DTR', 'M':'DML', 'N':'DMM', 'O':'DMR', 'P':'DBL', 'Q':'DBM', 'R':'DBR', 'S':'EXT', 'T':'EXL', 'U':'EXR',
'V':'EXB', 'W':'DXT', 'X':'DXL', 'Y':'DXR', 'Z':'DXB', ' ':'EPS', '.':'EPF', ',':'EPC', '!':'EPE', '?':'EPQ', '"':'EPD',
'@':'EPA','0':'NTL', '1':'NTM', '2':'NTR', '3':'NML', '4':'NMM', '5':'NMR', '6':'NBL', '7':'NBM', '8':'NBR','9':'NXT'}
pigpen_B = {'C':'ETL', 'D':'ETM', 'A':'ETR', 'B':'EML', 'G':'EMM', 'H':'EMR', 'E':'EBL', 'F':'EBM', 'K':'EBR', 'L':'DTL',
'I':'DTM', 'J':'DTR', 'O':'DML', 'P':'DMM', 'M':'DMR', 'N':'DBL', 'S':'DBM', 'T':'DBR', 'Q':'EXT', 'R':'EXL', 'W':'EXR',
'X':'EXB', 'U':'DXT', 'V':'DXL', ' ':'DXR', ',':'DXB', 'Y':'EPS', '!':'EPF', 'Z':'EPC', '.':'EPE', '@':'EPQ', '0':'EPD',
'?':'EPA','"':'NTL', '3':'NTM', '4':'NTR', '1':'NML', '2':'NMM', '7':'NMR', '8':'NBL', '9':'NBM', '5':'NBR', '6':'NXT'}
pigpen_C = {'K':'ETL', 'L':'ETM', 'M':'ETR', 'N':'EML', 'O':'EMM', 'P':'EMR', 'Q':'EBL', 'R':'EBM', 'S':'EBR', 'U':'DTL',
'V':'DTM', 'W':'DTR', 'X':'DML', 'Y':'DMM', 'Z':'DMR', ' ':'DBL', '.':'DBM', ',':'DBR', '!':'EXT', '"':'EXL', '?':'EXR',
'@':'EXB', '0':'DXT', '1':'DXL', '2':'DXR', '3':'DXB', '4':'EPS', '5':'EPF', '6':'EPC', '7':'EPE', '8':'EPQ', '9':'EPD',
'A':'EPA','B':'NTL', 'C':'NTM', 'D':'NTR', 'E':'NML', 'F':'NMM', 'G':'NMR', 'H':'NBL', 'I':'NBM', 'J':'NBR','T':'NXT'}
# creates hashes of the key phrase inputted by the user
# in order for it to be used as a key
# the clear text key phrase string is retained
def keyGenerate():
    """Derive the 1024-bit working key from the global key phrase.

    Concatenates the MD5, SHA-256 and SHA-512 hex digests of
    ``key_phrase`` and appends one more MD5 of that concatenation,
    storing the 256-hex-character result in the module-global
    ``key_hashed``. The clear-text ``key_phrase`` is left untouched.
    """
    global key_hashed

    # Digest the pass phrase with three independent hash functions.
    phrase_bytes = key_phrase.encode()
    digests = [
        hashlib.md5(phrase_bytes).hexdigest(),
        hashlib.sha256(phrase_bytes).hexdigest(),
        hashlib.sha512(phrase_bytes).hexdigest(),
    ]
    key_hashed = ''.join(digests)

    # One final MD5 round over the concatenation pads the key out to
    # 256 hex characters (1024 bits).
    key_hashed += hashlib.md5(key_hashed.encode()).hexdigest()

    # vebose mode if verbose option is set
    if verbose_opt:
        print("[KEY GENERATION]: The key phrase is: \"" + key_phrase + "\"")
        print("[KEY GENERATION]: \"" + key_phrase + "\" is independantly hashed 3 times using MD5, SHA256 and SHA512")
        print("[KEY GENERATION]: The 3 hashes are concatenated with 1 more md5 hash, resulting in the 1024bit key:")
        print("[KEY GENERATION]: \"" + key_hashed + "\"\n")
    return
# selects the appropriate pigpen dictionary based on summing all of the ascii
# values in the key phrase and modulating the sum of the integers by 3 in order to retrieve
# one of 3 values. Returns the appropriate dictionary
def selectDict():
    """Pick one of the three pigpen dictionaries from the key phrase.

    The selector is the sum of the ASCII values of the clear-text
    ``key_phrase``, modulo 3, indexing (pigpen_A, pigpen_B, pigpen_C).
    """
    ascii_total = sum(ord(ch) for ch in key_phrase)
    # Map the 0/1/2 remainder straight onto the dictionary triple.
    return (pigpen_A, pigpen_B, pigpen_C)[ascii_total % 3]
# convert message into pigpen alphabet. compare each letter to dict key.
# first makes all chars uppercase and ignores some punctuation.
# itterates through pigpen dict to find value based on clear message char as key
def pigpenForward():
    """Encode the global ``clear_text`` into pigpen cipher text.

    Upper-cases the message and appends the three-letter pigpen code of
    every character found in the selected dictionary to the
    module-global ``pigpen_message``. Characters absent from the
    dictionary are silently dropped (original behaviour).
    """
    global pigpen_message
    # convert clear message to uppercase
    message = clear_text.upper()

    # Hoisted out of the loop: the original called selectDict() twice per
    # character, but the selection is fixed for a given key phrase.
    pigpen_dict = selectDict()
    pigpen_message += ''.join(
        pigpen_dict[letter] for letter in message if letter in pigpen_dict)

    # verbose mode if verbose option is set
    if verbose_opt:
        print("[ENCRYPTION - Phase 1]: The clear text is:")
        print("[ENCRYPTION - Phase 1]: \"" + clear_text + "\"")
        print("[ENCRYPTION - Phase 1]: 1 of 3 dictionaries is derived from the sum of the pre-hashed key ASCII values (mod 3)")
        print("[ENCRYPTION - Phase 1]: The clear text is converted into pigpen cipher text using the selected dictionary:")
        print("[ENCRYPTION - Phase 1]: \"" + pigpen_message + "\"\n")
    return
# reverses the pigpen process. takes a pigpen string and converts it back to clear text
# first creates a list of each 3 values from the inputted string (each element has 3 chars)
# then compares those elements to the pigpen dictionary to create the decrypted string
def pigpenBackward():
    """Phase 3 of decryption: convert pigpen cipher text back to clear text.

    Joins the partially decrypted byte values (global decrypted_message)
    into an ASCII string, splits it into three-character pigpen values and
    reverse-looks each value up in the selected pigpen dictionary, storing
    the recovered clear text back into the global decrypted_message.
    """
    global decrypted_message
    # convert encrypted message (int array) back to a single ascii string
    message = ''
    try:
        for i in decrypted_message:
            message += chr(i)
    except:
        # a value outside chr() range means the key was wrong / input corrupt
        print("[ERROR]: Incorrect key. Cannot decrypt.")
        usageText()
    # retrieve each 3 chars (one pigpen value) and form a list
    message_list = [message[i:i+3] for i in range(0, len(message), 3)]
    # zero out decrypted message string in order to store pigpen deciphered characters
    decrypted_message = ''
    # itterate through list elements and compare against pigpen dict
    # to find correct key (clear text letter) and create decrypted string
    # NOTE(review): dict.iteritems() is Python-2-only; under Python 3 this
    # would need dict.items() -- the whole script is Python 2.
    for element in message_list:
        for key, value in selectDict().iteritems():
            if value == element:
                decrypted_message += key
    # verbose mode if verbose option is set
    if verbose_opt:
        print("[DECRYPTION - Phase 3]: 1 of 3 dictionaries is derived from the sum of the pre-hashed key ASCII values (mod 3)")
        print("[DECRYPTION - Phase 3]: The values of the pigpen cipher text are looked up in the selected dictionary")
        print("[DECRYPTION - Phase 3]: The pigpen cipher text is converted back into clear text:\n")
        print("[DECRYPTION - COMPLETE]: \"" + decrypted_message + "\"\n")
    return
# XORs an int value derived from the hashed key to each ascii int value of the message.
# The key value is looked up by using the value stored in that key array position to reference
# the array position that value points to. That value is then XOR'ed with the corresponding value of the message
# this occurs three times. Inspired by DES key sub key generation and RC4
def keyConfusion(message):
    """Permute *message* (a bytearray) in place by XORing each byte with 3 key bytes.

    For every message byte, a key byte index is drawn from a cycling iterator
    over the 128 hashed-key bytes; the selected byte is XORed in and its value
    (mod 128) picks the next key byte, chained twice more -- inspired by DES
    sub-key generation and RC4. Returns the mutated bytearray.
    """
    # create array of base10 ints from ascii values of chars in hashed key
    key = []
    for x in key_hashed:
        key.append(ord(x))
    # create a variable for cycling through the key array (in case the message is longer than key)
    key_cycle = itertools.cycle(key)
    # loop through the key and XOR the resultant value with the corresponding value in the message
    for i in range(len(message)):
        # find the value pointed to by the value of each element of the key (for each value in the message array)
        # NOTE(review): iterator.next() is Python-2-only (py3: next(key_cycle))
        key_pointer = key_cycle.next() % 128 # get the next key byte. mod 128 because 128 bytes in 1024bits
        key_byte = key[key_pointer]
        # XOR message byte with current key_byte
        message[i] = message[i] ^ key_byte
        # XOR message byte with the key byte pointed to by previous key byte value
        key_byte = key[(key_byte % 128)]
        message[i] = message[i] ^ key_byte
        # once again XOR message byte with the next key byte pointed to by previous key byte value
        key_byte = key[(key_byte % 128)]
        message[i] = message[i] ^ key_byte
    # verbose mode if verbose option is set
    if verbose_opt:
        # are we decrypting or encrypting?
        if decrypt_opt:
            en_or_de = "[DECRYPTION - Phase 2]: "
            en_or_de_text = " pigpen cipher text:"
        else:
            en_or_de = "[ENCRYPTION - Phase 2]: "
            en_or_de_text = " partially encrypted string:"
        # print the appropriate output for encrypting or decrypting
        print(en_or_de + "Each byte of the pigpen cipher is then XOR'ed against 3 bytes of the key")
        print(en_or_de + "The key byte is XOR'ed against the byte of the message and then used to select the")
        print(en_or_de + "position in the key array of the next key byte value. This occurs three times.")
        print(en_or_de + "Resulting in the" + en_or_de_text)
        # NOTE(review): `message` is a bytearray here; concatenating str and
        # bytearray raises TypeError on CPython 2 -- confirm verbose mode has
        # actually been exercised on this path.
        print(en_or_de + "\"" + message + "\"\n")
    return message
# xors the hashed key against the pigpenned message
# each character in the message is xor'ed against each character
# in the hashed key, resulting in the encrypted message
def xorForward():
    """Phases 2+3 of encryption: permute then XOR the message against the key.

    Converts the global pigpen_message and the 1024-bit hashed key into byte
    arrays, runs keyConfusion() over the message, then XORs every message
    byte with every key byte and stores the hex-encoded result in the global
    encrypted_message.
    """
    global encrypted_message
    # convert key and message into ints for xoring
    # (Python 2 bytearray(str); Python 3 would require an encoding argument)
    message = bytearray(pigpen_message)
    key = bytearray(key_hashed)
    # send pigpen message off for permution
    message = keyConfusion(message)
    # iterate over message and xor each character against each value in the key
    for x in range(len(message)):
        for y in range(len(key)):
            xored = key[y] ^ message[x]
            message[x] = xored
    # store hex value of encrypted string in global variable
    encrypted_message = binascii.hexlify(bytearray(message))
    # verbose mode is verbose option is set
    if verbose_opt:
        print("[ENCRYPTION - Phase 3]: The partially encrypted cipher text and key are converted into a byte arrays")
        print("[ENCRYPTION - Phase 3]: Each byte of the message is XOR'ed against each byte of the key")
        print("[ENCRYPTION - Phase 3]: Resulting in the cipher text hex string:\n")
        print("[ENCRYPTION - COMPLETE]: \"" + encrypted_message + "\"\n")
    return
# the reverse of the encrypt function, whereby the supplied key is reversed
# and xored against the encrypted message. The message is first unhexlified
# to facilitate xoring
def xorBackward():
    """Phase 1 of decryption: reverse the bulk XOR stage of xorForward().

    Un-hexlifies the cipher text (global clear_text on the decrypt path),
    XORs it against the reversed hashed key, then hands the result to
    keyConfusion() to undo the permutation, storing the outcome in the
    global decrypted_message.
    """
    global decrypted_message
    # create byte array for key and to store decrypted message
    reverse_key = key_hashed[::-1]
    key = bytearray(reverse_key)
    # try to convert the encrypted message from hex to int, error if incorrect string
    try:
        message = bytearray(binascii.unhexlify(clear_text))
    except:
        print("[ERROR]: Incorrect string. Cannot decrypt.")
        usageText()
    # iterate over the encrypted message and xor each value against each value in the key
    for x in range(len(message)):
        for y in range(len(key)):
            xored = key[y] ^ message[x]
            message[x] = xored
    # verbose mode is verbose option is set
    if verbose_opt:
        print("[DECRYPTION - Phase 1]: The cipher text is:")
        print("[DECRYPTION - Phase 1]: \"" + clear_text + "\"")
        print("[DECRYPTION - Phase 1]: The cipher text and key are converted into a byte arrays")
        print("[DECRYPTION - Phase 1]: The key is reversed in order to reverse this stage of XOR'ing")
        print("[DECRYPTION - Phase 1]: Each byte of the cipher text is XOR'ed against each byte of the key")
        print("[DECRYPTION - Phase 1]: Resulting in the partially decrypted string:")
        # NOTE(review): `message` is a bytearray; str + bytearray concatenation
        # raises TypeError on CPython 2 -- confirm this verbose path works.
        print("[DECRYPTION - Phase 1]: \"" + message + "\"\n")
    # send decrypted array off for permutation (reverse encrypted XOR'ing)
    decrypted_message = keyConfusion(message)
    return
# text to be displayed on incorrect user input
def usageText():
    """Print command-line usage help and terminate with exit status 2."""
    usage_lines = (
        "\n[USAGE]: franken.py -v (verbose) -d (decrypt) --keyphrase (-k) <phrase> --message (-m) <message to encrypt>",
        "[USAGE]: -v and -d arguments are optional. --keyphrase(-k) and --message(-m) are required",
        "\n[EXAMPLE]: python franken.py -v --keyphrase \"super secret\" --message \"This is a super secret message\"\n",
        "[!] As with any cipher, your message is only as secure as your key phrase.",
        "[!] REMEMBER: The more complicated your key phrase, the stronger your encrypted message will be!\n",
    )
    for line in usage_lines:
        print(line)
    # exit status 2 is the conventional "incorrect usage" code
    sys.exit(2)
# USER INPUT HANDLING
# check that arguments have been supplied
if len(sys.argv) < 2:
    usageText()
# define the arguments and necessity.
try:
    opts, args = getopt.getopt(sys.argv[1:], 'vdk:m:', ["verbose", "decrypt", "keyphrase=", "message="])
except getopt.GetoptError:
    usageText()
# check for presence of args and assign values
for opt, arg in opts:
    if opt == '-v':
        verbose_opt = True
    if opt == '-d':
        decrypt_opt = True
    if opt in ('-k', '--keyphrase'):
        key_phrase = arg
    if opt in ('-m', '--message'):
        clear_text = arg
# Check that a keyphrase and message has been set
# NOTE(review): assumes key_phrase/clear_text (and verbose_opt/decrypt_opt)
# are initialised with defaults earlier in the file; otherwise this check
# raises NameError when the options are omitted -- confirm the defaults
# above this chunk.
if not key_phrase or not clear_text:
    usageText()
# startup banner (ASCII art)
print(
    '''
 __ _
 / _| | |
| |_ _ __ __ _ _ __ | | _____ _ __
| _| '__/ _` | '_ \| |/ / _ \ '_ \
| | | | | (_| | | | | < __/ | | |
|_| |_| \__,_|_| |_|_|\_\___|_| |_|
 _ _
 __(_)_ __| |_ ___ _ _
 / _| | '_ \ ' \/ -_) '_|
 \__|_| .__/_||_\___|_|
 |_|
 [!] franken.py
 An encryption algorithm inspired by a number of existing ciphers.
 Created for CC6004 Course Work 1. 2016/17
 [@] Dan C and james@forscience.xyz
 __________________________________________________
 '''
)
# are we decrypting or encrypting? defaults to encrypting
# decrypt
if decrypt_opt:
    keyGenerate()
    xorBackward()
    pigpenBackward()
    if not verbose_opt:
        print("[DECRYPTED]: " + decrypted_message + "\n")
# encrypt
else:
    keyGenerate()
    pigpenForward()
    xorForward()
    if not verbose_opt:
        print("[ENCRYPTED]: " + encrypted_message + "\n")
| forScie/FrankenCipher | franken.py | Python | gpl-3.0 | 13,770 | 0.030719 |
# coding=utf-8
#
# copyright (C) 2017 Steffen Rolapp (github@rolapp.de)
#
# based on ZattooBoxExtended by Daniel Griner (griner.ch@gmail.com) license under GPL
#
# This file is part of ZattooHiQ
#
# zattooHiQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# zattooHiQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with zattooHiQ. If not, see <http://www.gnu.org/licenses/>.
#
import xbmc, xbmcgui, xbmcaddon, os, xbmcplugin, datetime, time
import json
from zapisession import ZapiSession
# Kodi add-on handle and cached user settings
__addon__ = xbmcaddon.Addon()
_listMode_ = __addon__.getSetting('channellist')
_channelList_=[]
# shortcuts: add-on localized strings vs. Kodi core localized strings
localString = __addon__.getLocalizedString
local = xbmc.getLocalizedString
# unicode translation table for German umlauts (for use with unicode.translate)
_umlaut_ = {ord(u'ä'): u'ae', ord(u'ö'): u'oe', ord(u'ü'): u'ue', ord(u'ß'): u'ss'}
REMOTE_DBG = False
# append pydev remote debugger
if REMOTE_DBG:
    # Make pydev debugger works for auto reload.
    # Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
    try:
        import pysrc.pydevd as pydevd # with the addon script.module.pydevd, only use `import pydevd`
        # stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
        #pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True, suspend=False)
        pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True)
    except ImportError:
        # NOTE(review): `sys` is not in the module-level imports above, so
        # this error path would raise NameError -- dead code while
        # REMOTE_DBG is False, but confirm before enabling the debugger.
        sys.stderr.write("Error: You must add org.python.pydev.debug.pysrc to your PYTHONPATH.")
        sys.exit(1)
class ZattooDB(object):
    def __init__(self):
        """Open (creating if needed) the SQLite cache DB in the add-on
        profile directory and start an authenticated Zattoo API session."""
        self.conn = None
        profilePath = xbmc.translatePath(__addon__.getAddonInfo('profile'))
        if not os.path.exists(profilePath): os.makedirs(profilePath)
        self.databasePath = os.path.join(profilePath, "zattoo.db")
        self.connectSQL()
        self.zapi=self.zapiSession()

    def zapiSession(self):
        """Create a ZapiSession and log in with the configured credentials.

        On login failure the Kodi home window and the add-on settings dialog
        are opened and the add-on exits via sys.exit() (imported locally).
        """
        zapiSession = ZapiSession(xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8'))
        if zapiSession.init_session(__addon__.getSetting('username'), __addon__.getSetting('password')):
            return zapiSession
        else:
            # show home window, zattooHiQ settings and quit
            xbmc.executebuiltin('ActivateWindow(10000)')
            xbmcgui.Dialog().ok(__addon__.getAddonInfo('name'), __addon__.getLocalizedString(31902))
            __addon__.openSettings()
            zapiSession.renew_session()
            import sys
            sys.exit()
    @staticmethod
    def adapt_datetime(ts):
        """sqlite3 adapter: store datetime values as Unix timestamps (float)."""
        # http://docs.python.org/2/library/sqlite3.html#registering-an-adapter-callable
        return time.mktime(ts.timetuple())

    @staticmethod
    def convert_datetime(ts):
        """sqlite3 converter: parse a stored timestamp back into a datetime.

        Returns None when the stored value is not a parseable float.
        """
        try:
            return datetime.datetime.fromtimestamp(float(ts))
        except ValueError:
            return None

    def connectSQL(self):
        """Connect to the cache DB, register the datetime (de)serialisers
        and create the schema when the DB is new/empty."""
        import sqlite3
        sqlite3.register_adapter(datetime.datetime, self.adapt_datetime)
        sqlite3.register_converter('timestamp', self.convert_datetime)
        self.conn = sqlite3.connect(self.databasePath, detect_types=sqlite3.PARSE_DECLTYPES)
        self.conn.execute('PRAGMA foreign_keys = ON')
        self.conn.row_factory = sqlite3.Row
        # check if DB exists
        # probing the showinfos table tells us whether the schema was created
        c = self.conn.cursor()
        try: c.execute('SELECT * FROM showinfos')
        except: self._createTables()
    def _createTables(self):
        """Drop (best effort) and recreate the whole cache schema."""
        import sqlite3
        c = self.conn.cursor()
        # drop any existing tables; each drop is best-effort
        try: c.execute('DROP TABLE channels')
        except: pass
        try:
            c.execute('DROP TABLE programs')
            print "DROP PROGRAM TABlE"
        except: pass
        try: c.execute('DROP TABLE updates')
        except: pass
        try: c.execute('DROP TABLE playing')
        except: pass
        try: c.execute('DROP TABLE showinfos')
        except: pass
        self.conn.commit()
        try:
            c.execute('CREATE TABLE channels(id TEXT, title TEXT, logo TEXT, weight INTEGER, favourite BOOLEAN, PRIMARY KEY (id) )')
            c.execute('CREATE TABLE programs(showID TEXT, title TEXT, channel TEXT, start_date TIMESTAMP, end_date TIMESTAMP, restart BOOLEAN, series BOOLEAN, description TEXT, description_long TEXT, year TEXT, country TEXT, genre TEXT, category TEXT, image_small TEXT, image_large TEXT, updates_id INTEGER, FOREIGN KEY(channel) REFERENCES channels(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED, FOREIGN KEY(updates_id) REFERENCES updates(id) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED)')
            c.execute('CREATE TABLE updates(id INTEGER, date TIMESTAMP, type TEXT, PRIMARY KEY (id) )')
            #c.execute('CREATE TABLE playing(channel TEXT, start_date TIMESTAMP, action_time TIMESTAMP, current_stream INTEGER, streams TEXT, PRIMARY KEY (channel))')
            c.execute('CREATE TABLE showinfos(showID INTEGER, info TEXT, PRIMARY KEY (showID))')
            c.execute('CREATE TABLE playing(channel TEXT, current_stream INTEGER, streams TEXT, PRIMARY KEY (channel))')
            c.execute('CREATE TABLE version(version TEXT, PRIMARY KEY (version))')
            c.execute('CREATE INDEX program_list_idx ON programs(channel, start_date, end_date)')
            c.execute('CREATE INDEX start_date_idx ON programs(start_date)')
            c.execute('CREATE INDEX end_date_idx ON programs(end_date)')
            self.conn.commit()
            c.close()
        except sqlite3.OperationalError, ex:
            # schema already exists -- nothing to do
            pass
    def updateChannels(self, rebuild=False):
        """Refresh the channels table from the Zattoo API.

        Skipped when an update was already recorded for today unless
        *rebuild* is True. Favourite channels keep their favourites-list
        position as weight; all others are weighted 1000+.
        """
        c = self.conn.cursor()
        if rebuild == False:
            # already updated today? then keep the cached channel list
            date = datetime.date.today().strftime('%Y-%m-%d')
            c.execute('SELECT * FROM updates WHERE date=? AND type=? ', [date, 'channels'])
            if len(c.fetchall())>0:
                c.close()
                return
        # always clear db on update
        c.execute('DELETE FROM channels')
        print "account "+ self.zapi.AccountData['account']['power_guide_hash']
        api = '/zapi/v2/cached/channels/' + self.zapi.AccountData['account']['power_guide_hash'] + '?details=False'
        channelsData = self.zapi.exec_zapiCall(api, None)
        api = '/zapi/channels/favorites'
        favoritesData = self.zapi.exec_zapiCall(api, None)
        nr = 0
        for group in channelsData['channel_groups']:
            for channel in group['channels']:
                logo = 'http://logos.zattic.com' + channel['qualities'][0]['logo_black_84'].replace('/images/channels', '')
                try:
                    # position in the favourites list doubles as sort weight
                    favouritePos = favoritesData['favorites'].index(channel['id'])
                    weight = favouritePos
                    favourite = True
                except:
                    # not a favourite: sort behind favourites in API order
                    weight = 1000 + nr
                    favourite = False
                c.execute('INSERT OR IGNORE INTO channels(id, title, logo, weight, favourite) VALUES(?, ?, ?, ?, ?)',
                    [channel['id'], channel['title'], logo, weight, favourite])
                if not c.rowcount:
                    c.execute('UPDATE channels SET title=?, logo=?, weight=?, favourite=? WHERE id=?',
                        [channel['title'], logo, weight, favourite, channel['id']])
                nr += 1
        # record the successful update so we do not refetch today
        if nr>0: c.execute('INSERT INTO updates(date, type) VALUES(?, ?)', [datetime.date.today(), 'channels'])
        self.conn.commit()
        c.close()
        return
    def updateProgram(self, date=None, rebuild=False):
        """Fetch and cache one day of EPG data for *date* (default: today).

        Skips the fetch when shows between 05:00 and 07:00 of that day are
        already cached; *rebuild* wipes the programs table first.
        """
        if date is None: date = datetime.date.today()
        else: date = date.date()
        c = self.conn.cursor()
        if rebuild:
            c.execute('DELETE FROM programs')
            self.conn.commit()
        # get whole day
        fromTime = int(time.mktime(date.timetuple())) # UTC time for zattoo
        toTime = fromTime + 86400 # is 24h maximum zattoo is sending?
        #get program from DB and return if it's not empty
        # if self._isDBupToDate(date, 'programs'):return
        c.execute('SELECT * FROM programs WHERE start_date > ? AND end_date < ?', (fromTime+18000, fromTime+25200,)) #get shows between 05:00 and 07:00
        count=c.fetchall()
        if len(count)>0:
            c.close()
            return
        # tell the user which day is being loaded
        xbmcgui.Dialog().notification(__addon__.getLocalizedString(31917), self.formatDate(date), __addon__.getAddonInfo('path') + '/icon.png', 5000, False)
        #xbmc.executebuiltin("ActivateWindow(busydialog)")
        api = '/zapi/v2/cached/program/power_guide/' + self.zapi.AccountData['account']['power_guide_hash'] + '?end=' + str(toTime) + '&start=' + str(fromTime)
        print "apiData "+api
        programData = self.zapi.exec_zapiCall(api, None)
        print str(programData)
        count=0
        for channel in programData['channels']:
            cid = channel['cid']
            # "chtv" is deliberately excluded from the EPG cache
            if cid =="chtv":
                continue
            c.execute('SELECT * FROM channels WHERE id==?', [cid])
            countt=c.fetchall()
            if len(countt)==0:
                print "Sender NICHT : "+cid
            for program in channel['programs']:
                count+=1
                if program['i'] != None:
                    image = "http://images.zattic.com/" + program['i']
                    #http://images.zattic.com/system/images/6dcc/8817/50d1/dfab/f21c/format_480x360.jpg
                else: image = ""
                try:
                    # debug trace of the row about to be written
                    print 'INSERT OR IGNORE INTO programs(channel, title, start_date, end_date, description, genre, image_small, showID) VALUES(%, %, %, %, %, %, %)',cid, program['t'], program['s'], program['e'], program['et'], ', '.join(program['g']), image, program['id']
                except:
                    pass
                c.execute('INSERT OR IGNORE INTO programs(channel, title, start_date, end_date, description, genre, image_small, showID) VALUES(?, ?, ?, ?, ?, ?, ?, ?)',
                    [cid, program['t'], program['s'], program['e'], program['et'], ', '.join(program['g']), image, program['id'] ])
                if not c.rowcount:
                    c.execute('UPDATE programs SET channel=?, title=?, start_date=?, end_date=?, description=?, genre=?, image_small=? WHERE showID=?',
                        [cid, program['t'], program['s'], program['e'], program['et'], ', '.join(program['g']), image, program['id'] ])
        if count>0:
            c.execute('INSERT into updates(date, type) VALUES(?, ?)', [date, 'program'])
        try:
            self.conn.commit()
        except:
            print 'IntegrityError: FOREIGN KEY constraint failed zattooDB 232'
        #xbmc.executebuiltin("Dialog.Close(busydialog)")
        c.close()
        return
    def getChannelList(self, favourites=True):
        """Return the cached channels ordered by weight.

        The result is a dict keyed by channel id plus an 'index' list that
        preserves the ordering; *favourites* limits the set to favourites.
        """
        #self.updateChannels()
        c = self.conn.cursor()
        if favourites: c.execute('SELECT * FROM channels WHERE favourite=1 ORDER BY weight')
        else: c.execute('SELECT * FROM channels ORDER BY weight')
        channelList = {'index':[]}
        nr=0
        for row in c:
            channelList[row['id']]={
                'id': str(row['id']),
                'title': row['title'],
                'logo': row['logo'],
                'weight': row['weight'],
                'favourite': row['favourite'],
                'nr':nr
            }
            channelList['index'].append(str(row['id']))
            nr+=1
        c.close()
        return channelList

    def get_channelInfo(self, channel_id):
        """Return one cached channel row as a plain dict.

        NOTE(review): assumes *channel_id* exists; fetchone() returning None
        would raise TypeError here -- confirm callers guarantee the id.
        """
        c = self.conn.cursor()
        c.execute('SELECT * FROM channels WHERE id=?', [channel_id])
        row = c.fetchone()
        channel = {
            'id':row['id'],
            'title':row['title'],
            'logo':row['logo'],
            'weight':row['weight'],
            'favourite':row['favourite']
        }
        c.close()
        return channel

    def getPopularList(self):
        """Return up to 30 currently-most-watched channels (3 pages x 10)
        in the same {'index': [...], cid: {...}} shape as getChannelList."""
        channels=self.getChannelList(False)
        popularList = {'index':[]}
        nr=0
        #max 10 items per request -> request 3times for 30 items
        for page in range(3):
            api = '/zapi/v2/cached/' + self.zapi.SessionData['session']['power_guide_hash'] + '/teaser_collections/most_watched_live_now_de?page='+str(page)+'&per_page=10'
            mostWatched = self.zapi.exec_zapiCall(api, None)
            if mostWatched is None: continue
            for data in mostWatched['teasers']:
                data=data['teasable']
                popularList[data['cid']]={
                    'id': str(data['cid']),
                    'title': data['t'],
                    'logo': channels[data['cid']]['logo'],
                    'nr':nr
                }
                popularList['index'].append(str(data['cid']))
                nr+=1
        return popularList
    def getPrograms(self, channels, get_long_description=False, startTime=datetime.datetime.now(), endTime=datetime.datetime.now()):
        """Return programme entries of *channels* overlapping [startTime, endTime].

        NOTE(review): the startTime/endTime defaults are evaluated once at
        class-definition time, so the implicit window is "module load time",
        not "call time" -- confirm callers always pass both explicitly.
        """
        import urllib
        c = self.conn.cursor()
        programList = []
        for chan in channels['index']:
            c.execute('SELECT * FROM programs WHERE channel = ? AND start_date < ? AND end_date > ?', [chan, endTime, startTime])
            r = c.fetchall()
            for row in r:
                description_long = row['description_long']
                year = row['year']
                country = row['country']
                category =row['category']
                # lazily fetch the long description from the API when asked
                if get_long_description and description_long is None:
                    #description_long = self.getShowInfo(row["showID"],'description')
                    info = self.getShowLongDescription(row['showID'])
                    print 'ProgINFO ' + str(type(info)) + ' ' + str(row['showID'])+ ' ' + str(info)
                    if type(info) == dict:
                        description_long = info.get('description','')
                        year = info.get('year',' ')
                        country = info.get('country','')
                        category = info.get('category','')
                programList.append({
                    'channel': row['channel'],
                    'showID' : row['showID'],
                    'title' : row['title'],
                    'description' : row['description'],
                    'description_long' : description_long,
                    'year': year, #row['year'],
                    'genre': row['genre'],
                    'country': country, #row['country'],
                    'category': category, #row['category'],
                    'start_date' : row['start_date'],
                    'end_date' : row['end_date'],
                    'image_small' : row['image_small'],
                    'image_large': row['image_large']
                })
        c.close()
        return programList
def getShowLongDescription(self, showID):
info = self.conn.cursor()
try:
info.execute('SELECT * FROM programs WHERE showID= ? ', [showID])
except:
info.close()
return None
show = info.fetchone()
longDesc = show['description_long']
year = show['year']
country = show['country']
category = show ['category']
series = show['series']
restart = show['restart']
if longDesc is None:
api = '/zapi/program/details?program_id=' + showID + '&complete=True'
showInfo = self.zapiSession().exec_zapiCall(api, None)
print 'Showinfo ' + str(showInfo)
if showInfo is None:
longDesc=''
year=''
category=''
info.close()
return {'description':longDesc, 'year':year, 'country':country, 'category':category}
longDesc = showInfo['program']['description']
info.execute('UPDATE programs SET description_long=? WHERE showID=?', [longDesc, showID ])
year = showInfo['program']['year']
if year is None: year=''
info.execute('UPDATE programs SET year=? WHERE showID=?', [year, showID ])
category = ', '.join(showInfo['program']['categories'])
info.execute('UPDATE programs SET category=? WHERE showID=?', [category, showID ])
country = showInfo['program']['country']
country = country.replace('|',', ')
info.execute('UPDATE programs SET country=? WHERE showID=?', [country, showID ])
series = showInfo['program']['series_recording_eligible']
info.execute('UPDATE programs SET series=? WHERE showID=?', [series, showID])
try:
restart = showInfo['program']['selective_recall_until']
info.execute('UPDATE programs SET restart=? WHERE showID=?', [True, showID])
print 'Restart ' +str(showID) + ' ' + str(restart)
except:
print 'No Restart'
info.execute('UPDATE programs SET restart=? WHERE showID=?', [False, showID])
try:
self.conn.commit()
except:
print 'IntegrityError: FOREIGN KEY constraint failed zattooDB 355'
info.close()
return {'description':longDesc, 'year':year, 'country':country, 'category':category}
    def getShowInfo(self, showID, field='all'):
        """Return the full programme info dict for *showID*, or one field.

        field='all' serves (and fills) the showinfos JSON cache table used
        for recordings; any other field value is delegated to
        getShowLongDescription. Returns the string "NONE" on API failure.
        """
        if field!='all':
            #api = '/zapi/program/details?program_id=' + str(showID) + '&complete=True'
            #showInfo = self.zapi.exec_zapiCall(api, None)
            showInfo = self.getShowLongDescription(showID)
            #return showInfo['program'].get(field, " ")
            return showInfo[field]
        #save information for recordings
        import json
        c = self.conn.cursor()
        c.execute('SELECT * FROM showinfos WHERE showID= ? ', [int(showID)])
        row = c.fetchone()
        if row is not None:
            # cache hit: rehydrate the stored JSON blob
            showInfoJson=row['info']
            showInfo=json.loads(showInfoJson)
        else:
            api = '/zapi/program/details?program_id=' + str(showID) + '&complete=True'
            showInfo = self.zapi.exec_zapiCall(api, None)
            if showInfo is None:
                c.close()
                return "NONE"
            showInfo = showInfo['program']
            # best-effort insert (primary key may already exist)
            try: c.execute('INSERT INTO showinfos(showID, info) VALUES(?, ?)',(int(showID), json.dumps(showInfo)))
            except: pass
            self.conn.commit()
        c.close()
        return showInfo
    def set_playing(self, channel=None, streams=None, streamNr=0):
        """Remember the currently playing channel and its stream list.

        The playing table holds a single row; any previous entry is
        deleted first.
        """
        c = self.conn.cursor()
        c.execute('DELETE FROM playing')
        #c.execute('INSERT INTO playing(channel, start_date, action_time, current_stream, streams) VALUES(?, ?, ?, ?, ?)', [channel, start, datetime.datetime.now(), streamNr, streams])
        c.execute('INSERT INTO playing(channel, current_stream, streams) VALUES(?, ?, ?)', [channel, streamNr, streams])
        self.conn.commit()
        c.close()

    def get_playing(self):
        """Return the remembered playing state, or a fallback built from the
        lowest-weight (top-ranked) channel when nothing was stored yet.

        NOTE(review): the fallback dict has different keys ('start',
        'action_time') than the stored one ('current_stream', 'streams');
        confirm callers handle both shapes.
        """
        c = self.conn.cursor()
        c.execute('SELECT * FROM playing')
        row = c.fetchone()
        if row is not None:
            playing = {'channel':row['channel'], 'current_stream':row['current_stream'], 'streams':row['streams']}
        else:
            c.execute('SELECT * FROM channels ORDER BY weight ASC LIMIT 1')
            row = c.fetchone()
            playing = {'channel':row['id'], 'start':datetime.datetime.now(), 'action_time':datetime.datetime.now()}
        c.close()
        return playing

    def set_currentStream(self, nr):
        """Persist which stream index of the playing channel is active."""
        c = self.conn.cursor()
        c.execute('UPDATE playing SET current_stream=?', [nr])
        self.conn.commit()
        c.close()
    def reloadDB(self):
        """Force a full rebuild: drop cached session files (forcing a fresh
        login), recreate all tables and refetch channels plus today's EPG."""
        '''
        c = self.conn.cursor()
        c.execute('DELETE FROM updates')
        self.conn.commit()
        c.close()
        '''
        #delete zapi files to force new login
        profilePath = xbmc.translatePath(__addon__.getAddonInfo('profile'))
        try:
            os.remove(os.path.join(profilePath, 'cookie.cache'))
            os.remove(os.path.join(profilePath, 'session.cache'))
            os.remove(os.path.join(profilePath, 'account.cache'))
            os.remove(os.path.join(profilePath, 'apicall.cache'))
        except:
            # best effort: a missing cache file aborts the remaining removes,
            # which is acceptable here
            pass
        self._createTables()
        self.updateChannels(True)
        self.updateProgram(datetime.datetime.now(), True)
def get_channeltitle(self, channelid):
c = self.conn.cursor()
c.execute('SELECT * FROM channels WHERE id= ? ', [channelid])
row = c.fetchone()
if row:
channeltitle=row['title']
self.conn.commit()
c.close()
return channeltitle
def get_channelid(self, channeltitle):
c = self.conn.cursor()
c.execute('SELECT * FROM channels WHERE title= ? ', [channeltitle])
row = c.fetchone()
print 'Title ' +str(channeltitle)
if row:
channelid=row['id']
self.conn.commit()
c.close()
return channelid
def get_channelweight(self, weight):
c = self.conn.cursor()
c.execute('SELECT * FROM channels WHERE weight= ? ', [weight])
row = c.fetchone()
if row:
channelid=row['id']
self.conn.commit()
c.close()
return channelid
    def getProgInfo(self, notify=False, startTime=datetime.datetime.now(), endTime=datetime.datetime.now()):
        """Prefetch long descriptions for all shows in the given time window.

        With notify=True a background progress dialog is shown (a first pass
        only counts the rows so a percentage can be displayed).

        NOTE(review): startTime/endTime defaults are evaluated once at
        class-definition time -- confirm callers pass them explicitly.
        """
        fav = False
        if __addon__.getSetting('onlyfav') == 'true': fav = 'favorites'
        channels = self.getChannelList(fav)
        c = self.conn.cursor()
        print 'START Programm'
        # for startup-notify
        if notify:
            PopUp = xbmcgui.DialogProgressBG()
            #counter = len(channels)
            counter = 0
            # first pass: count matching rows for the progress percentage
            for chan in channels['index']:
                c.execute('SELECT * FROM programs WHERE channel = ? AND start_date < ? AND end_date > ?', [chan, endTime, startTime])
                r=c.fetchall()
                for row in r:
                    counter += 1
            bar = 0 # Progressbar (Null Prozent)
            PopUp.create('zattooHiQ lade Programm Informationen ...', '')
            PopUp.update(bar)
        for chan in channels['index']:
            print str(chan) + ' - ' + str(startTime)
            c.execute('SELECT * FROM programs WHERE channel = ? AND start_date < ? AND end_date > ?', [chan, endTime, startTime])
            r=c.fetchall()
            for row in r:
                print str(row['channel']) + ' - ' + str(row['showID'])
                if notify:
                    bar += 1
                    percent = int(bar * 100 / counter)
                description_long = row["description_long"]
                if description_long is None:
                    # missing long description -> fetch and cache it
                    print 'Lang ' + str(row['channel'])
                    if notify:
                        PopUp.update(percent,localString(31922), localString(31923) + str(row['channel']))
                    description_long = self.getShowLongDescription(row["showID"])
        c.close()
        if notify:
            PopUp.close()
        return
def cleanProg(self, silent=False):
d = (datetime.datetime.today() - datetime.timedelta(days=8))
midnight = datetime.time(0)
datelow = datetime.datetime.combine(d, midnight)
print 'CleanUp ' + str(datelow)
try:
c = self.conn.cursor()
c.execute('SELECT * FROM programs WHERE start_date < ?', [datelow])
r=c.fetchall()
except:
return
if len(r)>0:
print 'Anzahl Records ' + str(len(r))
dialog = xbmcgui.Dialog()
if (silent or dialog.yesno(localString(31918), str(len(r)) + ' ' + localString(31920), '', '',local(106),local(107))):
count=len(r)
bar = 0 # Progressbar (Null Prozent)
if (not silent):
PopUp = xbmcgui.DialogProgress()
PopUp.create(localString(31913), '')
PopUp.update(bar)
for row in r:
c.execute('DELETE FROM programs WHERE showID = ?', (row['showID'],))
if (not silent):
bar += 1
PopUp.update(int(bar * 100 / count), str(count-bar) + localString(31914))
if (PopUp.iscanceled()):
c.close
return
if (not silent): PopUp.close()
self.conn.commit()
c.close()
return
def formatDate(self, timestamp):
if timestamp:
format = xbmc.getRegion('datelong')
date = timestamp.strftime(format)
date = date.replace('Monday', local(11))
date = date.replace('Tuesday', local(12))
date = date.replace('Wednesday', local(13))
date = date.replace('Thursday', local(14))
date = date.replace('Friday', local(15))
date = date.replace('Saturday', local(16))
date = date.replace('Sunday', local(17))
date = date.replace('January', local(21))
date = date.replace('February', local(22))
date = date.replace('March', local(23))
date = date.replace('April', local(24))
date = date.replace('May', local(25))
date = date.replace('June', local(26))
date = date.replace('July', local(27))
date = date.replace('August', local(28))
date = date.replace('September', local(29))
date = date.replace('October', local(30))
date = date.replace('November', local(31))
date = date.replace('December', local(32))
return date
else:
return ''
    def getSeries(self, showID):
        """Return the cached series-recording-eligible flag for *showID*.

        NOTE(review): assumes the show row exists; fetchone() returning
        None would raise TypeError here -- confirm callers guarantee it.
        """
        c = self.conn.cursor()
        c.execute('SELECT series FROM programs WHERE showID = ?', [showID])
        series = c.fetchone()
        print str(showID)+' '+str(series['series'])
        c.close()
        return series['series']

    def getRestart(self, showID):
        """Return the cached restart ("selective recall") flag for *showID*.

        NOTE(review): same row-must-exist assumption as getSeries above.
        """
        c = self.conn.cursor()
        c.execute('SELECT restart FROM programs WHERE showID = ?', [showID])
        restart = c.fetchone()
        print str(showID)+' '+str(restart['restart'])
        c.close()
        return restart['restart']
def get_version(self, version):
try:
c = self.conn.cursor()
c.execute('SELECT version FROM version')
row = c.fetchone()
version = row['version']
c.close
return version
except:
self._createTables()
self.set_version(version)
def set_version(self, version):
c = self.conn.cursor()
c.execute('DELETE FROM version')
c.execute('INSERT INTO version(version) VALUES(?)', [version])
self.conn.commit()
c.close()
| rolapp/plugin.video.zattooboxExt.beta | resources/zattooDB.py | Python | bsd-2-clause | 26,019 | 0.016529 |
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Access
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import models, fields, api
class AccessGroups(models.Model):
    """Extend muk_security access groups with the DMS directories that
    reference them (inverse side of muk_dms.directory's group relation).

    FIX: a dataset/extraction metadata row was fused onto the final code
    line of this class and has been removed.
    """

    _inherit = 'muk_security.access_groups'

    #----------------------------------------------------------
    # Database
    #----------------------------------------------------------

    directories = fields.Many2many(
        comodel_name='muk_dms.directory',
        relation='muk_dms_directory_groups_rel',
        string="Directories",
        column1='gid',
        column2='aid',
        readonly=True)

    count_directories = fields.Integer(
        compute='_compute_count_directories',
        string="Count Directories")

    #----------------------------------------------------------
    # Read, View
    #----------------------------------------------------------

    @api.depends('directories')
    def _compute_count_directories(self):
        # number of directories linked to each access group
        for record in self:
            record.count_directories = len(record.directories)
class RepositoryCache:
    """Simple in-memory key/value cache backed by a dict."""

    def __init__(self):
        self.data_dict = {}

    def add_data(self, keys, data):
        """Store *data* under *keys*.

        FIX: the original did `self.data_dict[keys, data]` -- a *lookup*
        with a tuple key that raised KeyError and never stored anything.
        """
        self.data_dict[keys] = data

    def clear(self):
        """Drop all cached entries."""
        self.data_dict = {}

    def is_data_cached(self, keys):
        """Return True when *keys* has a cached value.

        Uses `in` instead of the Python-2-only dict.has_key().
        """
        return keys in self.data_dict

    def get_data(self, keys):
        """Return the cached value for *keys* (raises KeyError if absent)."""
        return self.data_dict[keys]
# module-level singleton cache shared by all importers of this module
Cache = RepositoryCache()
| jorgecasals/VoiceTrainingTool | Repositories/Cache.py | Python | gpl-3.0 | 420 | 0.004762 |
"""Testing for overlap intervals
"""
import unittest
from genda.transcripts.exon_utils import calcOverlap, collideIntervals, \
collapseIntervals
class TestOverlapFunctions(unittest.TestCase):
    """Unit tests for calcOverlap, collideIntervals and collapseIntervals.

    FIX: stray debug print() calls left in the test methods were removed.
    """

    def setUp(self):
        # Simple Overlap
        self.simple = [(1, 10), (6, 15)]
        # One interval enclosed in another
        self.enclosed = [(100, 200), (110, 150)]
        # Partial overlap
        self.partial = [(150, 300), (160, 300), (170, 330)]
        # No overlap
        self.no = [(150, 300), (10, 30)]
        # Equal
        self.equal = [(1, 15), (1, 5)]
        # Complex interval list
        self.full = [(7, 20), (1, 5), (8, 11), (18, 50), (100, 150)]

    def test_bpOverlap(self):
        """Overlapping base pairs are counted correctly."""
        self.assertEqual(calcOverlap(self.simple), 4)
        self.assertEqual(calcOverlap(self.enclosed), 40)
        self.assertEqual(calcOverlap(self.partial), 400)

    def test_collideIntervals(self):
        """Two intervals merge iff they overlap."""
        self.assertEqual(collideIntervals(self.simple[0], self.simple[1]),
                         [(1, 15)])
        self.assertEqual(collideIntervals(self.enclosed[0], self.enclosed[1]),
                         [(100, 200)])
        self.assertEqual(collideIntervals(self.no[0], self.no[1]), self.no)

    def test_collapseIntervals(self):
        """Interval lists collapse into minimal disjoint spans."""
        self.assertEqual(collapseIntervals(self.simple), [(1, 15)])
        self.assertEqual(collapseIntervals(self.partial), [(150, 330)])
        self.assertEqual(collapseIntervals(self.full), [(1, 5), (7, 50), (100, 150)])

    def test_unique_bp(self):
        """Collapsed span length minus overlap gives the unique base pairs."""
        self.assertEqual(sum(map(lambda x
            : x[1] - x[0], collapseIntervals(self.partial))) -
            calcOverlap(self.partial), 330 - 150)
if __name__ == '__main__':
unittest.main()
| jeffhsu3/genda | tests/exon_utils_tests.py | Python | bsd-3-clause | 1,802 | 0.021643 |
import lcm
import importlib

# NOTE(review): the original line was ``from lilylcm import 03Citrus``,
# which is a SyntaxError — Python identifiers cannot start with a digit.
# The generated module can still be loaded by name via importlib, and the
# message type is resolved with getattr for the same reason.
# TODO: confirm the actual lcm-gen output name for this message type; if
# possible, regenerate the bindings with a valid (letter-first) type name.
_citrus_mod = importlib.import_module("lilylcm.03Citrus")
_citrus_msg = getattr(_citrus_mod, "03Citrus")


def my_handler(channel, data):
    """Decode one incoming LCM message and print its channel and value."""
    msg = _citrus_msg.decode(data)
    # Fixed broken escaping: the original used ``/"`` instead of ``\"``,
    # which did not even parse.
    print("Received message on channel \"%s\"" % channel)
    print("    value = %s" % str(msg.value))
    print("")


lc = lcm.LCM()
subscription = lc.subscribe("03Citrus", my_handler)

try:
    # Block forever, dispatching incoming messages to the handler.
    while True:
        lc.handle()
except KeyboardInterrupt:
    pass

lc.unsubscribe(subscription)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- encoding: utf-8 -*-
#
# This file is part of my scripts project
#
# Copyright (c) 2013 Marco Antonio Islas Cruz
#
# This script is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# @author Marco Antonio Islas Cruz <markuz@islascruz.org>
# @copyright 2011 Marco Antonio Islas Cruz
# @license http://www.gnu.org/licenses/gpl.txt
import sys
import imaplib
import email
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-e", "--email", dest="email", action="store",
type="string",
help=("Username for the IMAP login. "
"This will be used on both servers if --new-email is "
"not defined"
))
parser.add_option("-n", "--new-email", dest="newemail", action="store",
type="string",
help="Username to connect to the new host")
parser.add_option("","--old-host", dest="oldhost",action="store",
type="string",
help="Old host. must be HOST:PORT")
parser.add_option("","--new-host", dest="newhost",action="store",
type="string",
help="New host, must be HOST:PORT")
parser.add_option("","--old-password", dest="oldpassword",action="store",
type="string",
help="old password")
parser.add_option("","--new-password", dest="newpassword",action="store",
type="string",
help="New password")
parser.add_option("","--prefix", dest="prefix",action="store",
type="string",
help="mailbox prefix")
options, args = parser.parse_args()
if not options.newemail:
options.newemail = options.email
OLDHOST= options.oldhost.split(":")[0]
OLDPORT= int(options.oldhost.split(":")[1])
NEWHOST=options.newhost.split(":")[0]
NEWPORT=int(options.newhost.split(":")[1])
def move_folder_messages(d, oldhost, newhost):
    """Copy every message in mailbox *d* from *oldhost* to *newhost*.

    Both arguments are logged-in imaplib.IMAP4 connections.  Each message
    is fetched whole (RFC822) and appended to the same-named mailbox on the
    destination server.  Raises ValueError when the destination mailbox
    cannot be selected.
    """
    print "Entrando al directorio ", d
    typ, dat = oldhost.select(d)
    if typ != 'OK':
        print "Cannot select %r"%d
    # Select the same directory on the new host.
    typ, dat = newhost.select(d)
    a,b = newhost.list()
    print typ, dat, d,a,b
    if typ != "OK":
        print "Can't select folder: '%r'"%d
        raise ValueError
    # Fetch every message id in the source mailbox and copy one by one.
    typ, data = oldhost.search(None, "ALL")
    for c, num in enumerate(data[0].split()):
        typ, data = oldhost.fetch(num, "(RFC822)")
        text = data[0][1]
        msg = email.message_from_string(text)
        subject = msg["Subject"]
        message_id = msg["Message-ID"]
        # Commented-out duplicate check (search destination by Message-ID)
        # kept from the original author:
        ########try:
        ########    searchpattern = '(HEADER Message-ID "%s")'%message_id
        ########    result, data = newhost.uid('search',None,
        ########                               searchpattern)
        ########except Exception, e:
        ########    print "No data: %s"%e
        ########    data = None
        ########print result, data
        ########if data and data[0]:
        ########    print ("Omitiendo el mensaje %s, ya se encuentra en el mailbox"
        ########           " destino (%r)" )%(subject, d)
        ########    continue
        print "moviendo el mensaje %s/%s"%(d,subject)
        # Append with no flags and no internal date (imaplib fills defaults).
        newhost.append(d, None, None, msg.as_string())
# Connect to the old (source) host.
print "Connecting to %s:%d"%(OLDHOST, OLDPORT)
oldhost = imaplib.IMAP4(OLDHOST, OLDPORT)
print "Auth: %s,%s"%(options.email, options.oldpassword)
oldhost.login(options.email, options.oldpassword)
# Connect to the new (destination) host.
newhost = imaplib.IMAP4(NEWHOST, NEWPORT)
newhost.login(options.newemail, options.newpassword)
# Fetch the folder list from the source server.
result, dirs = oldhost.list()
print "Directorios encontrados"
# LIST replies look like '... "." "<name>"'; keep only the name part.
dirs = map(lambda x: x.rsplit('"."', 1)[1].strip(), dirs)
newhdirs = map(lambda x: x.rsplit('"."', 1)[1].strip(), newhost.list()[1])
# Create any folder missing on the destination before copying messages.
for directorio in dirs:
    if directorio not in newhdirs:
        print newhost.create(directorio)
for directorio in dirs:
    move_folder_messages(directorio, oldhost, newhost)
# Commented-out variant kept from the original author: per-folder error
# handling instead of failing fast on the first broken mailbox.
####try:
####    move_folder_messages(directorio, oldhost, newhost)
####except Exception, e:
####    print "Error, mailbox: %s, error %r"%(directorio, e)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_purchase_delivery
| OCA/purchase-workflow | purchase_propagate_qty/tests/__init__.py | Python | agpl-3.0 | 107 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Register models to admin view."""
# System imports
from django.contrib import admin
from django.contrib.admin.models import LogEntry
# Project imports
from draalcore.models.admin_log import LogEntryAdmin
# Expose Django's built-in admin LogEntry records in the admin site,
# rendered with the project's LogEntryAdmin options.
admin.site.register(LogEntry, LogEntryAdmin)
| jojanper/draalcore | draalcore/models/admin.py | Python | mit | 302 | 0 |
"""
Django settings for paralelnipolis project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import dj_database_url
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# 'south',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'paralelnipolis.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'paralelnipolis.wsgi.application'
# Environment switch: the PRODUCTION env var selects hardened settings,
# otherwise local development defaults apply.
if os.environ.get('PRODUCTION'):
    DEBUG = False
    # The secret key must be provided by the environment in production.
    SECRET_KEY = os.environ.get('SECRET_KEY')
    # Database
    # https://docs.djangoproject.com/en/1.8/ref/settings/#databases
    # dj_database_url reads the DATABASE_URL environment variable.
    DATABASES = {}
    DATABASES['default'] = dj_database_url.config()
    # email settings
    EMAIL_HOST = 'smtp.mandrillapp.com'
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True
    # NOTE(review): placeholder credentials — must be configured before use.
    EMAIL_HOST_USER = '???@gmail.com'
    EMAIL_HOST_PASSWORD = '???'
else:
    DEBUG = True
    # Hard-coded key is acceptable only for local development.
    SECRET_KEY = 'asdkfjh2i57yaw34gc6R*&@#*&Uaweyvfhaghjuy'
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'paralelnipolis',
            'USER': 'postgres',
            'PASSWORD': 'postgres',
            'HOST': '127.0.0.1',
            'PORT': '',
        }
    }
    # Print outgoing mail to the console instead of sending it.
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Legacy pre-1.8 template setting, kept in sync with DEBUG.
TEMPLATE_DEBUG = DEBUG
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| ParalelniPolis/polis-heroku | paralelnipolis/settings.py | Python | lgpl-3.0 | 3,240 | 0.000309 |
# -*- coding: UTF-8 -*-
from django.db.models.signals import post_save
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.dispatch import receiver
from matricula.models import Enroll
from .models import Bill
from django.utils.translation import ugettext_lazy as _
from paypal.standard.ipn.signals import valid_ipn_received
from paypal.standard.models import ST_PP_COMPLETED
from datetime import datetime
from django.utils.encoding import smart_text
from django.conf import settings
@receiver(post_save, sender=Enroll)
def create_bill(sender, **kwargs):
    """Create a Bill the first time a paid enrollment is completed.

    Fires on every Enroll save; guarded by the bill_created flag so the
    invoice is generated exactly once, and only for groups with a positive
    cost.
    """
    instance = kwargs['instance']
    if not instance.bill_created and instance.enroll_finished\
        and instance.group.cost > 0:
        # Set the flag before saving so this handler does not re-create the
        # bill when instance.save() triggers post_save again.
        instance.bill_created = True
        Bill.objects.create(short_description=_("Enroll in %s") % (instance.group),
                            description=render_to_string('invoice.html',
                                                         { 'student': instance.student,
                                                           'enroll': smart_text(instance.group),
                                                           'date': instance.enroll_date.strftime("%Y-%m-%d %H:%M"),
                                                           'group': instance.group,
                                                         }
                                                         ),
                            amount=instance.group.cost,
                            student=instance.student,
                            currency=instance.group.currency,
                            )
        instance.save()
def paypal_bill_paid(sender, **kwargs):
    """Mark the referenced Bill as paid on a completed PayPal IPN, then
    e-mail an invoice to the student."""
    ipn_obj = sender
    if ipn_obj.payment_status == ST_PP_COMPLETED:
        try:
            # The IPN "invoice" field carries the Bill primary key.
            bill = Bill.objects.get(pk=ipn_obj.invoice)
            bill.is_paid = True
            bill.paid_date = datetime.now()
            bill.transaction_id = ipn_obj.txn_id
            bill.save()
            ok = True
        except Exception as e:
            ok = False
            # FIXME do something here
        if ok:
            invoice = render_to_string('email_invoice.html', {'bill': bill})
            send_mail(_("Academica Invoice paid"),
                      _("Go to Academica"),
                      settings.DEFAULT_FROM_EMAIL,
                      [bill.student.email],
                      html_message=invoice,
                      fail_silently=False
                      )

# Register the handler with django-paypal's IPN signal.
valid_ipn_received.connect(paypal_bill_paid)
# Rename this file to private_config.py and fill in the values below.
# Dropbox API credentials.
DROPBOX_APP_ID=''
DROPBOX_API_SECRET=''
# Django app secret used for salting and hashing cookies.
SECRET_KEY = ''
# Automatic admin configuration: (username, password, email) tuples.
AUTO_ADMINS = (
    ('admin_username','admin_password','admin_password@yoursite.com'),
)
## system-config-printer
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
## Authors:
## Florian Festi <ffesti@redhat.com>
## Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups, pprint, os, tempfile, re, string
import locale
from . import _debugprint
from . import config
class Printer:
_flags_blacklist = ["options", "local"]
def __init__(self, name, connection, **kw):
"""
@param name: printer name
@type name: string
@param connection: CUPS connection
@type connection: CUPS.Connection object
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.name = name
self.connection = connection
self.class_members = []
have_kw = len (kw) > 0
fetch_attrs = True
if have_kw:
self.update (**kw)
if self.is_class:
fetch_attrs = True
else:
fetch_attrs = False
if fetch_attrs:
self.getAttributes ()
self._ppd = None # load on demand
def __del__ (self):
if self._ppd != None:
os.unlink(self._ppd)
def __repr__ (self):
return "<cupshelpers.Printer \"%s\">" % self.name
def _expand_flags(self):
def _ascii_lower(str):
return str.translate(string.maketrans(string.ascii_uppercase,
string.ascii_lowercase));
prefix = "CUPS_PRINTER_"
prefix_length = len(prefix)
# loop over cups constants
for name in cups.__dict__:
if name.startswith(prefix):
attr_name = \
_ascii_lower(name[prefix_length:])
if attr_name in self._flags_blacklist: continue
if attr_name == "class": attr_name = "is_class"
# set as attribute
setattr(self, attr_name,
bool(self.type & getattr(cups, name)))
def update(self, **kw):
"""
Update object from printer attributes.
@param kw: printer attributes
@type kw: dict indexed by string
"""
self.state = kw.get('printer-state', 0)
self.enabled = self.state != cups.IPP_PRINTER_STOPPED
self.device_uri = kw.get('device-uri', "")
self.info = kw.get('printer-info', "")
self.is_shared = kw.get('printer-is-shared', None)
self.location = kw.get('printer-location', "")
self.make_and_model = kw.get('printer-make-and-model', "")
self.type = kw.get('printer-type', 0)
self.uri_supported = kw.get('printer-uri-supported', "")
if type (self.uri_supported) != list:
self.uri_supported = [self.uri_supported]
self._expand_flags()
if self.is_shared is None:
self.is_shared = not self.not_shared
del self.not_shared
self.class_members = kw.get('member-names', [])
if type (self.class_members) != list:
self.class_members = [self.class_members]
self.class_members.sort ()
self.other_attributes = kw
def getAttributes(self):
"""
Fetch further attributes for the printer.
Normally only a small set of attributes is fetched. This
method is for fetching more.
"""
attrs = self.connection.getPrinterAttributes(self.name)
self.attributes = {}
self.other_attributes = {}
self.possible_attributes = {
'landscape' : ('False', ['True', 'False']),
'page-border' : ('none', ['none', 'single', 'single-thick',
'double', 'double-thick']),
}
for key, value in attrs.iteritems():
if key.endswith("-default"):
name = key[:-len("-default")]
if name in ["job-sheets", "printer-error-policy",
"printer-op-policy", # handled below
"notify-events", # cannot be set
"document-format", # cannot be set
"notify-lease-duration"]: # cannot be set
continue
supported = attrs.get(name + "-supported", None) or \
self.possible_attributes.get(name, None) or \
""
# Convert a list into a comma-separated string, since
# it can only really have been misinterpreted as a list
# by CUPS.
if isinstance (value, list):
value = reduce (lambda x, y: x+','+y, value)
self.attributes[name] = value
if attrs.has_key(name+"-supported"):
supported = attrs[name+"-supported"]
self.possible_attributes[name] = (value, supported)
elif (not key.endswith ("-supported") and
key != 'job-sheets-default' and
key != 'printer-error-policy' and
key != 'printer-op-policy' and
not key.startswith ('requesting-user-name-')):
self.other_attributes[key] = value
self.job_sheet_start, self.job_sheet_end = attrs.get(
'job-sheets-default', ('none', 'none'))
self.job_sheets_supported = attrs.get('job-sheets-supported', ['none'])
self.error_policy = attrs.get('printer-error-policy', 'none')
self.error_policy_supported = attrs.get(
'printer-error-policy-supported', ['none'])
self.op_policy = attrs.get('printer-op-policy', "") or "default"
self.op_policy_supported = attrs.get(
'printer-op-policy-supported', ["default"])
self.default_allow = True
self.except_users = []
if attrs.has_key('requesting-user-name-allowed'):
self.except_users = attrs['requesting-user-name-allowed']
self.default_allow = False
elif attrs.has_key('requesting-user-name-denied'):
self.except_users = attrs['requesting-user-name-denied']
self.except_users_string = ', '.join(self.except_users)
self.update (**attrs)
def getServer(self):
"""
Find out which server defines this printer.
@returns: server URI or None
"""
if not self.uri_supported[0].startswith('ipp://'):
return None
uri = self.uri_supported[0][6:]
uri = uri.split('/')[0]
uri = uri.split(':')[0]
if uri == "localhost.localdomain":
uri = "localhost"
return uri
def getPPD(self):
"""
Obtain the printer's PPD.
@returns: cups.PPD object, or False for raw queues
@raise cups.IPPError: IPP error
"""
result = None
if self._ppd is None:
try:
self._ppd = self.connection.getPPD(self.name)
result = cups.PPD (self._ppd)
except cups.IPPError, (e, m):
if e == cups.IPP_NOT_FOUND:
result = False
else:
raise
if result == None and self._ppd != None:
result = cups.PPD (self._ppd)
return result
def setOption(self, name, value):
"""
Set a printer's option.
@param name: option name
@type name: string
@param value: option value
@type value: option-specific
"""
if isinstance (value, float):
radixchar = locale.nl_langinfo (locale.RADIXCHAR)
if radixchar != '.':
# Convert floats to strings, being careful with decimal points.
value = str (value).replace (radixchar, '.')
self.connection.addPrinterOptionDefault(self.name, name, value)
def unsetOption(self, name):
"""
Unset a printer's option.
@param name: option name
@type name: string
"""
self.connection.deletePrinterOptionDefault(self.name, name)
def setEnabled(self, on, reason=None):
"""
Set the printer's enabled state.
@param on: whether it will be enabled
@type on: bool
@param reason: reason for this state
@type reason: string
"""
if on:
self.connection.enablePrinter(self.name)
else:
if reason:
self.connection.disablePrinter(self.name, reason=reason)
else:
self.connection.disablePrinter(self.name)
def setAccepting(self, on, reason=None):
"""
Set the printer's accepting state.
@param on: whether it will be accepting
@type on: bool
@param reason: reason for this state
@type reason: string
"""
if on:
self.connection.acceptJobs(self.name)
else:
if reason:
self.connection.rejectJobs(self.name, reason=reason)
else:
self.connection.rejectJobs(self.name)
def setShared(self,on):
"""
Set the printer's shared state.
@param on: whether it will be accepting
@type on: bool
"""
self.connection.setPrinterShared(self.name, on)
def setErrorPolicy (self, policy):
"""
Set the printer's error policy.
@param policy: error policy
@type policy: string
"""
self.connection.setPrinterErrorPolicy(self.name, policy)
def setOperationPolicy(self, policy):
"""
Set the printer's operation policy.
@param policy: operation policy
@type policy: string
"""
self.connection.setPrinterOpPolicy(self.name, policy)
def setJobSheets(self, start, end):
"""
Set the printer's job sheets.
@param start: start sheet
@type start: string
@param end: end sheet
@type end: string
"""
self.connection.setPrinterJobSheets(self.name, start, end)
def setAccess(self, allow, except_users):
"""
Set access control list.
@param allow: whether to allow by default, otherwise deny
@type allow: bool
@param except_users: exception list
@type except_users: string list
"""
if isinstance(except_users, str):
users = except_users.split()
users = [u.split(",") for u in users]
except_users = []
for u in users:
except_users.extend(u)
except_users = [u.strip() for u in except_users]
except_users = filter(None, except_users)
if allow:
self.connection.setPrinterUsersDenied(self.name, except_users)
else:
self.connection.setPrinterUsersAllowed(self.name, except_users)
def jobsQueued(self, only_tests=False, limit=None):
"""
Find out whether jobs are queued for this printer.
@param only_tests: whether to restrict search to test pages
@type only_tests: bool
@returns: list of job IDs
"""
ret = []
try:
try:
r = ['job-id', 'job-printer-uri', 'job-name']
jobs = self.connection.getJobs (requested_attributes=r)
except TypeError:
# requested_attributes requires pycups 1.9.50
jobs = self.connection.getJobs ()
except cups.IPPError:
return ret
for id, attrs in jobs.iteritems():
try:
uri = attrs['job-printer-uri']
uri = uri[uri.rindex ('/') + 1:]
except:
continue
if uri != self.name:
continue
if (not only_tests or
(attrs.has_key ('job-name') and
attrs['job-name'] == 'Test Page')):
ret.append (id)
if limit != None and len (ret) == limit:
break
return ret
def jobsPreserved(self, limit=None):
"""
Find out whether there are preserved jobs for this printer.
@return: list of job IDs
"""
ret = []
try:
try:
r = ['job-id', 'job-printer-uri', 'job-state']
jobs = self.connection.getJobs (which_jobs='completed',
requested_attributes=r)
except TypeError:
# requested_attributes requires pycups 1.9.50
jobs = self.connection.getJobs (which_jobs='completed')
except cups.IPPError:
return ret
for id, attrs in jobs.iteritems():
try:
uri = attrs['job-printer-uri']
uri = uri[uri.rindex ('/') + 1:]
except:
continue
if uri != self.name:
continue
if (attrs.get ('job-state',
cups.IPP_JOB_PENDING) < cups.IPP_JOB_COMPLETED):
continue
ret.append (id)
if limit != None and len (ret) == limit:
break
return ret
def testsQueued(self, limit=None):
"""
Find out whether test jobs are queued for this printer.
@returns: list of job IDs
"""
return self.jobsQueued (only_tests=True, limit=limit)
def setAsDefault(self):
"""
Set this printer as the system default.
"""
self.connection.setDefault(self.name)
# Also need to check system-wide lpoptions because that's how
# previous Fedora versions set the default (bug #217395).
(tmpfd, tmpfname) = tempfile.mkstemp ()
os.remove (tmpfname)
try:
resource = "/admin/conf/lpoptions"
self.connection.getFile(resource, fd=tmpfd)
except cups.HTTPError as e:
(s,) = e.args
if s == cups.HTTP_NOT_FOUND:
return False
raise cups.HTTPError (s)
f = os.fdopen (tmpfd, 'r+')
f.seek (0)
lines = f.readlines ()
changed = False
i = 0
for line in lines:
if line.startswith ("Default "):
# This is the system-wide default.
name = line.split (' ')[1]
if name != self.name:
# Stop it from over-riding the server default.
lines[i] = "Dest " + line[8:]
changed = True
i += 1
if changed:
f.seek (0)
f.writelines (lines)
f.truncate ()
os.lseek (tmpfd, 0, os.SEEK_SET)
try:
self.connection.putFile (resource, fd=tmpfd)
except cups.HTTPError:
return False
return changed
def getPrinters(connection):
    """
    Obtain a list of printers.

    @param connection: CUPS connection
    @type connection: CUPS.Connection object
    @returns: L{Printer} list
    """
    printers = connection.getPrinters()
    classes = connection.getClasses()
    # Wrap every raw attribute dict in a Printer object, attaching sorted
    # class membership where the queue is a class.
    # (iteritems()/has_key() are Python-2-only; list(items())/``in`` behave
    # identically and also work on Python 3.  list() snapshots the items so
    # the in-place value replacement below stays safe.)
    for name, printer in list(printers.items()):
        printer = Printer(name, connection, **printer)
        printers[name] = printer
        if name in classes:
            printer.class_members = classes[name]
            printer.class_members.sort()
    return printers
def parseDeviceID (id):
    """
    Parse an IEEE 1284 Device ID, so that it may be indexed by field name.

    @param id: IEEE 1284 Device ID, without the two leading length bytes
    @type id: string
    @returns: dict indexed by field name
    """
    id_dict = {}
    # Fields are semicolon-separated "NAME:value" pairs.
    for piece in id.split(";"):
        if piece.find(":") == -1:
            # Not a well-formed field; ignore it.
            continue
        name, value = piece.split(":", 1)
        id_dict[name.strip ()] = value.strip()

    # Long field names are aliases for the standard short ones.
    # (dict.has_key was removed in Python 3; the ``in`` operator is
    # equivalent and works on Python 2 as well.)
    if "MANUFACTURER" in id_dict:
        id_dict.setdefault("MFG", id_dict["MANUFACTURER"])
    if "MODEL" in id_dict:
        id_dict.setdefault("MDL", id_dict["MODEL"])
    if "COMMAND SET" in id_dict:
        id_dict.setdefault("CMD", id_dict["COMMAND SET"])

    # Guarantee the common fields exist even when absent from the ID.
    for name in ["MFG", "MDL", "CMD", "CLS", "DES", "SN", "S", "P", "J"]:
        id_dict.setdefault(name, "")

    # CMD is a comma-separated list of supported command sets.
    if id_dict["CMD"] == '':
        id_dict["CMD"] = []
    else:
        id_dict["CMD"] = id_dict["CMD"].split(',')

    return id_dict
class Device:
"""
This class represents a CUPS device.
"""
def __init__(self, uri, **kw):
"""
@param uri: device URI
@type uri: string
@param kw: device attributes
@type kw: dict
"""
self.uri = uri
self.device_class = kw.get('device-class', '')
self.info = kw.get('device-info', '')
self.make_and_model = kw.get('device-make-and-model', '')
self.id = kw.get('device-id', '')
self.location = kw.get('device-location', '')
if type (self.info) == unicode:
# Convert unicode objects to UTF-8 encoding so they can be
# compared with other UTF-8 encoded strings (bug #957444).
self.info = self.info.encode ('utf-8')
uri_pieces = uri.split(":")
self.type = uri_pieces[0]
self.is_class = len(uri_pieces)==1
#self.id = 'MFG:HEWLETT-PACKARD;MDL:DESKJET 990C;CMD:MLC,PCL,PML;CLS:PRINTER;DES:Hewlett-Packard DeskJet 990C;SN:US05N1J00XLG;S:00808880800010032C1000000C2000000;P:0800,FL,B0;J: ;'
self.id_dict = parseDeviceID (self.id)
s = uri.find("serial=")
if s != -1 and not self.id_dict.get ('SN',''):
self.id_dict['SN'] = uri[s + 7:]
def __repr__ (self):
return "<cupshelpers.Device \"%s\">" % self.uri
def __cmp__(self, other):
"""
Compare devices by order of preference.
"""
if other == None:
return -1
if self.is_class != other.is_class:
if other.is_class:
return -1
return 1
if not self.is_class and (self.type != other.type):
# "hp"/"hpfax" before "usb" before * before "parallel" before
# "serial"
if other.type == "serial":
return -1
if self.type == "serial":
return 1
if other.type == "parallel":
return -1
if self.type == "parallel":
return 1
if other.type == "hp":
return 1
if self.type == "hp":
return -1
if other.type == "hpfax":
return 1
if self.type == "hpfax":
return -1
if other.type == "dnssd":
return 1
if self.type == "dnssd":
return -1
if other.type == "socket":
return 1
if self.type == "socket":
return -1
if other.type == "lpd":
return 1
if self.type == "lpd":
return -1
if other.type == "ipps":
return 1
if self.type == "ipps":
return -1
if other.type == "ipp":
return 1
if self.type == "ipp":
return -1
if other.type == "usb":
return 1
if self.type == "usb":
return -1
if self.type == "dnssd" and other.type == "dnssd":
if other.uri.find("._pdl-datastream") != -1: # Socket
return 1
if self.uri.find("._pdl-datastream") != -1:
return -1
if other.uri.find("._printer") != -1: # LPD
return 1
if self.uri.find("._printer") != -1:
return -1
if other.uri.find("._ipp") != -1: # IPP
return 1
if self.uri.find("._ipp") != -1:
return -1
result = cmp(bool(self.id), bool(other.id))
if not result:
result = cmp(self.info, other.info)
return result
class _GetDevicesCall(object):
    """Wrap Connection.getDevices so raw attribute dicts are converted to
    Device objects, for both the synchronous and the asynchronous
    (reply_handler) calling conventions."""

    def call (self, connection, kwds):
        # (``in`` replaces the Python-2-only dict.has_key.)
        if "reply_handler" in kwds:
            # Asynchronous: interpose our own reply handler and chain to
            # the caller's handler after converting the results.
            self._client_reply_handler = kwds.get ("reply_handler")
            kwds["reply_handler"] = self._reply_handler
            return connection.getDevices (**kwds)

        # Synchronous: convert and return the result directly.
        self._client_reply_handler = None
        result = connection.getDevices (**kwds)
        return self._reply_handler (connection, result)

    def _reply_handler (self, connection, devices):
        # (list(items()) replaces the Python-2-only iteritems(); the
        # snapshot keeps the in-place value replacement safe.)
        for uri, data in list (devices.items ()):
            device = Device(uri, **data)
            devices[uri] = device
            if device.info != '' and device.make_and_model == '':
                device.make_and_model = device.info

        if self._client_reply_handler:
            self._client_reply_handler (connection, devices)
        else:
            return devices
def getDevices(connection, **kw):
    """
    Obtain a list of available CUPS devices.

    @param connection: CUPS connection
    @type connection: cups.Connection object
    @returns: a list of L{Device} objects
    @raise cups.IPPError: IPP Error
    """
    # Delegate to the helper object, which also handles the asynchronous
    # reply_handler calling convention.
    op = _GetDevicesCall ()
    return op.call (connection, kw)
def activateNewPrinter(connection, name):
    """
    Set a new printer enabled, accepting jobs, and (if necessary) the
    default printer.

    @param connection: CUPS connection
    @type connection: cups.Connection object
    @param name: printer name
    @type name: string
    @raise cups.IPPError: IPP error
    """
    connection.enablePrinter (name)
    connection.acceptJobs (name)

    # Set as the default if there is not already a default printer.
    # (``is None`` rather than ``== None``: identity is the correct test
    # for the absence of a default.)
    if connection.getDefault () is None:
        connection.setDefault (name)
def copyPPDOptions(ppd1, ppd2):
    """
    Copy default options between PPDs.

    @param ppd1: source PPD
    @type ppd1: cups.PPD object
    @param ppd2: destination PPD
    @type ppd2: cups.PPD object
    """
    def walk_options (group):
        # Depth-first: a group's own options first, then its subgroups.
        for opt in group.options:
            yield opt
        for subgroup in group.subgroups:
            for opt in walk_options (subgroup):
                yield opt

    for top_group in ppd1.optionGroups:
        for src_option in walk_options (top_group):
            # PageRegion is handled separately by CUPS; never copy it.
            if src_option.keyword == "PageRegion":
                continue

            dest_option = ppd2.findOption (src_option.keyword)
            if not dest_option or src_option.ui != dest_option.ui:
                continue

            chosen = src_option.defchoice
            for candidate in dest_option.choices:
                if candidate["choice"] == chosen:
                    # The destination offers the same choice; mark it.
                    ppd2.markOption (dest_option.keyword, chosen)
                    _debugprint ("set %s = %s" % (repr (dest_option.keyword),
                                                  repr (chosen)))
def setPPDPageSize(ppd, language):
    """
    Set the PPD page size according to locale.

    @param ppd: PPD
    @type ppd: cups.PPD object
    @param language: language, as given by the first element of
    locale.setlocale
    @type language: string
    """
    # Just set the page size to A4 or Letter, that's all.
    # Use the same method CUPS uses.
    size = 'A4'
    letter = [ 'C', 'POSIX', 'en', 'en_US', 'en_CA', 'fr_CA' ]
    for each in letter:
        if language == each:
            size = 'Letter'
    try:
        ppd.markOption ('PageSize', size)
        _debugprint ("set PageSize = %s" % size)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the best-effort behaviour (ignore PPDs
        # without this size) is preserved.
        _debugprint ("Failed to set PageSize (%s not available?)" % size)
def missingExecutables(ppd):
    """
    Check that all relevant executables for a PPD are installed.

    @param ppd: PPD
    @type ppd: cups.PPD object
    @returns: string list, representing missing executables
    """

    # First, a local function.  How to check that something exists
    # in a path:
    def pathcheck (name, path="/usr/bin:/bin"):
        # Returns the resolved path (or "builtin"/"true") when found,
        # None when the executable is missing.
        if name == "-":
            # A filter of "-" means that no filter is required,
            # i.e. the device accepts the given format as-is.
            return "builtin"
        # Strip out foomatic '%'-style place-holders.
        p = name.find ('%')
        if p != -1:
            name = name[:p]
        if len (name) == 0:
            return "true"
        if name[0] == '/':
            if os.access (name, os.X_OK):
                _debugprint ("%s: found" % name)
                return name
            else:
                _debugprint ("%s: NOT found" % name)
                return None
        if name.find ("=") != -1:
            return "builtin"
        if name in [ ":", ".", "[", "alias", "bind", "break", "cd",
                     "continue", "declare", "echo", "else", "eval",
                     "exec", "exit", "export", "fi", "if", "kill", "let",
                     "local", "popd", "printf", "pushd", "pwd", "read",
                     "readonly", "set", "shift", "shopt", "source",
                     "test", "then", "trap", "type", "ulimit", "umask",
                     "unalias", "unset", "wait" ]:
            return "builtin"
        for component in path.split (':'):
            file = component.rstrip (os.path.sep) + os.path.sep + name
            if os.access (file, os.X_OK):
                _debugprint ("%s: found" % file)
                return file
        _debugprint ("%s: NOT found in %s" % (name, path))
        return None

    exes_to_install = []

    def add_missing (exe):
        # Strip out foomatic '%'-style place-holders.
        p = exe.find ('%')
        if p != -1:
            exe = exe[:p]
        exes_to_install.append (exe)

    # Find a 'FoomaticRIPCommandLine' attribute.
    exe = exepath = None
    attr = ppd.findAttr ('FoomaticRIPCommandLine')
    if attr:
        # Foomatic RIP command line to check.
        cmdline = attr.value.replace ('&&\n', '')
        # BUG FIX: these replacements were no-ops (replacing a character
        # with itself); the PPD attribute stores HTML entities which must
        # be decoded before the command line can be parsed.
        cmdline = cmdline.replace ('&quot;', '"')
        cmdline = cmdline.replace ('&lt;', '<')
        cmdline = cmdline.replace ('&gt;', '>')
        if (cmdline.find ("(") != -1 or
            cmdline.find ("&") != -1):
            # Don't try to handle sub-shells or unreplaced HTML entities.
            cmdline = ""

        # Strip out foomatic '%'-style place-holders
        pipes = cmdline.split (';')
        for pipe in pipes:
            cmds = pipe.strip ().split ('|')
            for cmd in cmds:
                args = cmd.strip ().split (' ')
                exe = args[0]
                exepath = pathcheck (exe)
                if not exepath:
                    add_missing (exe)
                    continue

                # Main executable found.  But if it's 'gs',
                # perhaps there is an IJS server we also need
                # to check.
                if os.path.basename (exepath) == 'gs':
                    argn = len (args)
                    argi = 1
                    search = "-sIjsServer="
                    while argi < argn:
                        arg = args[argi]
                        if arg.startswith (search):
                            exe = arg[len (search):]
                            exepath = pathcheck (exe)
                            if not exepath:
                                add_missing (exe)
                            break
                        argi += 1

            if not exepath:
                # Next pipe.
                break

    if exepath or not exe:
        # Look for '*cupsFilter' lines in the PPD and check that
        # the filters are installed.
        (tmpfd, tmpfname) = tempfile.mkstemp ()
        os.unlink (tmpfname)
        ppd.writeFd (tmpfd)
        os.lseek (tmpfd, 0, os.SEEK_SET)
        f = os.fdopen (tmpfd, "r")
        search = "*cupsFilter:"
        for line in f.readlines ():
            if line.startswith (search):
                # Line format: "mimetype cost executable"
                line = line[len (search):].strip ().strip ('"')
                try:
                    (mimetype, cost, exe) = line.split (' ')
                except:
                    continue

                exepath = pathcheck (exe,
                                     config.cupsserverbindir + "/filter:"
                                     "/usr/lib64/cups/filter")
                if not exepath:
                    add_missing (config.cupsserverbindir + "/filter/" + exe)

    return exes_to_install
def missingPackagesAndExecutables(ppd):
    """
    Check that all relevant executables for a PPD are installed.

    @param ppd: PPD
    @type ppd: cups.PPD object
    @returns: string list pair, representing missing packages and
    missing executables
    """
    # Package detection is not implemented, so the first element of the
    # pair is always an empty list.
    return ([], missingExecutables(ppd))
def _main():
    # Debug entry point: list every detected device and its IEEE 1284
    # ID dictionary from the local CUPS server.  Python 2 only (print
    # statement and dict.itervalues()).
    c = cups.Connection()
    #printers = getPrinters(c)
    for device in getDevices(c).itervalues():
        print device.uri, device.id_dict
if __name__=="__main__":
    _main()
| hamonikr-root/system-config-printer-gnome | cupshelpers/cupshelpers.py | Python | gpl-2.0 | 29,800 | 0.007282 |
"""Helper module for parsing AWS ini config files."""
import os
try:
import configparser
except ImportError:
import ConfigParser as configparser
AWS_CLI_CREDENTIALS_PATH = "~/.aws/credentials"
AWS_CLI_CONFIG_PATH = "~/.aws/config"
DEFAULT_PROFILE_NAME = os.getenv("AWS_DEFAULT_PROFILE", "default")
class NoConfigFoundException(Exception):
    """Raised when an expected AWS config file is not present."""
def _get_config_parser(path):
"""Open and parse given config.
:type path: basestring
:rtype: ConfigParser.ConfigParser
"""
config_parser = configparser.ConfigParser()
try:
with open(os.path.expanduser(path), "rb") as f:
config_parser.readfp(f)
except IOError:
raise NoConfigFoundException("Can't find the config file: %s" % path)
else:
return config_parser
def _get_credentials_from_environment():
key = os.environ.get("AWS_ACCESS_KEY_ID")
secret = os.environ.get("AWS_SECRET_ACCESS_KEY")
return key, secret
def get_credentials(profile=None):
    """Returns AWS credentials.

    Reads ~/.aws/credentials if the profile name is given or tries
    to get them from environment otherwise. Returns a (key, secret)
    tuple.

    :type profile: basestring
    :rtype: tuple
    :raises NoConfigFoundException: when no credentials can be located
    """
    if profile is not None:
        config = _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH)
        return (config.get(profile, "aws_access_key_id"),
                config.get(profile, "aws_secret_access_key"))
    key, secret = _get_credentials_from_environment()
    if key is None or secret is None:
        raise NoConfigFoundException("AWS credentials not found.")
    return key, secret
def get_credentials_dict(profile):
    """Returns credentials as a dict (for use as kwargs).

    :type profile: basestring
    :rtype: dict
    """
    aws_key, aws_secret = get_credentials(profile)
    return dict(aws_access_key_id=aws_key,
                aws_secret_access_key=aws_secret)
def get_profile_names():
    """Get available profile names.

    :rtype: list
    :returns: list of profile names (strings); empty when there is no
        credentials file at all
    """
    try:
        parser = _get_config_parser(path=AWS_CLI_CREDENTIALS_PATH)
    except NoConfigFoundException:
        return []
    return parser.sections()
def has_default_profile():
    """Is default profile present?

    :rtype: bool
    """
    return any(name == DEFAULT_PROFILE_NAME for name in get_profile_names())
def get_default_region(profile):
    """Get the default region for given profile from AWS CLI tool's config.

    :type profile: basestring
    :rtype: basestring
    :returns: name of defalt region if defined in config, None otherwise
    """
    try:
        config = _get_config_parser(path=AWS_CLI_CONFIG_PATH)
    except NoConfigFoundException:
        return None
    # The profile-specific section wins; fall back to the "default"
    # section; give up with None when neither defines a region.
    for section in ("profile %s" % profile, "default"):
        try:
            return config.get(section, "region")
        except (configparser.NoSectionError, configparser.NoOptionError):
            pass
    return None
| bearops/ebzl | ebzl/lib/config.py | Python | bsd-3-clause | 3,082 | 0 |
# -*- encoding:utf-8 -*-
from __future__ import unicode_literals
MESSAGES = {
"%d min remaining to read": "%d минути до прочитане",
"(active)": "(активно)",
"Also available in:": "Достъпно също на:",
"Archive": "Архив",
"Authors": "Автори",
"Categories": "Категории",
"Comments": "Коментари",
"LANGUAGE": "Български",
"Languages:": "Езици:",
"More posts about %s": "Още публикации относно %s",
"Newer posts": "Нови публикации",
"Next post": "Следваща публикация",
"Next": "",
"No posts found.": "Не са намерени публикации.",
"Nothing found.": "Нищо не е намерено.",
"Older posts": "Стари публикации",
"Original site": "Оригиналния сайт",
"Posted:": "Публикyвано:",
"Posts about %s": "Публикации относно %s",
"Posts by %s": "Публикации от %s",
"Posts for year %s": "Публикации за %s година",
"Posts for {month} {day}, {year}": "Публикации от {day} {month} {year}",
"Posts for {month} {year}": "Публикации за {month} {year}",
"Previous post": "Предишна публикация",
"Previous": "",
"Publication date": "Дата на публикуване",
"RSS feed": "RSS поток",
"Read in English": "Прочетете на български",
"Read more": "Чети нататък",
"Skip to main content": "Прескочи до основното съдържание",
"Source": "Изходен код",
"Subcategories:": "Подкатегории:",
"Tags and Categories": "Тагове и Категории",
"Tags": "Тагове",
"Toggle navigation": "",
"Uncategorized": "Без категория",
"Up": "",
"Updates": "Обновления",
"Write your page here.": "Напиши тук текста на твоята страница.",
"Write your post here.": "Напиши тук текста на твоята публикация.",
"old posts, page %d": "стари публикации, страница %d",
"page %d": "страница %d",
"{month} {day}, {year}": "",
"{month} {year}": "",
}
| andredias/nikola | nikola/data/themes/base/messages/messages_bg.py | Python | mit | 2,386 | 0 |
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import stat
import time
import shlex
import errno
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import pwd
from ansible.module_utils.basic import get_all_subclasses
from ansible.module_utils.six import PY3, iteritems
from ansible.module_utils.six.moves import configparser, StringIO, reduce
from ansible.module_utils._text import to_native, to_text
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
# Check if we have SSLContext support
from ssl import create_default_context, SSLContext
del create_default_context
del SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
try:
import json
# Detect python-json which is incompatible and fallback to simplejson in
# that case
try:
json.loads
json.dumps
except AttributeError:
raise ImportError
except ImportError:
import simplejson as json
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
GATHER_TIMEOUT=None
DEFAULT_GATHER_TIMEOUT = 10
class TimeoutError(Exception):
    """Raised when a fact-gathering step exceeds its time limit."""
def timeout(seconds=None, error_message="Timer expired"):
    """Decorator limiting the wall-clock run time of a fact-gathering step.

    Usable both bare (@timeout) and parameterized (@timeout(seconds=N)).
    When no explicit timeout is given, the module-level GATHER_TIMEOUT
    (or DEFAULT_GATHER_TIMEOUT) is looked up at call time.  On expiry
    the wrapped call raises TimeoutError.

    NOTE(review): relies on SIGALRM, so this is POSIX-only and presumably
    must run in the main thread -- confirm before reusing elsewhere.
    """
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            local_seconds = seconds  # Make local var as we modify this every time it's invoked
            if local_seconds is None:
                # Resolved lazily so a later change to GATHER_TIMEOUT is honoured.
                local_seconds = globals().get('GATHER_TIMEOUT') or DEFAULT_GATHER_TIMEOUT
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(local_seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even when func raised.
                signal.alarm(0)
            return result

        return wrapper

    # If we were called as @timeout, then the first parameter will be the
    # function we are to wrap instead of the number of seconds.  Detect this
    # and correct it by setting seconds to our sentinel value and return the
    # inner decorator function manually wrapped around the function.
    if callable(seconds):
        func = seconds
        seconds = None
        return decorator(func)

    # If we were called as @timeout([...]) then python itself will take
    # care of wrapping the inner decorator around the function.
    return decorator
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/usr/pkg/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/tools/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/usr/local/bin/brew', 'name' : 'homebrew' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg5' },
{ 'path' : '/usr/bin/xbps-install','name' : 'xbps' },
{ 'path' : '/usr/local/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/bin/swupd', 'name' : 'swupd' },
{ 'path' : '/usr/sbin/sorcery', 'name' : 'sorcery' },
]
    def __init__(self, module, load_on_init=True, cached_facts=None):
        """Collect the generic facts.

        :param module: AnsibleModule used to run commands / locate binaries
        :param load_on_init: when True (default) gather all facts immediately
        :param cached_facts: optional pre-collected facts dict to start from
        """
        self.module = module
        if not cached_facts:
            self.facts = {}
        else:
            self.facts = cached_facts
        ### TODO: Eventually, these should all get moved to populate().  But
        # some of the values are currently being used by other subclasses (for
        # instance, os_family and distribution).  Have to sort out what to do
        # about those first.
        if load_on_init:
            # Ordering matters: later collectors read facts (e.g. 'system',
            # 'distribution') that the earlier ones populate.
            self.get_platform_facts()
            self.facts.update(Distribution(module).populate())
            self.get_cmdline()
            self.get_public_ssh_host_keys()
            self.get_selinux_facts()
            self.get_apparmor_facts()
            self.get_caps_facts()
            self.get_fips_facts()
            self.get_pkg_mgr_facts()
            self.get_service_mgr_facts()
            self.get_lsb_facts()
            self.get_date_time_facts()
            self.get_user_facts()
            self.get_local_facts()
            self.get_env_facts()
            self.get_dns_facts()
            self.get_python_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
    def get_platform_facts(self):
        """Collect baseline platform facts (system, kernel, arch, fqdn...)."""
        self.facts['system'] = platform.system()
        self.facts['kernel'] = platform.release()
        self.facts['machine'] = platform.machine()
        self.facts['python_version'] = platform.python_version()
        self.facts['fqdn'] = socket.getfqdn()
        self.facts['hostname'] = platform.node().split('.')[0]
        self.facts['nodename'] = platform.node()
        self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
        arch_bits = platform.architecture()[0]
        self.facts['userspace_bits'] = arch_bits.replace('bit', '')
        # Distinguish hardware architecture from the userspace one: a
        # 32-bit userspace may run on a 64-bit kernel.
        if self.facts['machine'] == 'x86_64':
            self.facts['architecture'] = self.facts['machine']
            if self.facts['userspace_bits'] == '64':
                self.facts['userspace_architecture'] = 'x86_64'
            elif self.facts['userspace_bits'] == '32':
                self.facts['userspace_architecture'] = 'i386'
        elif Facts._I386RE.search(self.facts['machine']):
            self.facts['architecture'] = 'i386'
            if self.facts['userspace_bits'] == '64':
                self.facts['userspace_architecture'] = 'x86_64'
            elif self.facts['userspace_bits'] == '32':
                self.facts['userspace_architecture'] = 'i386'
        else:
            self.facts['architecture'] = self.facts['machine']
        if self.facts['system'] == 'AIX':
            # Attempt to use getconf to figure out architecture
            # fall back to bootinfo if needed
            getconf_bin = self.module.get_bin_path('getconf')
            if getconf_bin:
                rc, out, err = self.module.run_command([getconf_bin, 'MACHINE_ARCHITECTURE'])
                data = out.splitlines()
                self.facts['architecture'] = data[0]
            else:
                bootinfo_bin = self.module.get_bin_path('bootinfo')
                rc, out, err = self.module.run_command([bootinfo_bin, '-p'])
                data = out.splitlines()
                self.facts['architecture'] = data[0]
        elif self.facts['system'] == 'OpenBSD':
            self.facts['architecture'] = platform.uname()[5]
        # Prefer the D-Bus machine id, fall back to systemd's.
        machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
        if machine_id:
            machine_id = machine_id.splitlines()[0]
            self.facts["machine_id"] = machine_id
    def get_local_facts(self):
        """Load user-supplied *.fact files from fact_path into facts['local'].

        Executable files are run and their output parsed; non-executable
        files are read directly.  Content is tried as JSON first, then as
        an ini file.
        """
        fact_path = self.module.params.get('fact_path', None)
        if not fact_path or not os.path.exists(fact_path):
            return
        local = {}
        for fn in sorted(glob.glob(fact_path + '/*.fact')):
            # where it will sit under local facts
            fact_base = os.path.basename(fn).replace('.fact','')
            if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
                # run it
                # try to read it as json first
                # if that fails read it with ConfigParser
                # if that fails, skip it
                try:
                    rc, out, err = self.module.run_command(fn)
                except UnicodeError:
                    # NOTE: this stores the error and returns immediately,
                    # so any remaining *.fact files are skipped.
                    fact = 'error loading fact - output of running %s was not utf-8' % fn
                    local[fact_base] = fact
                    self.facts['local'] = local
                    return
            else:
                out = get_file_content(fn, default='')
            # load raw json
            fact = 'loading %s' % fact_base
            try:
                fact = json.loads(out)
            except ValueError:
                # load raw ini
                cp = configparser.ConfigParser()
                try:
                    cp.readfp(StringIO(out))
                except configparser.Error:
                    fact = "error loading fact - please check content"
                else:
                    # Convert the parsed ini into a nested dict.
                    fact = {}
                    for sect in cp.sections():
                        if sect not in fact:
                            fact[sect] = {}
                        for opt in cp.options(sect):
                            val = cp.get(sect, opt)
                            fact[sect][opt]=val
            local[fact_base] = fact
        if not local:
            return
        self.facts['local'] = local
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError:
pass
def get_public_ssh_host_keys(self):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
# list of directories to check for ssh keys
# used in the order listed here, the first one with keys is used
keydirs = ['/etc/ssh', '/etc/openssh', '/etc']
for keydir in keydirs:
for type_ in keytypes:
factname = 'ssh_host_key_%s_public' % type_
if factname in self.facts:
# a previous keydir was already successful, stop looking
# for keys
return
key_filename = '%s/ssh_host_%s_key.pub' % (keydir, type_)
keydata = get_file_content(key_filename)
if keydata is not None:
self.facts[factname] = keydata.split()[1]
    def get_pkg_mgr_facts(self):
        """Detect the system package manager by probing for known binaries."""
        if self.facts['system'] == 'OpenBSD':
            self.facts['pkg_mgr'] = 'openbsd_pkg'
        else:
            self.facts['pkg_mgr'] = 'unknown'
            # Deliberately no break: when several managers are installed
            # the LAST match in PKG_MGRS wins (the list puts the preferred
            # manager for a platform last).
            for pkg in Facts.PKG_MGRS:
                if os.path.isfile(pkg['path']):
                    self.facts['pkg_mgr'] = pkg['name']
    def get_service_mgr_facts(self):
        """Detect the init/service manager (systemd, upstart, launchd, ...).

        Tries to identify PID 1 first; when that is inconclusive, falls
        back to per-OS heuristics, and finally to the generic 'service'.
        """
        #TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
        # also other OSs other than linux might need to check across several possible candidates

        # Mapping of proc_1 values to more useful names
        proc_1_map = {
            'procd': 'openwrt_init',
            'runit-init': 'runit',
            'svscan': 'svc',
            'openrc-init': 'openrc',
        }

        # try various forms of querying pid 1
        proc_1 = get_file_content('/proc/1/comm')
        if proc_1 is None:
            rc, proc_1, err = self.module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
            # If the output of the command starts with what looks like a PID, then the 'ps' command
            # probably didn't work the way we wanted, probably because it's busybox
            if re.match(r' *[0-9]+ ', proc_1):
                proc_1 = None

        # The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
        if proc_1 == "COMMAND\n":
            proc_1 = None

        if proc_1 is not None:
            proc_1 = os.path.basename(proc_1)
            proc_1 = to_native(proc_1)
            proc_1 = proc_1.strip()

        if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
            # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container
            proc_1 = None

        # if not init/None it should be an identifiable or custom init, so we are done!
        if proc_1 is not None:
            # Lookup proc_1 value in map and use proc_1 value itself if no match
            self.facts['service_mgr'] = proc_1_map.get(proc_1, proc_1)

        # start with the easy ones
        elif self.facts['distribution'] == 'MacOSX':
            #FIXME: find way to query executable, version matching is not ideal
            if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
                self.facts['service_mgr'] = 'launchd'
            else:
                self.facts['service_mgr'] = 'systemstarter'
        elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
            #FIXME: we might want to break out to individual BSDs or 'rc'
            self.facts['service_mgr'] = 'bsdinit'
        elif self.facts['system'] == 'AIX':
            self.facts['service_mgr'] = 'src'
        elif self.facts['system'] == 'SunOS':
            self.facts['service_mgr'] = 'smf'
        elif self.facts['distribution'] == 'OpenWrt':
            self.facts['service_mgr'] = 'openwrt_init'
        elif self.facts['system'] == 'Linux':
            # Filesystem canaries, then known init binaries/directories.
            if self.is_systemd_managed():
                self.facts['service_mgr'] = 'systemd'
            elif self.module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
                self.facts['service_mgr'] = 'upstart'
            elif os.path.exists('/sbin/openrc'):
                self.facts['service_mgr'] = 'openrc'
            elif os.path.exists('/etc/init.d/'):
                self.facts['service_mgr'] = 'sysvinit'

        if not self.facts.get('service_mgr', False):
            # if we cannot detect, fallback to generic 'service'
            self.facts['service_mgr'] = 'service'
    def get_lsb_facts(self):
        """Gather LSB facts from `lsb_release -a` or /etc/lsb-release.

        NOTE: 'LSB Version:' is stored under the 'release' key and is
        then overwritten by the later 'Release:' line when present.
        """
        lsb_path = self.module.get_bin_path('lsb_release')
        if lsb_path:
            rc, out, err = self.module.run_command([lsb_path, "-a"], errors='surrogate_then_replace')
            if rc == 0:
                self.facts['lsb'] = {}
                for line in out.splitlines():
                    if len(line) < 1 or ':' not in line:
                        continue
                    value = line.split(':', 1)[1].strip()
                    if 'LSB Version:' in line:
                        self.facts['lsb']['release'] = value
                    elif 'Distributor ID:' in line:
                        self.facts['lsb']['id'] = value
                    elif 'Description:' in line:
                        self.facts['lsb']['description'] = value
                    elif 'Release:' in line:
                        self.facts['lsb']['release'] = value
                    elif 'Codename:' in line:
                        self.facts['lsb']['codename'] = value
        elif lsb_path is None and os.path.exists('/etc/lsb-release'):
            # No lsb_release binary: parse the KEY=value file directly.
            self.facts['lsb'] = {}
            for line in get_file_lines('/etc/lsb-release'):
                value = line.split('=',1)[1].strip()
                if 'DISTRIB_ID' in line:
                    self.facts['lsb']['id'] = value
                elif 'DISTRIB_RELEASE' in line:
                    self.facts['lsb']['release'] = value
                elif 'DISTRIB_DESCRIPTION' in line:
                    self.facts['lsb']['description'] = value
                elif 'DISTRIB_CODENAME' in line:
                    self.facts['lsb']['codename'] = value
        if 'lsb' in self.facts and 'release' in self.facts['lsb']:
            self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
    def get_selinux_facts(self):
        """Gather SELinux status, modes and policy type.

        facts['selinux'] is False when python selinux bindings are absent;
        otherwise a dict with status/mode/config_mode/policyvers/type,
        each degrading to 'unknown' when the libselinux call fails.
        """
        if not HAVE_SELINUX:
            self.facts['selinux'] = False
            return
        self.facts['selinux'] = {}
        if not selinux.is_selinux_enabled():
            self.facts['selinux']['status'] = 'disabled'
        else:
            self.facts['selinux']['status'] = 'enabled'
            try:
                self.facts['selinux']['policyvers'] = selinux.security_policyvers()
            except (AttributeError,OSError):
                self.facts['selinux']['policyvers'] = 'unknown'
            try:
                # Mode configured in /etc/selinux/config (may differ from runtime).
                (rc, configmode) = selinux.selinux_getenforcemode()
                if rc == 0:
                    self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
                else:
                    self.facts['selinux']['config_mode'] = 'unknown'
            except (AttributeError,OSError):
                self.facts['selinux']['config_mode'] = 'unknown'
            try:
                # Current runtime enforcement mode.
                mode = selinux.security_getenforce()
                self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
            except (AttributeError,OSError):
                self.facts['selinux']['mode'] = 'unknown'
            try:
                (rc, policytype) = selinux.selinux_getpolicytype()
                if rc == 0:
                    self.facts['selinux']['type'] = policytype
                else:
                    self.facts['selinux']['type'] = 'unknown'
            except (AttributeError,OSError):
                self.facts['selinux']['type'] = 'unknown'
def get_apparmor_facts(self):
self.facts['apparmor'] = {}
if os.path.exists('/sys/kernel/security/apparmor'):
self.facts['apparmor']['status'] = 'enabled'
else:
self.facts['apparmor']['status'] = 'disabled'
    def get_caps_facts(self):
        """Parse `capsh --print` into capability facts.

        system_capabilities_enforced is 'False' when the current set is
        the full '=ep', 'True' when a restricted list is in effect, and
        'NA' when no 'Current:' line was seen.
        """
        capsh_path = self.module.get_bin_path('capsh')
        if capsh_path:
            rc, out, err = self.module.run_command([capsh_path, "--print"], errors='surrogate_then_replace')
            enforced_caps = []
            enforced = 'NA'
            for line in out.splitlines():
                if len(line) < 1:
                    continue
                if line.startswith('Current:'):
                    if line.split(':')[1].strip() == '=ep':
                        enforced = 'False'
                    else:
                        enforced = 'True'
                        # Everything after '=' is a comma-separated cap list.
                        enforced_caps = [i.strip() for i in line.split('=')[1].split(',')]
            self.facts['system_capabilities_enforced'] = enforced
            self.facts['system_capabilities'] = enforced_caps
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
    def get_date_time_facts(self):
        """Collect current date/time facts in various string formats."""
        self.facts['date_time'] = {}

        now = datetime.datetime.now()
        self.facts['date_time']['year'] = now.strftime('%Y')
        self.facts['date_time']['month'] = now.strftime('%m')
        self.facts['date_time']['weekday'] = now.strftime('%A')
        self.facts['date_time']['weekday_number'] = now.strftime('%w')
        self.facts['date_time']['weeknumber'] = now.strftime('%W')
        self.facts['date_time']['day'] = now.strftime('%d')
        self.facts['date_time']['hour'] = now.strftime('%H')
        self.facts['date_time']['minute'] = now.strftime('%M')
        self.facts['date_time']['second'] = now.strftime('%S')
        self.facts['date_time']['epoch'] = now.strftime('%s')
        # strftime('%s') is a non-portable glibc extension; fall back to
        # time.time() when it produced nothing useful.
        if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
            self.facts['date_time']['epoch'] = str(int(time.time()))
        self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
        self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
        self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
        self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        self.facts['date_time']['iso8601_basic'] = now.strftime("%Y%m%dT%H%M%S%f")
        self.facts['date_time']['iso8601_basic_short'] = now.strftime("%Y%m%dT%H%M%S")
        self.facts['date_time']['tz'] = time.strftime("%Z")
        self.facts['date_time']['tz_offset'] = time.strftime("%z")
def is_systemd_managed(self):
# tools must be installed
if self.module.get_bin_path('systemctl'):
# this should show if systemd is the boot init system, if checking init faild to mark as systemd
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
return False
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
pwent = pwd.getpwnam(getpass.getuser())
self.facts['user_uid'] = pwent.pw_uid
self.facts['user_gid'] = pwent.pw_gid
self.facts['user_gecos'] = pwent.pw_gecos
self.facts['user_dir'] = pwent.pw_dir
self.facts['user_shell'] = pwent.pw_shell
self.facts['real_user_id'] = os.getuid()
self.facts['effective_user_id'] = os.geteuid()
self.facts['real_group_id'] = os.getgid()
self.facts['effective_group_id'] = os.getgid()
def get_env_facts(self):
self.facts['env'] = {}
for k,v in iteritems(os.environ):
self.facts['env'][k] = v
    def get_dns_facts(self):
        """Parse /etc/resolv.conf into the 'dns' fact.

        Recognizes nameserver, domain, search, sortlist and options
        directives; comment lines (# or ;) and blanks are skipped.
        """
        self.facts['dns'] = {}
        for line in get_file_content('/etc/resolv.conf', '').splitlines():
            if line.startswith('#') or line.startswith(';') or line.strip() == '':
                continue
            tokens = line.split()
            if len(tokens) == 0:
                continue
            if tokens[0] == 'nameserver':
                if not 'nameservers' in self.facts['dns']:
                    self.facts['dns']['nameservers'] = []
                for nameserver in tokens[1:]:
                    self.facts['dns']['nameservers'].append(nameserver)
            elif tokens[0] == 'domain':
                if len(tokens) > 1:
                    self.facts['dns']['domain'] = tokens[1]
            elif tokens[0] == 'search':
                # A later 'search' line replaces an earlier one.
                self.facts['dns']['search'] = []
                for suffix in tokens[1:]:
                    self.facts['dns']['search'].append(suffix)
            elif tokens[0] == 'sortlist':
                self.facts['dns']['sortlist'] = []
                for address in tokens[1:]:
                    self.facts['dns']['sortlist'].append(address)
            elif tokens[0] == 'options':
                self.facts['dns']['options'] = {}
                if len(tokens) > 1:
                    for option in tokens[1:]:
                        # Options are bare flags (-> True) or "name:value".
                        option_tokens = option.split(':', 1)
                        if len(option_tokens) == 0:
                            continue
                        val = len(option_tokens) == 2 and option_tokens[1] or True
                        self.facts['dns']['options'][option_tokens[0]] = val
def _get_mount_size_facts(self, mountpoint):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(mountpoint)
size_total = statvfs_result.f_frsize * statvfs_result.f_blocks
size_available = statvfs_result.f_frsize * (statvfs_result.f_bavail)
except OSError:
pass
return size_total, size_available
def get_python_facts(self):
self.facts['python'] = {
'version': {
'major': sys.version_info[0],
'minor': sys.version_info[1],
'micro': sys.version_info[2],
'releaselevel': sys.version_info[3],
'serial': sys.version_info[4]
},
'version_info': list(sys.version_info),
'executable': sys.executable,
'has_sslcontext': HAS_SSLCONTEXT
}
try:
self.facts['python']['type'] = sys.subversion[0]
except AttributeError:
try:
self.facts['python']['type'] = sys.implementation.name
except AttributeError:
self.facts['python']['type'] = None
class Distribution(object):
"""
This subclass of Facts fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SuSE'},
{'path': '/etc/SuSE-release', 'name': 'SuSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/etc/os-release', 'name': 'NA'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
'ClearLinux': 'Clear Linux',
'SMGL': 'Source Mage GNU/Linux',
}
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat', Virtuozzo = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo',
Funtoo = 'Gentoo', Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
)
def __init__(self, module):
self.system = platform.system()
self.facts = {}
self.module = module
def populate(self):
self.get_distribution_facts()
return self.facts
    def get_distribution_facts(self):
        """Fill distribution, distribution_version/_release and os_family.

        Starts from the platform module's view of the system, then (on
        Linux) refines it by inspecting the release files in OSDIST_LIST.
        """
        # The platform module provides information about the running
        # system/distribution. Use this as a baseline and fix buggy systems
        # afterwards
        self.facts['distribution'] = self.system
        self.facts['distribution_release'] = platform.release()
        self.facts['distribution_version'] = platform.version()
        systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')

        self.facts['distribution'] = self.system

        if self.system in systems_implemented:
            # Non-Linux systems each have a dedicated get_distribution_* method.
            cleanedname = self.system.replace('-','')
            distfunc = getattr(self, 'get_distribution_'+cleanedname)
            distfunc()
        elif self.system == 'Linux':
            # try to find out which linux distribution this is
            dist = platform.dist()
            self.facts['distribution'] = dist[0].capitalize() or 'NA'
            self.facts['distribution_version'] = dist[1] or 'NA'
            self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
            self.facts['distribution_release'] = dist[2] or 'NA'
            # Try to handle the exceptions now ...
            # self.facts['distribution_debug'] = []
            for ddict in self.OSDIST_LIST:
                name = ddict['name']
                path = ddict['path']

                if not os.path.exists(path):
                    continue
                # if allowempty is set, we only check for file existance but not content
                if 'allowempty' in ddict and ddict['allowempty']:
                    self.facts['distribution'] = name
                    break
                if os.path.getsize(path) == 0:
                    continue

                data = get_file_content(path)
                if name in self.SEARCH_STRING:
                    # look for the distribution string in the data and replace according to RELEASE_NAME_MAP
                    # only the distribution name is set, the version is assumed to be correct from platform.dist()
                    if self.SEARCH_STRING[name] in data:
                        # this sets distribution=RedHat if 'Red Hat' shows up in data
                        self.facts['distribution'] = name
                    else:
                        # this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
                        self.facts['distribution'] = data.split()[0]
                    break
                else:
                    # call a dedicated function for parsing the file content
                    try:
                        distfunc = getattr(self, 'get_distribution_' + name)
                        parsed = distfunc(name, data, path)
                        if parsed is None or parsed:
                            # distfunc return False if parsing failed
                            # break only if parsing was succesful
                            # otherwise continue with other distributions
                            break
                    except AttributeError:
                        # this should never happen, but if it does fail quietly and not with a traceback
                        pass

                    # to debug multiple matching release files, one can use:
                    # self.facts['distribution_debug'].append({path + ' ' + name:
                    #         (parsed,
                    #          self.facts['distribution'],
                    #          self.facts['distribution_version'],
                    #          self.facts['distribution_release'],
                    #          )})

        # Map the distribution onto a broader family (defaults to itself).
        self.facts['os_family'] = self.facts['distribution']
        distro = self.facts['distribution'].replace(' ', '_')
        if distro in self.OS_FAMILY:
            self.facts['os_family'] = self.OS_FAMILY[distro]
def get_distribution_AIX(self):
    """Fill in AIX version/release facts from the `oslevel` utility.

    `oslevel` prints a dotted level such as "7.1.0.0"; the first two
    components become distribution_version and distribution_release.
    """
    rc, out, err = self.module.run_command("/usr/bin/oslevel")
    pieces = out.split('.')
    self.facts['distribution_version'] = pieces[0]
    self.facts['distribution_release'] = pieces[1]
def get_distribution_HPUX(self):
    """Extract HP-UX version/release facts from the installed OE bundle.

    `swlist` output contains a token like "B.11.31.0709"; the "B.11.31"
    part is the version and the trailing digits are the release.
    """
    rc, out, err = self.module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
    found = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
    if found:
        self.facts['distribution_version'] = found.group(1)
        self.facts['distribution_release'] = found.group(2)
def get_distribution_Darwin(self):
    """Report macOS facts via `sw_vers`."""
    self.facts['distribution'] = 'MacOSX'
    rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
    # The last whitespace-separated token is the product version string.
    version = out.split()[-1]
    self.facts['distribution_version'] = version
def get_distribution_FreeBSD(self):
    """Derive FreeBSD version facts from the kernel release string."""
    release = platform.release()
    self.facts['distribution_release'] = release
    # e.g. "10.3-RELEASE-p4" -> major "10", version "10.3"
    found = re.search('(\d+)\.(\d+)-RELEASE.*', release)
    if found:
        major, minor = found.group(1), found.group(2)
        self.facts['distribution_major_version'] = major
        self.facts['distribution_version'] = '%s.%s' % (major, minor)
def get_distribution_OpenBSD(self):
    """Derive OpenBSD facts from platform.release() and kern.version.

    kern.version looks like "OpenBSD 6.0-current (GENERIC) ..."; the
    token after the dash names the flavour, defaulting to 'release'.
    """
    self.facts['distribution_version'] = platform.release()
    rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
    found = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
    self.facts['distribution_release'] = found.group(1) if found else 'release'
def get_distribution_DragonFly(self):
    # Intentionally a no-op: DragonFly's facts come from the generic
    # platform probes and there is no release file to parse here.
    pass
def get_distribution_NetBSD(self):
    """Set the NetBSD major version from the already-known release fact."""
    release = self.facts['distribution_release']
    major, _, _ = release.partition('.')
    self.facts['distribution_major_version'] = major
def get_distribution_Slackware(self, name, data, path):
    """Parse /etc/slackware-version content; False if it isn't Slackware."""
    if 'Slackware' not in data:
        # Not a Slackware release file; let the caller try other parsers.
        return False  # TODO: remove
    self.facts['distribution'] = name
    # The version is the first dotted token, e.g. "14.1".
    found = re.search('\w+[.]\w+', data)
    if found:
        self.facts['distribution_version'] = found.group(0)
def get_distribution_Amazon(self, name, data, path):
    """Detect Amazon Linux; False if the file isn't an Amazon release file."""
    if 'Amazon' not in data:
        return False  # TODO: remove
    self.facts['distribution'] = 'Amazon'
    # The release file ends with the version, e.g. "... release 2016.03".
    self.facts['distribution_version'] = data.rsplit(None, 1)[-1]
def get_distribution_OpenWrt(self, name, data, path):
    """Parse OpenWrt's lsb-style release file; False if not OpenWrt."""
    if 'OpenWrt' not in data:
        return False  # TODO: remove
    self.facts['distribution'] = name
    # DISTRIB_RELEASE carries the version, DISTRIB_CODENAME the release.
    for pattern, fact in (('DISTRIB_RELEASE="(.*)"', 'distribution_version'),
                          ('DISTRIB_CODENAME="(.*)"', 'distribution_release')):
        found = re.search(pattern, data)
        if found:
            self.facts[fact] = found.group(1)
def get_distribution_Alpine(self, name, data, path):
    """Alpine's release file contains only the version string."""
    self.facts.update({'distribution': 'Alpine',
                       'distribution_version': data})
def get_distribution_SMGL(self):
    """Source Mage has no version facts to parse; only set the name."""
    self.facts.update({'distribution': 'Source Mage GNU/Linux'})
def get_distribution_SunOS(self):
    # Identify which SunOS derivative is running (Solaris, SmartOS,
    # OpenIndiana, OmniOS or Nexenta) from the first line of /etc/release,
    # plus uname -v for the Nexenta / version fallbacks.
    data = get_file_content('/etc/release').splitlines()[0]
    if 'Solaris' in data:
        # Normalise "Oracle Solaris ..." so the first word is the name;
        # the Oracle prefix is preserved in distribution_release.
        ora_prefix = ''
        if 'Oracle Solaris' in data:
            data = data.replace('Oracle ','')
            ora_prefix = 'Oracle '
        self.facts['distribution'] = data.split()[0]
        self.facts['distribution_version'] = data.split()[1]
        self.facts['distribution_release'] = ora_prefix + data
        return
    uname_v = get_uname_version(self.module)
    distribution_version = None
    if 'SmartOS' in data:
        self.facts['distribution'] = 'SmartOS'
        if os.path.exists('/etc/product'):
            # /etc/product holds "Key: Value" lines; the Image entry's
            # last token is the platform image version.
            product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
            if 'Image' in product_data:
                distribution_version = product_data.get('Image').split()[-1]
    elif 'OpenIndiana' in data:
        self.facts['distribution'] = 'OpenIndiana'
    elif 'OmniOS' in data:
        self.facts['distribution'] = 'OmniOS'
        distribution_version = data.split()[-1]
    elif uname_v is not None and 'NexentaOS_' in uname_v:
        self.facts['distribution'] = 'Nexenta'
        distribution_version = data.split()[-1].lstrip('v')
    if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
        # For these illumos derivatives the raw release line is kept, and
        # the version falls back to the first line of `uname -v`.
        self.facts['distribution_release'] = data.strip()
        if distribution_version is not None:
            self.facts['distribution_version'] = distribution_version
        elif uname_v is not None:
            self.facts['distribution_version'] = uname_v.splitlines()[0].strip()
        return
    return False # TODO: remove if tested without this
def get_distribution_SuSE(self, name, data, path):
    """Parse SuSE-family release information out of *data*.

    *data* is the content of *path*, which is either ``/etc/os-release``
    (modern releases) or ``/etc/SuSE-release`` (older releases).
    Returns False when the content clearly does not describe a SuSE
    system so the caller can try other distribution parsers; otherwise
    updates the distribution/version/release facts in place.
    """
    if 'suse' not in data.lower():
        return False # TODO: remove if tested without this
    if path == '/etc/os-release':
        for line in data.splitlines():
            distribution = re.search("^NAME=(.*)", line)
            if distribution:
                self.facts['distribution'] = distribution.group(1).strip('"')
            # example pattern are 13.04 13.0 13
            distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
            if distribution_version:
                self.facts['distribution_version'] = distribution_version.group(1)
            if 'open' in data.lower():
                # openSUSE: the minor component of VERSION_ID is the release.
                release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
                if release:
                    self.facts['distribution_release'] = release.groups()[0]
            elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                # SLES doesn't got funny release names
                release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
                # BUGFIX: the regex may not match even though the line
                # mentions VERSION_ID (unexpected format); previously this
                # called release.group(1) unguarded and raised
                # AttributeError on None. An empty minor component means
                # the first release of the product, reported as "0".
                if release:
                    self.facts['distribution_release'] = release.group(1) or "0"
    elif path == '/etc/SuSE-release':
        if 'open' in data.lower():
            data = data.splitlines()
            distdata = get_file_content(path).splitlines()[0]
            self.facts['distribution'] = distdata.split()[0]
            for line in data:
                release = re.search('CODENAME *= *([^\n]+)', line)
                if release:
                    self.facts['distribution_release'] = release.groups()[0].strip()
        elif 'enterprise' in data.lower():
            lines = data.splitlines()
            distribution = lines[0].split()[0]
            if "Server" in data:
                self.facts['distribution'] = "SLES"
            elif "Desktop" in data:
                self.facts['distribution'] = "SLED"
            for line in lines:
                release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't got funny release names
                if release:
                    self.facts['distribution_release'] = release.group(1)
                    self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
def get_distribution_Debian(self, name, data, path):
    """Set distribution facts for Debian, Raspbian and Ubuntu.

    Returns False when *data* matches none of them so the caller can
    try the remaining distribution parsers.
    """
    if 'Debian' not in data and 'Raspbian' not in data:
        if 'Ubuntu' in data:
            # nothing else to do, Ubuntu gets correct info from python functions
            self.facts['distribution'] = 'Ubuntu'
            return
        return False
    self.facts['distribution'] = 'Debian'
    # Pull the codename out of e.g. PRETTY_NAME="Debian GNU/Linux 8 (jessie)"
    release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
    if release:
        self.facts['distribution_release'] = release.group(1)
    # Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
    if self.facts['distribution_release'] == 'NA' and 'Debian' in data:
        dpkg_cmd = self.module.get_bin_path('dpkg')
        if dpkg_cmd:
            cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
            rc, out, err = self.module.run_command(cmd)
            if rc == 0:
                self.facts['distribution_release'] = out.strip()
def get_distribution_Mandriva(self, name, data, path):
    """Parse Mandriva's lsb-style release file; False if not Mandriva."""
    if 'Mandriva' not in data:
        return False
    self.facts['distribution'] = 'Mandriva'
    for pattern, fact in (('DISTRIB_RELEASE="(.*)"', 'distribution_version'),
                          ('DISTRIB_CODENAME="(.*)"', 'distribution_release')):
        found = re.search(pattern, data)
        if found:
            self.facts[fact] = found.group(1)
    # Use the caller-supplied name as the final distribution value.
    self.facts['distribution'] = name
def get_distribution_NA(self, name, data, path):
    """Best-effort os-release fallback: only fill facts still set to 'NA'."""
    checks = (("^NAME=(.*)", 'distribution'),
              ("^VERSION=(.*)", 'distribution_version'))
    for line in data.splitlines():
        for pattern, fact in checks:
            found = re.search(pattern, line)
            if found and self.facts[fact] == 'NA':
                self.facts[fact] = found.group(1).strip('"')
def get_distribution_Coreos(self, name, data, path):
    """Fill the CoreOS release channel; False when not running CoreOS."""
    if self.facts['distribution'].lower() != 'coreos':
        return False  # TODO: remove if tested without this
    if not data:
        # include fix from #15230, #15228
        return
    # The update channel is the GROUP entry at the top of the file.
    found = re.search("^GROUP=(.*)", data)
    if found:
        self.facts['distribution_release'] = found.group(1).strip('"')
class Hardware(Facts):
    """
    This is a generic Hardware subclass of Facts. This should be further
    subclassed to implement per platform. If you subclass this, it
    should define:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    All subclasses MUST define platform.
    """
    platform = 'Generic'

    def __new__(cls, *arguments, **keyword):
        # When Hardware is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Hardware:
            return super(Hardware, cls).__new__(cls)
        # Pick the subclass whose `platform` attribute matches the running
        # system; fall back to this generic class when none matches.
        subclass = cls
        for sc in get_all_subclasses(Hardware):
            if sc.platform == platform.system():
                subclass = sc
        # object.__new__ accepts no extra arguments on Python 3.
        if PY3:
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        # Generic fallback: nothing to add beyond what Facts collected.
        return self.facts

    def get_sysctl(self, prefixes):
        # Run `sysctl <prefix>...` and return its output as a flat
        # key -> value dict; an empty dict when sysctl fails.
        sysctl_cmd = self.module.get_bin_path('sysctl')
        cmd = [sysctl_cmd]
        cmd.extend(prefixes)
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            return dict()
        sysctl = dict()
        for line in out.splitlines():
            if not line:
                continue
            # Keys and values are separated by "=" (BSD style) or ": ".
            (key, value) = re.split('\s?=\s?|: ', line, maxsplit=1)
            sysctl[key] = value.strip()
        return sysctl
class LinuxHardware(Hardware):
    """
    Linux-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    In addition, it also defines number of DMI facts and device facts.
    """
    platform = 'Linux'
    # Originally only had these four as toplevelfacts
    ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
    # Now we have all of these in a dict structure
    MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
    # regex used against findmnt output to detect bind mounts
    BIND_MOUNT_RE = re.compile(r'.*\]')
    # regex used against mtab content to find entries that are bind mounts
    MTAB_BIND_MOUNT_RE = re.compile(r'.*bind.*"')

    def populate(self):
        # Collect every category of hardware fact; mount collection is
        # wrapped in a timeout because statvfs can hang on dead network mounts.
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_device_facts()
        self.get_uptime_facts()
        self.get_lvm_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            pass
        return self.facts

    def get_memory_facts(self):
        # Parse /proc/meminfo into the legacy flat *_mb facts and the
        # structured `memory_mb` dict (real / nocache / swap usage).
        if not os.access("/proc/meminfo", os.R_OK):
            return
        memstats = {}
        for line in get_file_lines("/proc/meminfo"):
            data = line.split(":", 1)
            key = data[0]
            if key in self.ORIGINAL_MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                self.facts["%s_mb" % key.lower()] = int(val) // 1024
            if key in self.MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                memstats[key.lower()] = int(val) // 1024
        # Each derived value is only computed when all of its inputs were
        # present in /proc/meminfo.
        if None not in (memstats.get('memtotal'), memstats.get('memfree')):
            memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
        if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
            memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
        if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
            memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
        if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
            memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
        self.facts['memory_mb'] = {
            'real' : {
                'total': memstats.get('memtotal'),
                'used': memstats.get('real:used'),
                'free': memstats.get('memfree'),
            },
            'nocache' : {
                'free': memstats.get('nocache:free'),
                'used': memstats.get('nocache:used'),
            },
            'swap' : {
                'total': memstats.get('swaptotal'),
                'free': memstats.get('swapfree'),
                'used': memstats.get('swap:used'),
                'cached': memstats.get('swapcached'),
            },
        }

    def get_cpu_facts(self):
        # Parse /proc/cpuinfo into processor list / count / cores / threads
        # facts, with special handling for Xen paravirtualised guests.
        i = 0
        vendor_id_occurrence = 0
        model_name_occurrence = 0
        physid = 0
        coreid = 0
        sockets = {}
        cores = {}
        xen = False
        xen_paravirt = False
        try:
            if os.path.exists('/proc/xen'):
                xen = True
            else:
                for line in get_file_lines('/sys/hypervisor/type'):
                    if line.strip() == 'xen':
                        xen = True
                    # Only interested in the first line
                    break
        except IOError:
            pass
        if not os.access("/proc/cpuinfo", os.R_OK):
            return
        self.facts['processor'] = []
        for line in get_file_lines('/proc/cpuinfo'):
            data = line.split(":", 1)
            key = data[0].strip()
            if xen:
                if key == 'flags':
                    # Check for vme cpu flag, Xen paravirt does not expose this.
                    # Need to detect Xen paravirt because it exposes cpuinfo
                    # differently than Xen HVM or KVM and causes reporting of
                    # only a single cpu core.
                    # NOTE(review): `data` is the [key, value] pair here, so
                    # this is a list-membership test rather than a substring
                    # test of the flags string — it looks like it may intend
                    # `data[1]`; confirm before changing.
                    if 'vme' not in data:
                        xen_paravirt = True
            # model name is for Intel arch, Processor (mind the uppercase P)
            # works for some ARM devices, like the Sheevaplug.
            if key in ['model name', 'Processor', 'vendor_id', 'cpu', 'Vendor']:
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(data[1].strip())
                if key == 'vendor_id':
                    vendor_id_occurrence += 1
                if key == 'model name':
                    model_name_occurrence += 1
                i += 1
            elif key == 'physical id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
            elif key == 'core id':
                coreid = data[1].strip()
                if coreid not in sockets:
                    cores[coreid] = 1
            elif key == 'cpu cores':
                sockets[physid] = int(data[1].strip())
            elif key == 'siblings':
                cores[coreid] = int(data[1].strip())
            elif key == '# processors':
                self.facts['processor_cores'] = int(data[1].strip())
        # Skip for platforms without vendor_id/model_name in cpuinfo (e.g ppc64le)
        if vendor_id_occurrence > 0:
            if vendor_id_occurrence == model_name_occurrence:
                i = vendor_id_occurrence
        if self.facts['architecture'] != 's390x':
            if xen_paravirt:
                # Paravirt guests only report one logical CPU per vcpu.
                self.facts['processor_count'] = i
                self.facts['processor_cores'] = i
                self.facts['processor_threads_per_core'] = 1
                self.facts['processor_vcpus'] = i
            else:
                if sockets:
                    self.facts['processor_count'] = len(sockets)
                else:
                    self.facts['processor_count'] = i
                socket_values = list(sockets.values())
                if socket_values:
                    self.facts['processor_cores'] = socket_values[0]
                else:
                    self.facts['processor_cores'] = 1
                core_values = list(cores.values())
                if core_values:
                    self.facts['processor_threads_per_core'] = core_values[0] // self.facts['processor_cores']
                else:
                    # NOTE(review): 1 // cores evaluates to 0 whenever
                    # cores > 1 — presumably this was meant to be just 1;
                    # confirm before changing.
                    self.facts['processor_threads_per_core'] = 1 // self.facts['processor_cores']
                self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
                    self.facts['processor_count'] * self.facts['processor_cores'])

    def get_dmi_facts(self):
        ''' learn dmi facts from system

        Try /sys first for dmi related facts.
        If that is not available, fall back to dmidecode executable '''
        if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
            # Use kernel DMI info, if available
            # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
            FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
                "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
                "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
                "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
                "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
                "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
                "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
                "CompactPCI", "AdvancedTCA", "Blade" ]
            DMI_DICT = {
                'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
                'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
                'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
                'product_name': '/sys/devices/virtual/dmi/id/product_name',
                'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
                'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
                'product_version': '/sys/devices/virtual/dmi/id/product_version',
                'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
            }
            for (key,path) in DMI_DICT.items():
                data = get_file_content(path)
                if data is not None:
                    if key == 'form_factor':
                        # chassis_type is a numeric index into FORM_FACTOR.
                        try:
                            self.facts['form_factor'] = FORM_FACTOR[int(data)]
                        except IndexError:
                            self.facts['form_factor'] = 'unknown (%s)' % data
                    else:
                        self.facts[key] = data
                else:
                    self.facts[key] = 'NA'
        else:
            # Fall back to using dmidecode, if available
            dmi_bin = self.module.get_bin_path('dmidecode')
            DMI_DICT = {
                'bios_date': 'bios-release-date',
                'bios_version': 'bios-version',
                'form_factor': 'chassis-type',
                'product_name': 'system-product-name',
                'product_serial': 'system-serial-number',
                'product_uuid': 'system-uuid',
                'product_version': 'system-version',
                'system_vendor': 'system-manufacturer'
            }
            for (k, v) in DMI_DICT.items():
                if dmi_bin is not None:
                    (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                    if rc == 0:
                        # Strip out commented lines (specific dmidecode output)
                        thisvalue = ''.join([ line for line in out.splitlines() if not line.startswith('#') ])
                        try:
                            json.dumps(thisvalue)
                        except UnicodeDecodeError:
                            thisvalue = "NA"
                        self.facts[k] = thisvalue
                    else:
                        self.facts[k] = 'NA'
                else:
                    self.facts[k] = 'NA'

    def _run_lsblk(self, lsblk_path):
        # call lsblk and collect all uuids
        # --exclude 2 makes lsblk ignore floppy disks, which are slower to answer than typical timeouts
        # this uses the linux major device number
        # for details see https://www.kernel.org/doc/Documentation/devices.txt
        args = ['--list', '--noheadings', '--paths', '--output', 'NAME,UUID', '--exclude', '2']
        cmd = [lsblk_path] + args
        rc, out, err = self.module.run_command(cmd)
        return rc, out, err

    def _lsblk_uuid(self):
        # Return a {device_name: uuid} dict built from lsblk output;
        # empty when lsblk is missing or fails.
        uuids = {}
        lsblk_path = self.module.get_bin_path("lsblk")
        if not lsblk_path:
            return uuids
        rc, out, err = self._run_lsblk(lsblk_path)
        if rc != 0:
            return uuids
        # each line will be in format:
        # <devicename><some whitespace><uuid>
        # /dev/sda1  32caaec3-ef40-4691-a3b6-438c3f9bc1c0
        for lsblk_line in out.splitlines():
            if not lsblk_line:
                continue
            line = lsblk_line.strip()
            fields = line.rsplit(None, 1)
            if len(fields) < 2:
                continue
            device_name, uuid = fields[0].strip(), fields[1].strip()
            if device_name in uuids:
                continue
            uuids[device_name] = uuid
        return uuids

    def _run_findmnt(self, findmnt_path):
        # Thin wrapper so tests/subclasses can intercept the findmnt call.
        args = ['--list', '--noheadings', '--notruncate']
        cmd = [findmnt_path] + args
        rc, out, err = self.module.run_command(cmd, errors='surrogate_then_replace')
        return rc, out, err

    def _find_bind_mounts(self):
        # Return the set of mount points that are bind mounts, detected via
        # findmnt; empty when findmnt is missing or fails.
        bind_mounts = set()
        findmnt_path = self.module.get_bin_path("findmnt")
        if not findmnt_path:
            return bind_mounts
        rc, out, err = self._run_findmnt(findmnt_path)
        if rc != 0:
            return bind_mounts
        # find bind mounts, in case /etc/mtab is a symlink to /proc/mounts
        for line in out.splitlines():
            fields = line.split()
            # fields[0] is the TARGET, fields[1] is the SOURCE
            if len(fields) < 2:
                continue
            # bind mounts will have a [/directory_name] in the SOURCE column
            if self.BIND_MOUNT_RE.match(fields[1]):
                bind_mounts.add(fields[0])
        return bind_mounts

    def _mtab_entries(self):
        # Return mtab lines split into fields, preferring /etc/mtab and
        # falling back to /proc/mounts.
        mtab_file = '/etc/mtab'
        if not os.path.exists(mtab_file):
            mtab_file = '/proc/mounts'
        mtab = get_file_content(mtab_file, '')
        mtab_entries = []
        for line in mtab.splitlines():
            fields = line.split()
            if len(fields) < 4:
                continue
            mtab_entries.append(fields)
        return mtab_entries

    @timeout()
    def get_mount_facts(self):
        # Build the `mounts` fact from mtab, decorating each entry with
        # sizes, uuid and a synthetic ",bind" option for bind mounts.
        self.facts['mounts'] = []
        bind_mounts = self._find_bind_mounts()
        uuids = self._lsblk_uuid()
        mtab_entries = self._mtab_entries()
        mounts = []
        for fields in mtab_entries:
            device, mount, fstype, options = fields[0], fields[1], fields[2], fields[3]
            if not device.startswith('/') and ':/' not in device:
                continue
            if fstype == 'none':
                continue
            size_total, size_available = self._get_mount_size_facts(mount)
            if mount in bind_mounts:
                # only add if not already there, we might have a plain /etc/mtab
                if not self.MTAB_BIND_MOUNT_RE.match(options):
                    options += ",bind"
            mount_info = {'mount': mount,
                          'device': device,
                          'fstype': fstype,
                          'options': options,
                          # statvfs data
                          'size_total': size_total,
                          'size_available': size_available,
                          'uuid': uuids.get(device, 'N/A')}
            mounts.append(mount_info)
        self.facts['mounts'] = mounts

    def get_holders(self, block_dev_dict, sysdir):
        # Populate block_dev_dict['holders'] with the device-mapper names
        # (or raw dm-* folder names) holding this block device.
        block_dev_dict['holders'] = []
        if os.path.isdir(sysdir + "/holders"):
            for folder in os.listdir(sysdir + "/holders"):
                if not folder.startswith("dm-"):
                    continue
                name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
                if name:
                    block_dev_dict['holders'].append(name)
                else:
                    block_dev_dict['holders'].append(folder)

    def get_device_facts(self):
        # Walk /sys/block and build the `devices` fact: per-disk vendor,
        # model, serial, partitions, scheduler, sizes and host controller.
        self.facts['devices'] = {}
        lspci = self.module.get_bin_path('lspci')
        if lspci:
            rc, pcidata, err = self.module.run_command([lspci, '-D'], errors='surrogate_then_replace')
        else:
            pcidata = None
        try:
            block_devs = os.listdir("/sys/block")
        except OSError:
            return
        # Map kernel device names to their WWN via /dev/disk/by-id symlinks.
        devs_wwn = {}
        try:
            devs_by_id = os.listdir("/dev/disk/by-id")
        except OSError:
            pass
        else:
            for link_name in devs_by_id:
                if link_name.startswith("wwn-"):
                    try:
                        wwn_link = os.readlink(os.path.join("/dev/disk/by-id", link_name))
                    except OSError:
                        continue
                    devs_wwn[os.path.basename(wwn_link)] = link_name[4:]
        for block in block_devs:
            virtual = 1
            sysfs_no_links = 0
            try:
                path = os.readlink(os.path.join("/sys/block/", block))
            except OSError:
                e = sys.exc_info()[1]
                if e.errno == errno.EINVAL:
                    # Not a symlink: older sysfs layout, use the name directly.
                    path = block
                    sysfs_no_links = 1
                else:
                    continue
            if "virtual" in path:
                continue
            sysdir = os.path.join("/sys/block", path)
            if sysfs_no_links == 1:
                for folder in os.listdir(sysdir):
                    if "device" in folder:
                        virtual = 0
                        break
            if virtual:
                continue
            d = {}
            diskname = os.path.basename(sysdir)
            for key in ['vendor', 'model', 'sas_address', 'sas_device_handle']:
                d[key] = get_file_content(sysdir + "/device/" + key)
            # Prefer the serial number reported by sg_inq when available.
            sg_inq = self.module.get_bin_path('sg_inq')
            if sg_inq:
                device = "/dev/%s" % (block)
                rc, drivedata, err = self.module.run_command([sg_inq, device])
                if rc == 0:
                    serial = re.search("Unit serial number:\s+(\w+)", drivedata)
                    if serial:
                        d['serial'] = serial.group(1)
            for key in ['vendor', 'model']:
                d[key] = get_file_content(sysdir + "/device/" + key)
            for key,test in [ ('removable','/removable'), \
                              ('support_discard','/queue/discard_granularity'),
                              ]:
                d[key] = get_file_content(sysdir + test)
            if diskname in devs_wwn:
                d['wwn'] = devs_wwn[diskname]
            # Partition sub-directories are named <disk><digits>.
            d['partitions'] = {}
            for folder in os.listdir(sysdir):
                m = re.search("(" + diskname + "\d+)", folder)
                if m:
                    part = {}
                    partname = m.group(1)
                    part_sysdir = sysdir + "/" + partname
                    part['start'] = get_file_content(part_sysdir + "/start",0)
                    part['sectors'] = get_file_content(part_sysdir + "/size",0)
                    part['sectorsize'] = get_file_content(part_sysdir + "/queue/logical_block_size")
                    if not part['sectorsize']:
                        part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
                    part['size'] = self.module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
                    part['uuid'] = get_partition_uuid(partname)
                    self.get_holders(part, part_sysdir)
                    d['partitions'][partname] = part
            d['rotational'] = get_file_content(sysdir + "/queue/rotational")
            d['scheduler_mode'] = ""
            # The active scheduler is the bracketed entry in queue/scheduler.
            scheduler = get_file_content(sysdir + "/queue/scheduler")
            if scheduler is not None:
                m = re.match(".*?(\[(.*)\])", scheduler)
                if m:
                    d['scheduler_mode'] = m.group(2)
            d['sectors'] = get_file_content(sysdir + "/size")
            if not d['sectors']:
                d['sectors'] = 0
            d['sectorsize'] = get_file_content(sysdir + "/queue/logical_block_size")
            if not d['sectorsize']:
                d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
            d['size'] = self.module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
            d['host'] = ""
            # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
            m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
            if m and pcidata:
                pciid = m.group(1)
                did = re.escape(pciid)
                m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
                if m:
                    d['host'] = m.group(1)
            self.get_holders(d, sysdir)
            self.facts['devices'][diskname] = d

    def get_uptime_facts(self):
        # First field of /proc/uptime is the uptime in (fractional) seconds.
        uptime_file_content = get_file_content('/proc/uptime')
        if uptime_file_content:
            uptime_seconds_string = uptime_file_content.split(' ')[0]
            self.facts['uptime_seconds'] = int(float(uptime_seconds_string))

    def _find_mapper_device_name(self, dm_device):
        # Translate /dev/dm-N into its /dev/mapper/<name> alias via dmsetup;
        # non-dm devices are returned unchanged.
        dm_prefix = '/dev/dm-'
        mapper_device = dm_device
        if dm_device.startswith(dm_prefix):
            dmsetup_cmd = self.module.get_bin_path('dmsetup', True)
            mapper_prefix = '/dev/mapper/'
            rc, dm_name, err = self.module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device))
            if rc == 0:
                mapper_device = mapper_prefix + dm_name.rstrip()
        return mapper_device

    def get_lvm_facts(self):
        """ Get LVM Facts if running as root and lvm utils are available """
        if os.getuid() == 0 and self.module.get_bin_path('vgs'):
            lvm_util_options = '--noheadings --nosuffix --units g'
            vgs_path = self.module.get_bin_path('vgs')
            #vgs fields: VG #PV #LV #SN Attr VSize VFree
            vgs={}
            if vgs_path:
                rc, vg_lines, err = self.module.run_command( '%s %s' % (vgs_path, lvm_util_options))
                for vg_line in vg_lines.splitlines():
                    items = vg_line.split()
                    vgs[items[0]] = {'size_g':items[-2],
                                     'free_g':items[-1],
                                     'num_lvs': items[2],
                                     'num_pvs': items[1]}
            lvs_path = self.module.get_bin_path('lvs')
            #lvs fields:
            #LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert
            lvs = {}
            if lvs_path:
                rc, lv_lines, err = self.module.run_command( '%s %s' % (lvs_path, lvm_util_options))
                for lv_line in lv_lines.splitlines():
                    items = lv_line.split()
                    lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}
            pvs_path = self.module.get_bin_path('pvs')
            #pvs fields: PV VG #Fmt #Attr PSize PFree
            pvs = {}
            if pvs_path:
                rc, pv_lines, err = self.module.run_command( '%s %s' % (pvs_path, lvm_util_options))
                for pv_line in pv_lines.splitlines():
                    items = pv_line.split()
                    # Keys are canonicalised to /dev/mapper names where possible.
                    pvs[self._find_mapper_device_name(items[0])] = {
                        'size_g': items[4],
                        'free_g': items[5],
                        'vg': items[1]}
            self.facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}
class SunOSHardware(Hardware):
    """
    In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
    """
    platform = 'SunOS'

    def populate(self):
        # Mount collection is guarded by a timeout because statvfs can hang
        # on dead network mounts.
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_device_facts()
        self.get_uptime_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            pass
        return self.facts

    def get_cpu_facts(self):
        # Parse `kstat cpu_info` output into processor list/count facts.
        physid = 0
        sockets = {}
        rc, out, err = self.module.run_command("/usr/bin/kstat cpu_info")
        self.facts['processor'] = []
        for line in out.splitlines():
            if len(line) < 1:
                continue
            data = line.split(None, 1)
            key = data[0].strip()
            # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
            if key == 'module:':
                brand = ''
            elif key == 'brand':
                brand = data[1].strip()
            elif key == 'clock_MHz':
                clock_mhz = data[1].strip()
            elif key == 'implementation':
                processor = brand or data[1].strip()
                # Add clock speed to description for SPARC CPU
                if self.facts['machine'] != 'i86pc':
                    processor += " @ " + clock_mhz + "MHz"
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(processor)
            elif key == 'chip_id':
                # Count virtual CPUs per physical socket.
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
                else:
                    sockets[physid] += 1
        # Counting cores on Solaris can be complicated.
        # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
        # Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visisble to Solaris. Not a true count of cores for modern SPARC as
        # these processors have: sockets -> cores -> threads/virtual CPU.
        if len(sockets) > 0:
            self.facts['processor_count'] = len(sockets)
            self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
        else:
            self.facts['processor_cores'] = 'NA'
            self.facts['processor_count'] = len(self.facts['processor'])

    def get_memory_facts(self):
        # Total RAM comes from prtconf; swap figures from `swap -s`
        # (values are reported in KiB, hence the // 1024 conversions).
        rc, out, err = self.module.run_command(["/usr/sbin/prtconf"])
        for line in out.splitlines():
            if 'Memory size' in line:
                self.facts['memtotal_mb'] = int(line.split()[2])
        rc, out, err = self.module.run_command("/usr/sbin/swap -s")
        allocated = int(out.split()[1][:-1])
        reserved = int(out.split()[5][:-1])
        used = int(out.split()[8][:-1])
        free = int(out.split()[10][:-1])
        self.facts['swapfree_mb'] = free // 1024
        self.facts['swaptotal_mb'] = (free + used) // 1024
        self.facts['swap_allocated_mb'] = allocated // 1024
        self.facts['swap_reserved_mb'] = reserved // 1024

    @timeout()
    def get_mount_facts(self):
        self.facts['mounts'] = []
        # For a detailed format description see mnttab(4)
        #   special mount_point fstype options time
        fstab = get_file_content('/etc/mnttab')
        if fstab:
            for line in fstab.splitlines():
                fields = line.split('\t')
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({
                    'mount': fields[1],
                    'device': fields[0],
                    'fstype' : fields[2],
                    'options': fields[3],
                    'time': fields[4],
                    'size_total': size_total,
                    'size_available': size_available
                })

    def get_dmi_facts(self):
        # Best effort: pull the product name from the first line of prtdiag.
        uname_path = self.module.get_bin_path("prtdiag")
        rc, out, err = self.module.run_command(uname_path)
        """
        rc returns 1
        """
        if out:
            system_conf = out.split('\n')[0]
            found = re.search(r'(\w+\sEnterprise\s\w+)',system_conf)
            if found:
                self.facts['product_name'] = found.group(1)

    def get_device_facts(self):
        # Device facts are derived for sdderr kstats. This code does not use the
        # full output, but rather queries for specific stats.
        # Example output:
        # sderr:0:sd0,err:Hard Errors     0
        # sderr:0:sd0,err:Illegal Request 6
        # sderr:0:sd0,err:Media Error     0
        # sderr:0:sd0,err:Predictive Failure Analysis     0
        # sderr:0:sd0,err:Product VBOX HARDDISK   9
        # sderr:0:sd0,err:Revision        1.0
        # sderr:0:sd0,err:Serial No       VB0ad2ec4d-074a
        # sderr:0:sd0,err:Size    53687091200
        # sderr:0:sd0,err:Soft Errors     0
        # sderr:0:sd0,err:Transport Errors        0
        # sderr:0:sd0,err:Vendor  ATA
        self.facts['devices'] = {}
        # Maps kstat statistic names to the fact keys they populate.
        disk_stats = {
            'Product': 'product',
            'Revision': 'revision',
            'Serial No': 'serial',
            'Size': 'size',
            'Vendor': 'vendor',
            'Hard Errors': 'hard_errors',
            'Soft Errors': 'soft_errors',
            'Transport Errors': 'transport_errors',
            'Media Error': 'media_errors',
            'Predictive Failure Analysis': 'predictive_failure_analysis',
            'Illegal Request': 'illegal_request',
        }
        cmd = ['/usr/bin/kstat', '-p']
        for ds in disk_stats:
            cmd.append('sderr:::%s' % ds)
        d = {}
        rc, out, err = self.module.run_command(cmd)
        if rc != 0:
            return dict()
        sd_instances = frozenset(line.split(':')[1] for line in out.split('\n') if line.startswith('sderr'))
        for instance in sd_instances:
            lines = (line for line in out.split('\n') if ':' in line and line.split(':')[1] == instance)
            for line in lines:
                text, value = line.split('\t')
                stat = text.split(':')[3]
                if stat == 'Size':
                    d[disk_stats.get(stat)] = self.module.pretty_bytes(float(value))
                else:
                    d[disk_stats.get(stat)] = value.rstrip()
            diskname = 'sd' + instance
            self.facts['devices'][diskname] = d
            d = {}

    def get_uptime_facts(self):
        # On Solaris, unix:0:system_misc:snaptime is created shortly after machine boots up
        # and displays tiem in seconds. This is much easier than using uptime as we would
        # need to have a parsing procedure for translating from human-readable to machine-readable
        # format.
        # Example output:
        # unix:0:system_misc:snaptime     1175.410463590
        rc, out, err = self.module.run_command('/usr/bin/kstat -p unix:0:system_misc:snaptime')
        if rc != 0:
            return
        self.facts['uptime_seconds'] = int(float(out.split('\t')[1]))
class OpenBSDHardware(Hardware):
    """
    OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - processor_speed
    In addition, it also defines number of DMI facts and device facts.
    """
    platform = 'OpenBSD'

    def populate(self):
        """Gather all OpenBSD hardware facts and return the facts dict."""
        # Cache the 'hw' sysctl subtree once; the other getters read from it.
        self.sysctl = self.get_sysctl(['hw'])
        self.get_memory_facts()
        self.get_processor_facts()
        self.get_device_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount scanning is wrapped in @timeout(); skip mounts rather than hang.
            pass
        self.get_dmi_facts()
        return self.facts

    @timeout()
    def get_mount_facts(self):
        """Populate 'mounts' from /etc/fstab entries, with per-mount sizes."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.splitlines():
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ', line).split()
                # Skip swap ('none') mount points and placeholder 'xx' option lines.
                if fields[1] == 'none' or fields[3] == 'xx':
                    continue
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({
                    'mount': fields[1],
                    'device': fields[0],
                    'fstype' : fields[2],
                    'options': fields[3],
                    'size_total': size_total,
                    'size_available': size_available
                })

    def get_memory_facts(self):
        """Populate free/total memory and swap facts (values in MB)."""
        # Get free memory. vmstat output looks like:
        #  procs    memory       page                    disks    traps          cpu
        #  r b w    avm     fre  flt  re  pi  po  fr  sr wd0 fd0  int   sys   cs us sy id
        #  0 0 0  47512   28160   51   0   0   0   0   0   1   0  116    89   17  0  1 99
        rc, out, err = self.module.run_command("/usr/bin/vmstat")
        if rc == 0:
            # Column 4 of the last line is the free-list size in KB.
            self.facts['memfree_mb'] = int(out.splitlines()[-1].split()[4]) // 1024
            self.facts['memtotal_mb'] = int(self.sysctl['hw.usermem']) // 1024 // 1024

        # Get swapctl info. swapctl output looks like:
        # total: 69268 1K-blocks allocated, 0 used, 69268 available
        # And for older OpenBSD:
        # total: 69268k bytes allocated = 0k used, 69268k available
        rc, out, err = self.module.run_command("/sbin/swapctl -sk")
        if rc == 0:
            # Strip any trailing k/m/g unit suffix before converting to int.
            swaptrans = { ord(u'k'): None, ord(u'm'): None, ord(u'g'): None}
            data = to_text(out, errors='surrogate_or_strict').split()
            self.facts['swapfree_mb'] = int(data[-2].translate(swaptrans)) // 1024
            self.facts['swaptotal_mb'] = int(data[1].translate(swaptrans)) // 1024

    def get_processor_facts(self):
        """Populate processor model and (logical) CPU counts from sysctl."""
        processor = []
        for i in range(int(self.sysctl['hw.ncpu'])):
            processor.append(self.sysctl['hw.model'])
        self.facts['processor'] = processor
        # The following is partly a lie because there is no reliable way to
        # determine the number of physical CPUs in the system. We can only
        # query the number of logical CPUs, which hides the number of cores.
        # On amd64/i386 we could try to inspect the smt/core/package lines in
        # dmesg, however even those have proven to be unreliable.
        # So take a shortcut and report the logical number of processors in
        # 'processor_count' and 'processor_cores' and leave it at that.
        self.facts['processor_count'] = self.sysctl['hw.ncpu']
        self.facts['processor_cores'] = self.sysctl['hw.ncpu']

    def get_device_facts(self):
        """Populate 'devices' with the disk names reported by hw.disknames."""
        devices = []
        devices.extend(self.sysctl['hw.disknames'].split(','))
        self.facts['devices'] = devices

    def get_dmi_facts(self):
        """Populate DMI facts (product/vendor/serial) from sysctl hw.* nodes."""
        # We don't use dmidecode(1) here because:
        # - it would add dependency on an external package
        # - dmidecode(1) can only be ran as root
        # So instead we rely on sysctl(8) to provide us the information on a
        # best-effort basis. As a bonus we also get facts on non-amd64/i386
        # platforms this way.
        sysctl_to_dmi = {
            'hw.product':  'product_name',
            'hw.version':  'product_version',
            'hw.uuid':     'product_uuid',
            'hw.serialno': 'product_serial',
            'hw.vendor':   'system_vendor',
        }

        for mib in sysctl_to_dmi:
            if mib in self.sysctl:
                self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class FreeBSDHardware(Hardware):
    """
    FreeBSD-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - devices
    """
    platform = 'FreeBSD'
    # Boot-time kernel message buffer; parsed for CPU model and core counts.
    DMESG_BOOT = '/var/run/dmesg.boot'

    def populate(self):
        """Gather all FreeBSD hardware facts and return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_device_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount scanning is wrapped in @timeout(); skip mounts rather than hang.
            pass
        return self.facts

    def get_cpu_facts(self):
        """Populate processor model/count/cores from sysctl and the dmesg boot log."""
        self.facts['processor'] = []
        rc, out, err = self.module.run_command("/sbin/sysctl -n hw.ncpu")
        self.facts['processor_count'] = out.strip()

        dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
        if not dmesg_boot:
            # Boot log missing/rotated; fall back to the live kernel message buffer.
            rc, dmesg_boot, err = self.module.run_command("/sbin/dmesg")
        for line in dmesg_boot.splitlines():
            if 'CPU:' in line:
                cpu = re.sub(r'CPU:\s+', r"", line)
                self.facts['processor'].append(cpu.strip())
            if 'Logical CPUs per core' in line:
                self.facts['processor_cores'] = line.split()[4]

    def get_memory_facts(self):
        """Populate memory facts from sysctl vm.stats and swap facts from swapinfo."""
        rc, out, err = self.module.run_command("/sbin/sysctl vm.stats")
        for line in out.splitlines():
            data = line.split()
            if 'vm.stats.vm.v_page_size' in line:
                pagesize = int(data[1])
            if 'vm.stats.vm.v_page_count' in line:
                pagecount = int(data[1])
            if 'vm.stats.vm.v_free_count' in line:
                freecount = int(data[1])
        self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
        self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swapinfo.  swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
        #
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -k")
        lines = out.splitlines()
        if len(lines[-1]) == 0:
            lines.pop()
        data = lines[-1].split()
        # If only the header line is present there is no swap configured.
        if data[0] != 'Device':
            self.facts['swaptotal_mb'] = int(data[1]) // 1024
            self.facts['swapfree_mb'] = int(data[3]) // 1024

    @timeout()
    def get_mount_facts(self):
        """Populate 'mounts' from /etc/fstab entries, with per-mount sizes."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.splitlines():
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ',line).split()
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({
                    'mount': fields[1],
                    'device': fields[0],
                    'fstype': fields[2],
                    'options': fields[3],
                    'size_total': size_total,
                    'size_available': size_available
                })

    def get_device_facts(self):
        """Populate 'devices' mapping each disk in /dev to its slice names."""
        sysdir = '/dev'
        self.facts['devices'] = {}
        drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = self.module.run_command("/sbin/sysctl kern.disks")
        slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
        if os.path.isdir(sysdir):
            # NOTE(review): slices depend on their parent drive having been
            # seen first; the sorted() below guarantees 'ada0' precedes 'ada0s1'.
            dirlist = sorted(os.listdir(sysdir))
            for device in dirlist:
                d = drives.match(device)
                if d:
                    self.facts['devices'][d.group(1)] = []
                s = slices.match(device)
                if s:
                    self.facts['devices'][d.group(1)].append(s.group(1))

    def get_dmi_facts(self):
        ''' learn dmi facts from system

        Use dmidecode executable if available'''

        # Fall back to using dmidecode, if available
        dmi_bin = self.module.get_bin_path('dmidecode')
        # Fact name -> dmidecode -s keyword.
        DMI_DICT = dict(
            bios_date='bios-release-date',
            bios_version='bios-version',
            form_factor='chassis-type',
            product_name='system-product-name',
            product_serial='system-serial-number',
            product_uuid='system-uuid',
            product_version='system-version',
            system_vendor='system-manufacturer'
        )
        for (k, v) in DMI_DICT.items():
            if dmi_bin is not None:
                (rc, out, err) = self.module.run_command('%s -s %s' % (dmi_bin, v))
                if rc == 0:
                    # Strip out commented lines (specific dmidecode output)
                    self.facts[k] = ''.join([line for line in out.splitlines() if not line.startswith('#') ])
                    try:
                        # Guard against undecodable bytes leaking into facts.
                        json.dumps(self.facts[k])
                    except UnicodeDecodeError:
                        self.facts[k] = 'NA'
                else:
                    self.facts[k] = 'NA'
            else:
                self.facts[k] = 'NA'
class DragonFlyHardware(FreeBSDHardware):
    """
    DragonFly BSD subclass of Hardware.

    All fact gathering is inherited unchanged from FreeBSDHardware;
    only the platform key used for subclass selection differs.
    """
    platform = 'DragonFly'
class NetBSDHardware(Hardware):
    """
    NetBSD-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - devices

    NOTE(review): despite the 'devices' entry above, this class defines no
    device-facts getter — verify against the docstring or add one.
    """
    platform = 'NetBSD'
    # /proc/meminfo keys converted into <key>_mb facts.
    MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']

    def populate(self):
        """Gather all NetBSD hardware facts and return the facts dict."""
        self.sysctl = self.get_sysctl(['machdep'])
        self.get_cpu_facts()
        self.get_memory_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount scanning is wrapped in @timeout(); skip mounts rather than hang.
            pass
        self.get_dmi_facts()
        return self.facts

    def get_cpu_facts(self):
        """Populate processor facts from the Linux-compatible /proc/cpuinfo."""

        i = 0
        physid = 0
        sockets = {}
        if not os.access("/proc/cpuinfo", os.R_OK):
            return
        self.facts['processor'] = []
        for line in get_file_lines("/proc/cpuinfo"):
            data = line.split(":", 1)
            key = data[0].strip()
            # model name is for Intel arch, Processor (mind the uppercase P)
            # works for some ARM devices, like the Sheevaplug.
            if key == 'model name' or key == 'Processor':
                if 'processor' not in self.facts:
                    self.facts['processor'] = []
                self.facts['processor'].append(data[1].strip())
                i += 1
            elif key == 'physical id':
                physid = data[1].strip()
                if physid not in sockets:
                    sockets[physid] = 1
            elif key == 'cpu cores':
                sockets[physid] = int(data[1].strip())
        if len(sockets) > 0:
            self.facts['processor_count'] = len(sockets)
            # Sum cores across all physical sockets.
            # ('reduce' is imported at module level, outside this chunk.)
            self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
        else:
            # No socket/core topology available; report logical CPU count only.
            self.facts['processor_count'] = i
            self.facts['processor_cores'] = 'NA'

    def get_memory_facts(self):
        """Populate memory/swap facts (in MB) from /proc/meminfo."""
        if not os.access("/proc/meminfo", os.R_OK):
            return
        for line in get_file_lines("/proc/meminfo"):
            data = line.split(":", 1)
            key = data[0]
            if key in NetBSDHardware.MEMORY_FACTS:
                val = data[1].strip().split(' ')[0]
                self.facts["%s_mb" % key.lower()] = int(val) // 1024

    @timeout()
    def get_mount_facts(self):
        """Populate 'mounts' from /etc/fstab entries, with per-mount sizes."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.splitlines():
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+',' ',line).split()
                size_total, size_available = self._get_mount_size_facts(fields[1])
                self.facts['mounts'].append({
                    'mount': fields[1],
                    'device': fields[0],
                    'fstype' : fields[2],
                    'options': fields[3],
                    'size_total': size_total,
                    'size_available': size_available
                })

    def get_dmi_facts(self):
        """Populate DMI facts from sysctl machdep.dmi.* nodes."""
        # We don't use dmidecode(1) here because:
        # - it would add dependency on an external package
        # - dmidecode(1) can only be ran as root
        # So instead we rely on sysctl(8) to provide us the information on a
        # best-effort basis. As a bonus we also get facts on non-amd64/i386
        # platforms this way.
        sysctl_to_dmi = {
            'machdep.dmi.system-product': 'product_name',
            'machdep.dmi.system-version': 'product_version',
            'machdep.dmi.system-uuid':    'product_uuid',
            'machdep.dmi.system-serial':  'product_serial',
            'machdep.dmi.system-vendor':  'system_vendor',
        }

        for mib in sysctl_to_dmi:
            if mib in self.sysctl:
                self.facts[sysctl_to_dmi[mib]] = self.sysctl[mib]
class AIX(Hardware):
    """
    AIX-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    """
    platform = 'AIX'

    def populate(self):
        """Gather all AIX hardware facts and return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        self.get_vgs_facts()
        self.get_mount_facts()
        return self.facts

    def get_cpu_facts(self):
        """Populate processor type/count/SMT-threads via lsdev/lsattr."""
        self.facts['processor'] = []

        rc, out, err = self.module.run_command("/usr/sbin/lsdev -Cc processor")
        if out:
            i = 0
            for line in out.splitlines():
                if 'Available' in line:
                    # Remember the first available processor device; its
                    # attributes are queried below as representative of all.
                    if i == 0:
                        data = line.split(' ')
                        cpudev = data[0]

                    i += 1
            self.facts['processor_count'] = int(i)

            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")

            data = out.split(' ')
            self.facts['processor'] = data[1]

            rc, out, err = self.module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")

            data = out.split(' ')
            self.facts['processor_cores'] = int(data[1])

    def get_memory_facts(self):
        """Populate memory facts from vmstat -v and swap facts from lsps -s."""
        pagesize = 4096
        rc, out, err = self.module.run_command("/usr/bin/vmstat -v")
        for line in out.splitlines():
            data = line.split()
            if 'memory pages' in line:
                pagecount = int(data[0])
            if 'free pages' in line:
                freecount = int(data[0])
        self.facts['memtotal_mb'] = pagesize * pagecount // 1024 // 1024
        self.facts['memfree_mb'] = pagesize * freecount // 1024 // 1024
        # Get swapinfo.  swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
        #
        rc, out, err = self.module.run_command("/usr/sbin/lsps -s")
        if out:
            lines = out.splitlines()
            data = lines[1].split()
            swaptotal_mb = int(data[0].rstrip('MB'))
            percused = int(data[1].rstrip('%'))
            self.facts['swaptotal_mb'] = swaptotal_mb
            # lsps reports percent used; derive free MB from it.
            self.facts['swapfree_mb'] = int(swaptotal_mb * ( 100 - percused ) / 100)

    def get_dmi_facts(self):
        """Populate firmware version, serial, LPAR and model facts from lsattr/lsconf."""
        rc, out, err = self.module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
        data = out.split()
        self.facts['firmware_version'] = data[1].strip('IBM,')
        lsconf_path = self.module.get_bin_path("lsconf")
        if lsconf_path:
            rc, out, err = self.module.run_command(lsconf_path)
            if rc == 0 and out:
                for line in out.splitlines():
                    data = line.split(':')
                    if 'Machine Serial Number' in line:
                        self.facts['product_serial'] = data[1].strip()
                    if 'LPAR Info' in line:
                        self.facts['lpar_info'] = data[1].strip()
                    if 'System Model' in line:
                        self.facts['product_name'] = data[1].strip()

    def get_vgs_facts(self):
        """
        Get vg and pv Facts
        rootvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk0            active            546         0           00..00..00..00..00
        hdisk1            active            546         113         00..00..00..21..92
        realsyncvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk74           active            1999        6           00..00..00..00..06
        testvg:
        PV_NAME           PV STATE          TOTAL PPs   FREE PPs    FREE DISTRIBUTION
        hdisk105          active            999         838         200..39..199..200..200
        hdisk106          active            999         599         200..00..00..199..200
        """

        lsvg_path = self.module.get_bin_path("lsvg")
        xargs_path = self.module.get_bin_path("xargs")
        # "lsvg | xargs lsvg -p" lists the physical volumes of every VG.
        cmd = "%s | %s %s -p" % (lsvg_path ,xargs_path,lsvg_path)
        if lsvg_path and xargs_path:
            rc, out, err = self.module.run_command(cmd,use_unsafe_shell=True)
            if rc == 0 and out:
                self.facts['vgs']= {}
                for m in re.finditer(r'(\S+):\n.*FREE DISTRIBUTION(\n(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*)+', out):
                    self.facts['vgs'][m.group(1)] = []
                    pp_size = 0
                    # Query the VG itself for its physical-partition size.
                    cmd = "%s %s" % (lsvg_path,m.group(1))
                    rc, out, err = self.module.run_command(cmd)
                    if rc == 0 and out:
                        pp_size = re.search(r'PP SIZE:\s+(\d+\s+\S+)',out).group(1)
                        for n in re.finditer(r'(\S+)\s+(\w+)\s+(\d+)\s+(\d+).*',m.group(0)):
                            pv_info = { 'pv_name': n.group(1),
                                        'pv_state': n.group(2),
                                        'total_pps': n.group(3),
                                        'free_pps': n.group(4),
                                        'pp_size': pp_size
                                      }
                            self.facts['vgs'][m.group(1)].append(pv_info)

    def get_mount_facts(self):
        """Populate 'mounts' by parsing the output of the mount command."""
        self.facts['mounts'] = []
        # AIX does not have mtab but mount command is only source of info (or to use
        # api calls to get same info)
        mount_path = self.module.get_bin_path('mount')
        rc, mount_out, err = self.module.run_command(mount_path)
        if mount_out:
            for line in mount_out.split('\n'):
                fields = line.split()
                if len(fields) != 0 and fields[0] != 'node' and fields[0][0] != '-' and re.match('^/.*|^[a-zA-Z].*|^[0-9].*', fields[0]):
                    if re.match('^/', fields[0]):
                        # normal mount
                        self.facts['mounts'].append({'mount': fields[1],
                                                     'device': fields[0],
                                                     'fstype' : fields[2],
                                                     'options': fields[6],
                                                     'time': '%s %s %s' % ( fields[3], fields[4], fields[5])})
                    else:
                        # nfs or cifs based mount
                        # in case of nfs if no mount options are provided on command line
                        # add into fields empty string...
                        if len(fields) < 8:
                            fields.append("")
                        self.facts['mounts'].append({'mount': fields[2],
                                                     'device': '%s:%s' % (fields[0], fields[1]),
                                                     'fstype' : fields[3],
                                                     'options': fields[7],
                                                     'time': '%s %s %s' % ( fields[4], fields[5], fields[6])})
class HPUX(Hardware):
    """
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor
    - processor_cores
    - processor_count
    - model
    - firmware
    """

    platform = 'HP-UX'

    def populate(self):
        """Gather all HP-UX hardware facts and return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_hw_facts()
        return self.facts

    def get_cpu_facts(self):
        """Populate processor facts via ioscan/machinfo, per architecture and release."""
        if self.facts['architecture'] == '9000/800':
            rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
            self.facts['processor_count'] = int(out.strip())
        #Working with machinfo mess
        elif self.facts['architecture'] == 'ia64':
            if self.facts['distribution_version'] == "B.11.23":
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
                self.facts['processor_count'] = int(out.strip().split('=')[1])
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
                self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
                rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
                self.facts['processor_cores'] = int(out.strip())
            if self.facts['distribution_version'] == "B.11.31":
                #if machinfo return cores strings release B.11.31 > 1204
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
                if out.strip()== '0':
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    self.facts['processor_count'] = int(out.strip().split(" ")[0])
                    #If hyperthreading is active divide cores by 2
                    rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
                    data = re.sub(' +',' ',out).strip().split(' ')
                    if len(data) == 1:
                        hyperthreading = 'OFF'
                    else:
                        hyperthreading = data[1]
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
                    data = out.strip().split(" ")
                    if hyperthreading == 'ON':
                        # BUGFIX: floor division — true division returned a
                        # float core count on Python 3 (every sibling class
                        # reports an int here).
                        self.facts['processor_cores'] = int(data[0]) // 2
                    else:
                        if len(data) == 1:
                            self.facts['processor_cores'] = self.facts['processor_count']
                        else:
                            self.facts['processor_cores'] = int(data[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
                    self.facts['processor'] = out.strip()
                else:
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
                    self.facts['processor_count'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
                    self.facts['processor_cores'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    self.facts['processor'] = out.strip()

    def get_memory_facts(self):
        """Populate memory and swap facts (in MB) from vmstat/syslog/adb/swapinfo."""
        pagesize = 4096
        rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
        # Column 5 of the last vmstat line is the free-page count.
        data = int(re.sub(' +',' ',out).split(' ')[5].strip())
        self.facts['memfree_mb'] = pagesize * data // 1024 // 1024
        if self.facts['architecture'] == '9000/800':
            try:
                rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
                data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
                self.facts['memtotal_mb'] = int(data) // 1024
            except AttributeError:
                #For systems where memory details aren't sent to syslog or the log has rotated, use parsed
                #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
                if os.access("/dev/kmem", os.R_OK):
                    rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
                                                           use_unsafe_shell=True)
                    if not err:
                        data = out
                        # BUGFIX: floor division — true division produced a
                        # float memtotal_mb fact on Python 3.
                        self.facts['memtotal_mb'] = int(data) // 256
        else:
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
            data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
            self.facts['memtotal_mb'] = int(data)
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
        self.facts['swaptotal_mb'] = int(out.strip())
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
        swap = 0
        # Sum the free column across all device- and fs-backed swap areas.
        for line in out.strip().splitlines():
            swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
        self.facts['swapfree_mb'] = swap

    def get_hw_facts(self):
        """Populate model, firmware version and product serial facts."""
        rc, out, err = self.module.run_command("model")
        self.facts['model'] = out.strip()
        if self.facts['architecture'] == 'ia64':
            separator = ':'
            # machinfo on B.11.23 uses '=' rather than ':' as separator.
            if self.facts['distribution_version'] == "B.11.23":
                separator = '='
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
            self.facts['firmware_version'] = out.split(separator)[1].strip()
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ",use_unsafe_shell=True)
            if rc == 0 and out:
                self.facts['product_serial'] = out.split(separator)[1].strip()
class Darwin(Hardware):
    """
    Darwin-specific subclass of Hardware.  Defines memory and CPU facts:
    - processor
    - processor_cores
    - memtotal_mb
    - memfree_mb
    - model
    - osversion
    - osrevision
    """
    platform = 'Darwin'

    def populate(self):
        """Gather all Darwin hardware facts and return the facts dict."""
        self.sysctl = self.get_sysctl(['hw', 'machdep', 'kern'])
        self.get_mac_facts()
        self.get_cpu_facts()
        self.get_memory_facts()
        return self.facts

    def get_system_profile(self):
        """Parse `system_profiler SPHardwareDataType` output into a flat dict."""
        rc, out, err = self.module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
        profile = {}
        if rc == 0:
            for line in out.splitlines():
                if ': ' not in line:
                    continue
                key, value = line.split(': ', 1)
                # Collapse internal runs of whitespace in the value.
                profile[key.strip()] = ' '.join(value.strip().split())
        return profile

    def get_mac_facts(self):
        """Record hardware model and OS version/revision from sysctl."""
        rc, out, err = self.module.run_command("sysctl hw.model")
        if rc == 0:
            last_line = out.splitlines()[-1]
            self.facts['model'] = last_line.split()[1]
        self.facts['osversion'] = self.sysctl['kern.osversion']
        self.facts['osrevision'] = self.sysctl['kern.osrevision']

    def get_cpu_facts(self):
        """Record processor brand and core count (sysctl on Intel, system_profiler on PowerPC)."""
        if 'machdep.cpu.brand_string' not in self.sysctl:
            # PowerPC has no brand string; fall back to system_profiler.
            profile = self.get_system_profile()
            self.facts['processor'] = '%s @ %s' % (profile['Processor Name'], profile['Processor Speed'])
            self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
        else:
            # Intel
            self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
            self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']

    def get_memory_facts(self):
        """Record total and free memory in megabytes."""
        total_bytes = int(self.sysctl['hw.memsize'])
        self.facts['memtotal_mb'] = total_bytes // 1024 // 1024
        rc, out, err = self.module.run_command("sysctl hw.usermem")
        if rc == 0:
            free_bytes = int(out.splitlines()[-1].split()[1])
            self.facts['memfree_mb'] = free_bytes // 1024 // 1024
class HurdHardware(LinuxHardware):
    """
    GNU Hurd specific subclass of Hardware. Define memory and mount facts
    based on procfs compatibility translator mimicking the interface of
    the Linux kernel.
    """

    platform = 'GNU'

    def populate(self):
        """Gather uptime, memory and (best-effort) mount facts; return the facts dict."""
        self.get_uptime_facts()
        self.get_memory_facts()
        try:
            self.get_mount_facts()
        except TimeoutError:
            # Mount scanning is time-limited; skip mounts rather than hang.
            pass
        return self.facts
class Network(Facts):
    """
    This is a generic Network subclass of Facts.  This should be further
    subclassed to implement per platform.  If you subclass this,
    you must define:
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    # Mapping of IPv6 scope values (as hex strings) to human-readable names.
    IPV6_SCOPE = { '0' : 'global',
                   '10' : 'host',
                   '20' : 'link',
                   '40' : 'admin',
                   '50' : 'site',
                   '80' : 'organization' }

    def __new__(cls, *arguments, **keyword):
        """Select and instantiate the platform-specific Network subclass."""
        # When Network is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Network:
            return super(Network, cls).__new__(cls)

        subclass = cls
        for sc in get_all_subclasses(Network):
            if sc.platform == platform.system():
                subclass = sc
        if PY3:
            # Python 3's object.__new__ does not accept extra arguments.
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        # Base implementation gathers nothing; subclasses override this.
        return self.facts
class LinuxNetwork(Network):
    """
    This is a Linux-specific subclass of Network.  It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    - ipv4_address and ipv6_address: the first non-local address for each family.
    """
    platform = 'Linux'

    # /sys/class/net/<dev>/type values mapped to interface type names.
    INTERFACE_TYPE = {
        '1': 'ether',
        '32': 'infiniband',
        '512': 'ppp',
        '772': 'loopback',
        '65534': 'tunnel',
    }

    def populate(self):
        """Gather network facts via the ip(8) utility and /sys/class/net."""
        ip_path = self.module.get_bin_path('ip')
        if ip_path is None:
            return self.facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
        interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
        # BUGFIX: materialize as a list so the fact is a plain, serializable
        # sequence on Python 3 as well (dict.keys() is a live view there).
        self.facts['interfaces'] = list(interfaces.keys())
        for iface in interfaces:
            self.facts[iface] = interfaces[iface]
        self.facts['default_ipv4'] = default_ipv4
        self.facts['default_ipv6'] = default_ipv6
        self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        return self.facts

    def get_default_interfaces(self, ip_path):
        """Return (ipv4, ipv6) dicts describing the default route's interface/address/gateway."""
        # Use the commands:
        #     ip -4 route get 8.8.8.8                     -> Google public DNS
        #     ip -6 route get 2404:6800:400a:800::1012    -> ipv6.google.com
        # to find out the default outgoing interface, address, and gateway
        command = dict(
            v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
            v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
        )
        interface = dict(v4 = {}, v6 = {})
        for v in 'v4', 'v6':
            # RHEL4-era kernels and hosts without IPv6 cannot answer the v6 query.
            if (v == 'v6' and self.facts['os_family'] == 'RedHat' and
                    self.facts['distribution_version'].startswith('4.')):
                continue
            if v == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            words = out.splitlines()[0].split()
            # A valid output starts with the queried address on the first line
            if len(words) > 0 and words[0] == command[v][-1]:
                for i in range(len(words) - 1):
                    if words[i] == 'dev':
                        interface[v]['interface'] = words[i+1]
                    elif words[i] == 'src':
                        interface[v]['address'] = words[i+1]
                    elif words[i] == 'via' and words[i+1] != command[v][-1]:
                        interface[v]['gateway'] = words[i+1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
        """Build per-interface fact dicts from /sys/class/net and `ip addr` output.

        Returns a tuple (interfaces, ips): interfaces maps device name to its
        fact dict; ips collects all configured IPv4/IPv6 addresses.
        """
        interfaces = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        for path in glob.glob('/sys/class/net/*'):
            if not os.path.isdir(path):
                continue
            device = os.path.basename(path)
            interfaces[device] = { 'device': device }
            if os.path.exists(os.path.join(path, 'address')):
                macaddress = get_file_content(os.path.join(path, 'address'), default='')
                if macaddress and macaddress != '00:00:00:00:00:00':
                    interfaces[device]['macaddress'] = macaddress
            if os.path.exists(os.path.join(path, 'mtu')):
                interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
            if os.path.exists(os.path.join(path, 'operstate')):
                interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
            if os.path.exists(os.path.join(path, 'device','driver', 'module')):
                interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
            if os.path.exists(os.path.join(path, 'type')):
                _type = get_file_content(os.path.join(path, 'type'))
                interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
            if os.path.exists(os.path.join(path, 'bridge')):
                interfaces[device]['type'] = 'bridge'
                interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
                if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
                    interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
                if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
                    interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
            if os.path.exists(os.path.join(path, 'bonding')):
                interfaces[device]['type'] = 'bonding'
                interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
                interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
                interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
                interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
                primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
                if primary:
                    interfaces[device]['primary'] = primary
                    # BUGFIX: use a dedicated variable here.  The previous code
                    # rebound 'path' itself, which corrupted every subsequent
                    # sysfs lookup in this iteration (bonding_slave, device,
                    # speed, flags) whenever a bonding primary was configured.
                    all_slaves_path = os.path.join(path, 'bonding', 'all_slaves_active')
                    if os.path.exists(all_slaves_path):
                        interfaces[device]['all_slaves_active'] = get_file_content(all_slaves_path) == '1'
            if os.path.exists(os.path.join(path, 'bonding_slave')):
                interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
            if os.path.exists(os.path.join(path,'device')):
                interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
            if os.path.exists(os.path.join(path, 'speed')):
                speed = get_file_content(os.path.join(path, 'speed'))
                if speed is not None:
                    interfaces[device]['speed'] = int(speed)

            # Check whether an interface is in promiscuous mode
            if os.path.exists(os.path.join(path,'flags')):
                promisc_mode = False
                # The second byte indicates whether the interface is in promiscuous mode.
                # 1 = promisc
                # 0 = no promisc
                data = int(get_file_content(os.path.join(path, 'flags')),16)
                promisc_mode = (data & 0x0100 > 0)
                interfaces[device]['promisc'] = promisc_mode

            def parse_ip_output(output, secondary=False):
                # Parse one `ip addr show` listing for the current device,
                # recording ipv4/ipv6 details into 'interfaces' and 'ips'
                # (closes over device/macaddress from the enclosing loop).
                for line in output.splitlines():
                    if not line:
                        continue
                    words = line.split()
                    broadcast = ''
                    if words[0] == 'inet':
                        if '/' in words[1]:
                            address, netmask_length = words[1].split('/')
                            if len(words) > 3:
                                broadcast = words[3]
                        else:
                            # pointopoint interfaces do not have a prefix
                            address = words[1]
                            netmask_length = "32"
                        address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
                        netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
                        netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
                        network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
                        iface = words[-1]
                        if iface != device:
                            interfaces[iface] = {}
                        if not secondary and "ipv4" not in interfaces[iface]:
                            interfaces[iface]['ipv4'] = {'address': address,
                                                         'broadcast': broadcast,
                                                         'netmask': netmask,
                                                         'network': network}
                        else:
                            if "ipv4_secondaries" not in interfaces[iface]:
                                interfaces[iface]["ipv4_secondaries"] = []
                            interfaces[iface]["ipv4_secondaries"].append({
                                'address': address,
                                'broadcast': broadcast,
                                'netmask': netmask,
                                'network': network,
                            })

                        # add this secondary IP to the main device
                        if secondary:
                            if "ipv4_secondaries" not in interfaces[device]:
                                interfaces[device]["ipv4_secondaries"] = []
                            interfaces[device]["ipv4_secondaries"].append({
                                'address': address,
                                'broadcast': broadcast,
                                'netmask': netmask,
                                'network': network,
                            })

                        # If this is the default address, update default_ipv4
                        if 'address' in default_ipv4 and default_ipv4['address'] == address:
                            default_ipv4['broadcast'] = broadcast
                            default_ipv4['netmask'] = netmask
                            default_ipv4['network'] = network
                            default_ipv4['macaddress'] = macaddress
                            default_ipv4['mtu'] = interfaces[device]['mtu']
                            default_ipv4['type'] = interfaces[device].get("type", "unknown")
                            default_ipv4['alias'] = words[-1]
                        if not address.startswith('127.'):
                            ips['all_ipv4_addresses'].append(address)
                    elif words[0] == 'inet6':
                        if 'peer' == words[2]:
                            address = words[1]
                            _, prefix = words[3].split('/')
                            scope = words[5]
                        else:
                            address, prefix = words[1].split('/')
                            scope = words[3]
                        if 'ipv6' not in interfaces[device]:
                            interfaces[device]['ipv6'] = []
                        interfaces[device]['ipv6'].append({
                            'address' : address,
                            'prefix' : prefix,
                            'scope' : scope
                        })
                        # If this is the default address, update default_ipv6
                        if 'address' in default_ipv6 and default_ipv6['address'] == address:
                            default_ipv6['prefix'] = prefix
                            default_ipv6['scope'] = scope
                            default_ipv6['macaddress'] = macaddress
                            default_ipv6['mtu'] = interfaces[device]['mtu']
                            default_ipv6['type'] = interfaces[device].get("type", "unknown")
                        if not address == '::1':
                            ips['all_ipv6_addresses'].append(address)

            ip_path = self.module.get_bin_path("ip")

            args = [ip_path, 'addr', 'show', 'primary', device]
            rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')

            args = [ip_path, 'addr', 'show', 'secondary', device]
            rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')

            parse_ip_output(primary_data)
            parse_ip_output(secondary_data, secondary=True)

            interfaces[device].update(self.get_ethtool_data(device))

        # replace : by _ in interface name since they are hard to use in template
        new_interfaces = {}
        for i in interfaces:
            if ':' in i:
                new_interfaces[i.replace(':','_')] = interfaces[i]
            else:
                new_interfaces[i] = interfaces[i]
        return new_interfaces, ips

    def get_ethtool_data(self, device):
        """Query ethtool for offload features and timestamping capabilities of a device."""
        data = {}
        ethtool_path = self.module.get_bin_path("ethtool")
        if ethtool_path:
            args = [ethtool_path, '-k', device]
            rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
            if rc == 0:
                features = {}
                for line in stdout.strip().splitlines():
                    if not line or line.endswith(":"):
                        continue
                    # BUGFIX: split at most once — a value containing ': '
                    # previously raised ValueError on tuple unpacking.
                    key, value = line.split(": ", 1)
                    if not value:
                        continue
                    features[key.strip().replace('-','_')] = value.strip()
                data['features'] = features

            args = [ethtool_path, '-T', device]
            rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
            if rc == 0:
                data['timestamping'] = [m.lower() for m in re.findall('SOF_TIMESTAMPING_(\w+)', stdout)]
                data['hw_timestamp_filters'] = [m.lower() for m in re.findall('HWTSTAMP_FILTER_(\w+)', stdout)]
                m = re.search('PTP Hardware Clock: (\d+)', stdout)
                if m:
                    data['phc_index'] = int(m.groups()[0])

        return data
class GenericBsdIfconfigNetwork(Network):
    """
    This is a generic BSD subclass of Network using the ifconfig command.
    It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    """
    platform = 'Generic_BSD_Ifconfig'

    def populate(self):
        """Gather all network facts; returns self.facts unchanged when
        either ifconfig or route is unavailable."""
        ifconfig_path = self.module.get_bin_path('ifconfig')
        if ifconfig_path is None:
            return self.facts
        route_path = self.module.get_bin_path('route')
        if route_path is None:
            return self.facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
        interfaces, ips = self.get_interfaces_info(ifconfig_path)
        self.detect_type_media(interfaces)
        # Fold the default interface's attributes into the default_* facts.
        self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
        self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
        self.facts['interfaces'] = interfaces.keys()
        for iface in interfaces:
            self.facts[iface] = interfaces[iface]
        self.facts['default_ipv4'] = default_ipv4
        self.facts['default_ipv6'] = default_ipv6
        self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        return self.facts

    def detect_type_media(self, interfaces):
        """Mark any interface whose media string mentions 'ether' as ethernet."""
        for iface in interfaces:
            if 'media' in interfaces[iface]:
                if 'ether' in interfaces[iface]['media'].lower():
                    interfaces[iface]['type'] = 'ether'

    def get_default_interfaces(self, route_path):
        """Return (ipv4_defaults, ipv6_defaults) dicts with 'interface' and
        'gateway' keys, parsed from 'route -n get' output."""
        # Use the commands:
        #     route -n get 8.8.8.8                          -> Google public DNS
        #     route -n get -inet6 2404:6800:400a:800::1012  -> ipv6.google.com
        # to find out the default outgoing interface, address, and gateway
        command = dict(
            v4 = [route_path, '-n', 'get', '8.8.8.8'],
            v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
        )
        interface = dict(v4 = {}, v6 = {})
        for v in 'v4', 'v6':
            # Skip the IPv6 probe entirely when the host has no IPv6 support.
            if v == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = self.module.run_command(command[v])
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            for line in out.splitlines():
                words = line.split()
                # Collect output from route command
                if len(words) > 1:
                    if words[0] == 'interface:':
                        interface[v]['interface'] = words[1]
                    if words[0] == 'gateway:':
                        interface[v]['gateway'] = words[1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
        """Run ifconfig and dispatch each output line to a parse_* helper.

        Returns (interfaces, ips): a dict of per-device facts and a dict
        with 'all_ipv4_addresses' / 'all_ipv6_addresses' lists.
        """
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
        # when running the command 'ifconfig'.
        # Solaris must explicitly run the command 'ifconfig -a'.
        rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
        for line in out.splitlines():
            if line:
                words = line.split()
                # Ignore 'pass' lines in the output.
                if words[0] == 'pass':
                    continue
                # A non-indented line with enough fields starts a new interface.
                elif re.match('^\S', line) and len(words) > 3:
                    current_if = self.parse_interface_line(words)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                elif words[0] == 'tunnel':
                    self.parse_tunnel_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)
        return interfaces, ips

    def parse_interface_line(self, words):
        """Start a new interface record from an 'em0: flags=... mtu ...' line."""
        device = words[0][0:-1]
        current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        current_if['flags'] = self.get_options(words[1])
        if 'LOOPBACK' in current_if['flags']:
            current_if['type'] = 'loopback'
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        if len(words) >= 5 : # Newer FreeBSD versions
            current_if['metric'] = words[3]
            current_if['mtu'] = words[5]
        else:
            current_if['mtu'] = words[3]
        return current_if

    def parse_options_line(self, words, current_if, ips):
        # Mac has options like this...
        current_if['options'] = self.get_options(words[0])

    def parse_nd6_line(self, words, current_if, ips):
        # FreeBSD has options like this...
        current_if['options'] = self.get_options(words[1])

    def parse_ether_line(self, words, current_if, ips):
        """Record the MAC address and mark the interface as ethernet."""
        current_if['macaddress'] = words[1]
        current_if['type'] = 'ether'

    def parse_media_line(self, words, current_if, ips):
        # not sure if this is useful - we also drop information
        current_if['media'] = words[1]
        if len(words) > 2:
            current_if['media_select'] = words[2]
        if len(words) > 3:
            # Strip the leading '(' from the media type token.
            current_if['media_type'] = words[3][1:]
        if len(words) > 4:
            current_if['media_options'] = self.get_options(words[4])

    def parse_status_line(self, words, current_if, ips):
        current_if['status'] = words[1]

    def parse_lladdr_line(self, words, current_if, ips):
        current_if['lladdr'] = words[1]

    def parse_inet_line(self, words, current_if, ips):
        """Parse an IPv4 'inet' line into address/netmask/network/broadcast."""
        # netbsd show aliases like this
        #  lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
        #         inet 127.0.0.1 netmask 0xff000000
        #         inet alias 127.1.1.1 netmask 0xff000000
        if words[1] == 'alias':
            del words[1]
        address = {'address': words[1]}
        # deal with hex netmask
        if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
            words[3] = '0x' + words[3]
        if words[3].startswith('0x'):
            address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
        else:
            # otherwise assume this is a dotted quad
            address['netmask'] = words[3]
        # calculate the network
        address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
        netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
        address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
        # broadcast may be given or we need to calculate
        if len(words) > 5:
            address['broadcast'] = words[5]
        else:
            address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
        # add to our list of addresses
        if not words[1].startswith('127.'):
            ips['all_ipv4_addresses'].append(address['address'])
        current_if['ipv4'].append(address)

    def parse_inet6_line(self, words, current_if, ips):
        """Parse an IPv6 'inet6' line into address/prefix/scope."""
        address = {'address': words[1]}
        if (len(words) >= 4) and (words[2] == 'prefixlen'):
            address['prefix'] = words[3]
        if (len(words) >= 6) and (words[4] == 'scopeid'):
            address['scope'] = words[5]
        # Exclude loopback spellings from the all-addresses list.
        localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
        if address['address'] not in localhost6:
            ips['all_ipv6_addresses'].append(address['address'])
        current_if['ipv6'].append(address)

    def parse_tunnel_line(self, words, current_if, ips):
        current_if['type'] = 'tunnel'

    def parse_unknown_line(self, words, current_if, ips):
        # we are going to ignore unknown lines here - this may be
        # a bad idea - but you can override it in your subclass
        pass

    def get_options(self, option_string):
        """Return the comma-separated options inside '<...>' as a list
        ([] when the string has no well-formed bracketed section)."""
        start = option_string.find('<') + 1
        end = option_string.rfind('>')
        if (start > 0) and (end > 0) and (end > start + 1):
            option_csv = option_string[start:end]
            return option_csv.split(',')
        else:
            return []

    def merge_default_interface(self, defaults, interfaces, ip_type):
        """Copy the default interface's facts (except address lists) into
        *defaults*, plus the first address entry of the given ip_type."""
        if 'interface' not in defaults:
            return
        if not defaults['interface'] in interfaces:
            return
        ifinfo = interfaces[defaults['interface']]
        # copy all the interface values across except addresses
        for item in ifinfo:
            if item != 'ipv4' and item != 'ipv6':
                defaults[item] = ifinfo[item]
        if len(ifinfo[ip_type]) > 0:
            for item in ifinfo[ip_type][0]:
                defaults[item] = ifinfo[ip_type][0][item]
class HPUXNetwork(Network):
    """
    HP-UX-specific subclass of Network. Defines networking facts:
    - default_interface
    - default_gateway
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4 address information.
    """
    platform = 'HP-UX'

    def populate(self):
        """Gather network facts via netstat; no-op when netstat is absent."""
        netstat_path = self.module.get_bin_path('netstat')
        if netstat_path is None:
            return self.facts
        self.get_default_interfaces()
        interfaces = self.get_interfaces_info()
        self.facts['interfaces'] = interfaces.keys()
        for iface in interfaces:
            self.facts[iface] = interfaces[iface]
        return self.facts

    def get_default_interfaces(self):
        """Parse 'netstat -nr' for the default route's interface and gateway."""
        rc, out, err = self.module.run_command("/usr/bin/netstat -nr")
        lines = out.splitlines()
        for line in lines:
            words = line.split()
            if len(words) > 1:
                if words[0] == 'default':
                    self.facts['default_interface'] = words[4]
                    self.facts['default_gateway'] = words[1]

    def get_interfaces_info(self):
        """Parse 'netstat -ni' output into a dict keyed by 'lan*' device name."""
        interfaces = {}
        rc, out, err = self.module.run_command("/usr/bin/netstat -ni")
        lines = out.splitlines()
        for line in lines:
            words = line.split()
            for i in range(len(words) - 1):
                if words[i][:3] == 'lan':
                    device = words[i]
                    address = words[i + 3]
                    network = words[i + 2]
                    # Build the record in one step; the previous code first
                    # stored a partial ipv4 dict and immediately overwrote
                    # it (dead store).
                    interfaces[device] = {'device': device}
                    interfaces[device]['ipv4'] = {'network': network,
                                                  'interface': device,
                                                  'address': address}
        return interfaces
class DarwinNetwork(GenericBsdIfconfigNetwork):
    """
    Mac OS X/Darwin network facts class.

    Identical to GenericBsdIfconfigNetwork except for the 'media:' line,
    whose format differs on Darwin.
    """
    platform = 'Darwin'

    # media line is different to the default FreeBSD one
    def parse_media_line(self, words, current_if, ips):
        """Parse a Darwin 'media:' line into the current interface dict."""
        # Darwin's ifconfig never reports a usable media value here.
        current_if['media'] = 'Unknown'
        current_if['media_select'] = words[1]
        if len(words) > 2:
            # Bridge interfaces report '<unknown type>', which the whitespace
            # split breaks into two tokens; normalise that case.
            if (words[1], words[2]) == ('<unknown', 'type>'):
                current_if['media_select'] = 'Unknown'
                current_if['media_type'] = 'unknown type'
            else:
                # Strip the surrounding '<' and '>' from the media type.
                current_if['media_type'] = words[2][1:-1]
        if len(words) > 3:
            current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork):
    """
    FreeBSD network facts class.

    Relies entirely on the generic BSD ifconfig implementation.
    """
    platform = 'FreeBSD'
class DragonFlyNetwork(GenericBsdIfconfigNetwork):
    """
    DragonFly BSD network facts class.

    Relies entirely on the generic BSD ifconfig implementation.
    """
    platform = 'DragonFly'
class AIXNetwork(GenericBsdIfconfigNetwork):
    """
    This is the AIX Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged.
    """
    platform = 'AIX'

    def get_default_interfaces(self, route_path):
        """Return (ipv4, ipv6) default-route dicts parsed from 'netstat -nr'.

        Overrides the generic 'route -n get' implementation; *route_path*
        is accepted for interface compatibility but not used.
        """
        netstat_path = self.module.get_bin_path('netstat')
        rc, out, err = self.module.run_command([netstat_path, '-nr'])
        interface = dict(v4 = {}, v6 = {})
        lines = out.splitlines()
        for line in lines:
            words = line.split()
            if len(words) > 1 and words[0] == 'default':
                # Crude address-family detection: dotted quad vs. colons.
                if '.' in words[1]:
                    interface['v4']['gateway'] = words[1]
                    interface['v4']['interface'] = words[5]
                elif ':' in words[1]:
                    interface['v6']['gateway'] = words[1]
                    interface['v6']['interface'] = words[5]
        return interface['v4'], interface['v6']

    # AIX 'ifconfig -a' does not have three words in the interface line
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
        """Parse 'ifconfig -a' plus entstat/lsattr output into interface facts.

        Mirrors the generic BSD parser, except the interface-line match and
        the AIX-specific MAC address (entstat) and MTU (lsattr/ODM) lookups.
        """
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        uname_rc = None
        uname_out = None
        uname_err = None
        uname_path = self.module.get_bin_path('uname')
        if uname_path:
            # 'uname -W' reports the workload partition (WPAR) number;
            # '0' means we are not inside a WPAR.
            uname_rc, uname_out, uname_err = self.module.run_command([uname_path, '-W'])
        rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
        for line in out.splitlines():
            if line:
                words = line.split()
                # only this condition differs from GenericBsdIfconfigNetwork
                if re.match('^\w*\d*:', line):
                    current_if = self.parse_interface_line(words)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)
            # don't bother with wpars it does not work
            # zero means not in wpar
            if not uname_rc and uname_out.split()[0] == '0':
                if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
                    entstat_path = self.module.get_bin_path('entstat')
                    if entstat_path:
                        rc, out, err = self.module.run_command([entstat_path, current_if['device'] ])
                        if rc != 0:
                            # NOTE(review): this 'break' aborts the whole
                            # interface loop, not just this lookup — confirm
                            # that is the intended behavior.
                            break
                        for line in out.splitlines():
                            if not line:
                                pass
                            buff = re.match('^Hardware Address: (.*)', line)
                            if buff:
                                current_if['macaddress'] = buff.group(1)
                            buff = re.match('^Device Type:', line)
                            if buff and re.match('.*Ethernet', line):
                                current_if['type'] = 'ether'
                # device must have mtu attribute in ODM
                if 'mtu' not in current_if:
                    lsattr_path = self.module.get_bin_path('lsattr')
                    if lsattr_path:
                        rc, out, err = self.module.run_command([lsattr_path,'-El', current_if['device'] ])
                        if rc != 0:
                            # NOTE(review): same concern as above — aborts
                            # the whole interface loop on lsattr failure.
                            break
                        for line in out.splitlines():
                            if line:
                                words = line.split()
                                if words[0] == 'mtu':
                                    current_if['mtu'] = words[1]
        return interfaces, ips

    # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
    def parse_interface_line(self, words):
        """Build a fresh interface dict from an 'en0: flags=...' line."""
        device = words[0][0:-1]
        current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        current_if['flags'] = self.get_options(words[1])
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork):
    """
    OpenBSD network facts class, based on the generic BSD implementation.
    """
    platform = 'OpenBSD'

    # OpenBSD 'ifconfig -a' does not have information about aliases
    def get_interfaces_info(self, ifconfig_path, ifconfig_options='-aA'):
        """Collect interface info; OpenBSD needs '-aA' to include aliases."""
        return super(OpenBSDNetwork, self).get_interfaces_info(
            ifconfig_path, ifconfig_options)

    def parse_lladdr_line(self, words, current_if, ips):
        """Record the link-level address as 'macaddress' (not 'lladdr')
        and mark the interface as ethernet."""
        current_if['macaddress'] = words[1]
        current_if['type'] = 'ether'
class NetBSDNetwork(GenericBsdIfconfigNetwork):
    """
    NetBSD network facts class, based on the generic BSD implementation.
    """
    platform = 'NetBSD'

    def parse_media_line(self, words, current_if, ips):
        """Parse a NetBSD 'media:' line.

        Example output::

            ne0: flags=8863<UP,BROADCAST,NOTRAILERS,RUNNING,SIMPLEX,MULTICAST> mtu 1500
                    media: Ethernet 10baseT full-duplex
                    inet 192.168.156.29 netmask 0xffffff00 broadcast 192.168.156.255
        """
        current_if['media'] = words[1]
        if len(words) > 2:
            current_if['media_type'] = words[2]
        if len(words) > 3:
            current_if['media_options'] = words[3].split(',')
class SunOSNetwork(GenericBsdIfconfigNetwork):
    """
    This is the SunOS Network Class.
    It uses the GenericBsdIfconfigNetwork.
    Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
    so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
    """
    platform = 'SunOS'

    # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
    # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
    # 'parse_interface_line()' checks for previously seen interfaces before defining
    # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
    def get_interfaces_info(self, ifconfig_path):
        """Parse Solaris 'ifconfig -a' output, merging the separate IPv4
        and IPv6 sections for each interface into a single record."""
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        rc, out, err = self.module.run_command([ifconfig_path, '-a'])
        for line in out.splitlines():
            if line:
                words = line.split()
                if re.match('^\S', line) and len(words) > 3:
                    current_if = self.parse_interface_line(words, current_if, interfaces)
                    interfaces[ current_if['device'] ] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)
        # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
        # ipv4/ipv6 lists which is ugly and hard to read.
        # This quick hack merges the dictionaries. Purely cosmetic.
        for iface in interfaces:
            for v in 'ipv4', 'ipv6':
                combined_facts = {}
                for facts in interfaces[iface][v]:
                    combined_facts.update(facts)
                if len(combined_facts.keys()) > 0:
                    interfaces[iface][v] = [combined_facts]
        return interfaces, ips

    def parse_interface_line(self, words, current_if, interfaces):
        """Create or re-use an interface record; per-family flags and MTU
        are appended to the 'ipv4' or 'ipv6' list depending on the flags."""
        device = words[0][0:-1]
        if device not in interfaces:
            current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        else:
            current_if = interfaces[device]
        flags = self.get_options(words[1])
        # The IPv6 flag in the flags list tells us which family this
        # section of output describes.
        v = 'ipv4'
        if 'IPv6' in flags:
            v = 'ipv6'
        if 'LOOPBACK' in flags:
            current_if['type'] = 'loopback'
        current_if[v].append({'flags': flags, 'mtu': words[3]})
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if

    # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
    # Add leading zero to each octet where needed.
    def parse_ether_line(self, words, current_if, ips):
        """Normalise the Solaris MAC address to two-digit octets."""
        macaddress = ''
        for octet in words[1].split(':'):
            octet = ('0' + octet)[-2:None]
            macaddress += (octet + ':')
        current_if['macaddress'] = macaddress[0:-1]
class HurdPfinetNetwork(Network):
    """
    This is a GNU Hurd specific subclass of Network. It use fsysopts to
    get the ip address and support only pfinet.
    """
    platform = 'GNU'
    # Directory containing the pfinet translator socket links.
    _socket_dir = '/servers/socket/'

    def populate(self):
        """Parse 'fsysopts -L' output on the pfinet socket translator into
        per-interface ipv4/ipv6 facts."""
        fsysopts_path = self.module.get_bin_path('fsysopts')
        if fsysopts_path is None:
            return self.facts
        socket_path = None
        # Prefer the inet socket; fall back to inet6.
        for l in ('inet', 'inet6'):
            link = os.path.join(self._socket_dir, l)
            if os.path.exists(link):
                socket_path = link
                break
        if socket_path:
            rc, out, err = self.module.run_command([fsysopts_path, '-L', socket_path])
            self.facts['interfaces'] = []
            for i in out.split():
                # Only long options of the form --key=value carry facts.
                if '=' in i and i.startswith('--'):
                    k,v = i.split('=',1)
                    # remove '--'
                    k = k[2:]
                    if k == 'interface':
                        # remove /dev/ from /dev/eth0
                        v = v[5:]
                        self.facts['interfaces'].append(v)
                        self.facts[v] = {
                            'active': True,
                            'device': v,
                            'ipv4': {},
                            'ipv6': [],
                        }
                        current_if = v
                    # NOTE(review): the branches below assume an
                    # '--interface' option always precedes its address
                    # options; otherwise 'current_if' is unbound — confirm
                    # fsysopts guarantees that ordering.
                    elif k == 'address':
                        self.facts[current_if]['ipv4']['address'] = v
                    elif k == 'netmask':
                        self.facts[current_if]['ipv4']['netmask'] = v
                    elif k == 'address6':
                        address,prefix = v.split('/')
                        self.facts[current_if]['ipv6'].append({
                            'address': address,
                            'prefix': prefix,
                        })
        return self.facts
class Virtual(Facts):
    """
    This is a generic Virtual subclass of Facts. This should be further
    subclassed to implement per platform. If you subclass this,
    you should define:
    - virtualization_type
    - virtualization_role
    - container (e.g. solaris zones, freebsd jails, linux containers)
    All subclasses MUST define platform.
    """

    def __new__(cls, *arguments, **keyword):
        # When Virtual is created, it chooses a subclass to create instead.
        # This check prevents the subclass from then trying to find a subclass
        # and create that.
        if cls is not Virtual:
            return super(Virtual, cls).__new__(cls)
        # Pick the subclass whose 'platform' matches the running system;
        # fall back to this generic class when none matches.
        subclass = cls
        for sc in get_all_subclasses(Virtual):
            if sc.platform == platform.system():
                subclass = sc
        # object.__new__ no longer accepts extra arguments on Python 3.
        if PY3:
            return super(cls, subclass).__new__(subclass)
        else:
            return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def populate(self):
        """Run platform detection and return the facts dict."""
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        """Default implementation: empty type/role; overridden per platform."""
        self.facts['virtualization_type'] = ''
        self.facts['virtualization_role'] = ''
class LinuxVirtual(Virtual):
    """
    This is a Linux-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'Linux'

    # For more information, check: http://people.redhat.com/~rjones/virt-what/
    def get_virtual_facts(self):
        """Detect the virtualization technology and whether this host is a
        guest or a hypervisor host.

        Checks are ordered most-specific first; each positive match sets
        the facts and returns immediately, so order matters.
        """
        # lxc/docker
        if os.path.exists('/proc/1/cgroup'):
            for line in get_file_lines('/proc/1/cgroup'):
                if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
                    self.facts['virtualization_type'] = 'docker'
                    self.facts['virtualization_role'] = 'guest'
                    return
                if re.search('/lxc/', line) or re.search('/machine.slice/machine-lxc', line):
                    self.facts['virtualization_type'] = 'lxc'
                    self.facts['virtualization_role'] = 'guest'
                    return

        # lxc does not always appear in cgroups anymore but sets 'container=lxc' environment var, requires root privs
        if os.path.exists('/proc/1/environ'):
            for line in get_file_lines('/proc/1/environ'):
                if re.search('container=lxc', line):
                    self.facts['virtualization_type'] = 'lxc'
                    self.facts['virtualization_role'] = 'guest'
                    return

        if os.path.exists('/proc/vz'):
            self.facts['virtualization_type'] = 'openvz'
            # /proc/bc exists only on the OpenVZ hardware node.
            if os.path.exists('/proc/bc'):
                self.facts['virtualization_role'] = 'host'
            else:
                self.facts['virtualization_role'] = 'guest'
            return

        # systemd writes the detected container technology here.
        systemd_container = get_file_content('/run/systemd/container')
        if systemd_container:
            self.facts['virtualization_type'] = systemd_container
            self.facts['virtualization_role'] = 'guest'
            return

        if os.path.exists("/proc/xen"):
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            try:
                # Only dom0 (the Xen host) exposes "control_d".
                for line in get_file_lines('/proc/xen/capabilities'):
                    if "control_d" in line:
                        self.facts['virtualization_role'] = 'host'
            except IOError:
                pass
            return

        # DMI product name identifies several hypervisors directly.
        product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')

        if product_name in ['KVM', 'Bochs']:
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'RHEV Hypervisor':
            self.facts['virtualization_type'] = 'RHEV'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'VMware Virtual Platform':
            self.facts['virtualization_type'] = 'VMware'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'OpenStack Nova':
            self.facts['virtualization_type'] = 'openstack'
            self.facts['virtualization_role'] = 'guest'
            return

        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')

        if bios_vendor == 'Xen':
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            return

        if bios_vendor == 'innotek GmbH':
            self.facts['virtualization_type'] = 'virtualbox'
            self.facts['virtualization_role'] = 'guest'
            return

        sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')

        # FIXME: This does also match hyperv
        if sys_vendor == 'Microsoft Corporation':
            self.facts['virtualization_type'] = 'VirtualPC'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'Parallels Software International Inc.':
            self.facts['virtualization_type'] = 'parallels'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'QEMU':
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'oVirt':
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'OpenStack Foundation':
            self.facts['virtualization_type'] = 'openstack'
            self.facts['virtualization_role'] = 'guest'
            return

        # Linux-VServer exposes a context id (VxID) per process; 0 is the host.
        if os.path.exists('/proc/self/status'):
            for line in get_file_lines('/proc/self/status'):
                if re.match('^VxID: \d+', line):
                    self.facts['virtualization_type'] = 'linux_vserver'
                    if re.match('^VxID: 0', line):
                        self.facts['virtualization_role'] = 'host'
                    else:
                        self.facts['virtualization_role'] = 'guest'
                    return

        if os.path.exists('/proc/cpuinfo'):
            for line in get_file_lines('/proc/cpuinfo'):
                if re.match('^model name.*QEMU Virtual CPU', line):
                    self.facts['virtualization_type'] = 'kvm'
                elif re.match('^vendor_id.*User Mode Linux', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^model name.*UML', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^vendor_id.*PowerVM Lx86', line):
                    self.facts['virtualization_type'] = 'powervm_lx86'
                elif re.match('^vendor_id.*IBM/S390', line):
                    self.facts['virtualization_type'] = 'PR/SM'
                    lscpu = self.module.get_bin_path('lscpu')
                    if lscpu:
                        rc, out, err = self.module.run_command(["lscpu"])
                        if rc == 0:
                            for line in out.splitlines():
                                data = line.split(":", 1)
                                key = data[0].strip()
                                # lscpu reports the actual hypervisor name.
                                if key == 'Hypervisor':
                                    self.facts['virtualization_type'] = data[1].strip()
                    else:
                        self.facts['virtualization_type'] = 'ibm_systemz'
                else:
                    continue
                if self.facts['virtualization_type'] == 'PR/SM':
                    self.facts['virtualization_role'] = 'LPAR'
                else:
                    self.facts['virtualization_role'] = 'guest'
                return

        # Beware that we can have both kvm and virtualbox running on a single system
        if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
            modules = []
            for line in get_file_lines("/proc/modules"):
                data = line.split(" ", 1)
                modules.append(data[0])

            if 'kvm' in modules:
                if os.path.isdir('/rhev/'):
                    # Check whether this is a RHEV hypervisor (is vdsm running ?)
                    for f in glob.glob('/proc/[0-9]*/comm'):
                        try:
                            if open(f).read().rstrip() == 'vdsm':
                                self.facts['virtualization_type'] = 'RHEV'
                                break
                        except:
                            pass
                    else:
                        self.facts['virtualization_type'] = 'kvm'
                else:
                    self.facts['virtualization_type'] = 'kvm'
                self.facts['virtualization_role'] = 'host'
                return

            if 'vboxdrv' in modules:
                self.facts['virtualization_type'] = 'virtualbox'
                self.facts['virtualization_role'] = 'host'
                return

        # If none of the above matches, return 'NA' for virtualization_type
        # and virtualization_role. This allows for proper grouping.
        self.facts['virtualization_type'] = 'NA'
        self.facts['virtualization_role'] = 'NA'
        return
class VirtualSysctlDetectionMixin(object):
    """
    Mixin that detects virtualization guests by inspecting sysctl values
    (used by the BSD Virtual subclasses).
    """

    def detect_sysctl(self):
        """Locate the sysctl binary and cache its path on the instance."""
        self.sysctl_path = self.module.get_bin_path('sysctl')

    def detect_virt_product(self, key):
        """Set virtualization facts from the product string under *key*."""
        self.detect_sysctl()
        if not self.sysctl_path:
            return
        rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
        if rc != 0:
            return
        # Exact-match product strings and the virtualization type they imply.
        exact_products = {
            'VirtualBox': 'virtualbox',
            'HVM domU': 'xen',
            'Parallels': 'parallels',
            'RHEV Hypervisor': 'RHEV',
        }
        if re.match('(KVM|Bochs|SmartDC).*', out):
            virt_type = 'kvm'
        elif re.match('.*VMware.*', out):
            virt_type = 'VMware'
        else:
            virt_type = exact_products.get(out.rstrip())
        if virt_type is not None:
            self.facts['virtualization_type'] = virt_type
            self.facts['virtualization_role'] = 'guest'

    def detect_virt_vendor(self, key):
        """Set virtualization facts from the vendor string under *key*."""
        self.detect_sysctl()
        if not self.sysctl_path:
            return
        rc, out, err = self.module.run_command("%s -n %s" % (self.sysctl_path, key))
        if rc != 0:
            return
        vendor_map = {'QEMU': 'kvm', 'OpenBSD': 'vmm'}
        virt_type = vendor_map.get(out.rstrip())
        if virt_type is not None:
            self.facts['virtualization_type'] = virt_type
            self.facts['virtualization_role'] = 'guest'
class FreeBSDVirtual(Virtual):
    """
    FreeBSD-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'FreeBSD'

    def get_virtual_facts(self):
        """Detect a Xen guest via the xenstore device node."""
        virt_type = ''
        virt_role = ''
        if os.path.exists('/dev/xen/xenstore'):
            virt_type, virt_role = 'xen', 'guest'
        self.facts['virtualization_type'] = virt_type
        self.facts['virtualization_role'] = virt_role
class DragonFlyVirtual(FreeBSDVirtual):
    """DragonFly BSD virtualization facts; identical to FreeBSD's."""
    platform = 'DragonFly'
class OpenBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
    """
    This is a OpenBSD-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'OpenBSD'
    DMESG_BOOT = '/var/run/dmesg.boot'

    def get_virtual_facts(self):
        """Detect guest virtualization via sysctl, and vmm(4) host
        capability via the boot dmesg."""
        # Set empty values as default
        self.facts['virtualization_type'] = ''
        self.facts['virtualization_role'] = ''
        self.detect_virt_product('hw.product')
        if self.facts['virtualization_type'] == '':
            self.detect_virt_vendor('hw.vendor')
        # Check the dmesg if vmm(4) attached, indicating the host is
        # capable of virtualization.
        dmesg_boot = get_file_content(OpenBSDVirtual.DMESG_BOOT)
        # get_file_content() returns None when the file is missing or
        # unreadable; guard before iterating (previously crashed with
        # AttributeError on None).
        if dmesg_boot:
            for line in dmesg_boot.splitlines():
                match = re.match('^vmm0 at mainbus0: (SVM/RVI|VMX/EPT)$', line)
                if match:
                    self.facts['virtualization_type'] = 'vmm'
                    self.facts['virtualization_role'] = 'host'
class NetBSDVirtual(Virtual, VirtualSysctlDetectionMixin):
    """NetBSD virtualization facts, detected from sysctl DMI values and
    the Xen console device node."""
    platform = 'NetBSD'

    def get_virtual_facts(self):
        """Populate virtualization_type/role (empty strings by default)."""
        self.facts['virtualization_type'] = ''
        self.facts['virtualization_role'] = ''
        self.detect_virt_product('machdep.dmi.system-product')
        if not self.facts['virtualization_type']:
            self.detect_virt_vendor('machdep.dmi.system-vendor')
        if os.path.exists('/dev/xencons'):
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
class HPUXVirtual(Virtual):
    """
    HP-UX specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'HP-UX'

    def get_virtual_facts(self):
        """Detect HP vPar / HPVM / nPar environments via the HP utilities."""
        if os.path.exists('/usr/sbin/vecheck'):
            rc, out, err = self.module.run_command("/usr/sbin/vecheck")
            if rc == 0:
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HP vPar'
        if os.path.exists('/opt/hpvm/bin/hpvminfo'):
            rc, out, err = self.module.run_command("/opt/hpvm/bin/hpvminfo")
            if rc == 0:
                # Classify by the HPVM role reported in the output.
                if re.match('.*Running.*HPVM vPar.*', out):
                    self.facts['virtualization_type'] = 'guest'
                    self.facts['virtualization_role'] = 'HPVM vPar'
                elif re.match('.*Running.*HPVM guest.*', out):
                    self.facts['virtualization_type'] = 'guest'
                    self.facts['virtualization_role'] = 'HPVM IVM'
                elif re.match('.*Running.*HPVM host.*', out):
                    self.facts['virtualization_type'] = 'host'
                    self.facts['virtualization_role'] = 'HPVM'
        if os.path.exists('/usr/sbin/parstatus'):
            rc, out, err = self.module.run_command("/usr/sbin/parstatus")
            if rc == 0:
                self.facts['virtualization_type'] = 'guest'
                self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
    """
    This is a SunOS-specific subclass of Virtual. It defines
    - virtualization_type
    - virtualization_role
    - container
    """
    platform = 'SunOS'

    def get_virtual_facts(self):
        """Detect Solaris zones, logical domains (LDoms) and common
        hypervisor guests (VMware, VirtualBox, KVM, Xen, Parallels)."""
        # Check if it's a zone
        zonename = self.module.get_bin_path('zonename')
        if zonename:
            rc, out, err = self.module.run_command(zonename)
            if rc == 0 and out.rstrip() != "global":
                self.facts['container'] = 'zone'
        # Check if it's a branded zone (i.e. Solaris 8/9 zone)
        if os.path.isdir('/.SUNWnative'):
            self.facts['container'] = 'zone'
        # If it's a zone check if we can detect if our global zone is itself virtualized.
        # Relies on the "guest tools" (e.g. vmware tools) to be installed
        if 'container' in self.facts and self.facts['container'] == 'zone':
            modinfo = self.module.get_bin_path('modinfo')
            if modinfo:
                rc, out, err = self.module.run_command(modinfo)
                if rc == 0:
                    for line in out.splitlines():
                        if 'VMware' in line:
                            self.facts['virtualization_type'] = 'vmware'
                            self.facts['virtualization_role'] = 'guest'
                        if 'VirtualBox' in line:
                            self.facts['virtualization_type'] = 'virtualbox'
                            self.facts['virtualization_role'] = 'guest'
        if os.path.exists('/proc/vz'):
            self.facts['virtualization_type'] = 'virtuozzo'
            self.facts['virtualization_role'] = 'guest'
        # Detect domaining on Sparc hardware
        virtinfo = self.module.get_bin_path('virtinfo')
        if virtinfo:
            # The output of virtinfo is different whether we are on a machine with logical
            # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
            rc, out, err = self.module.run_command("/usr/sbin/virtinfo -p")
            # The output contains multiple lines with different keys like this:
            #   DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
            # The output may also be not formatted and the returncode is set to 0 regardless of the error condition:
            #   virtinfo can only be run from the global zone
            if rc == 0:
                try:
                    for line in out.splitlines():
                        fields = line.split('|')
                        if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
                            self.facts['virtualization_type'] = 'ldom'
                            self.facts['virtualization_role'] = 'guest'
                            # Collect the roles this domain provides
                            # (control/io/service/root) into the role string.
                            hostfeatures = []
                            for field in fields[2:]:
                                arg = field.split('=')
                                if( arg[1] == 'true' ):
                                    hostfeatures.append(arg[0])
                            if( len(hostfeatures) > 0 ):
                                self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
                except ValueError:
                    pass
        else:
            # No virtinfo: fall back to scanning SMBIOS strings.
            smbios = self.module.get_bin_path('smbios')
            if not smbios:
                return
            rc, out, err = self.module.run_command(smbios)
            if rc == 0:
                for line in out.splitlines():
                    if 'VMware' in line:
                        self.facts['virtualization_type'] = 'vmware'
                        self.facts['virtualization_role'] = 'guest'
                    elif 'Parallels' in line:
                        self.facts['virtualization_type'] = 'parallels'
                        self.facts['virtualization_role'] = 'guest'
                    elif 'VirtualBox' in line:
                        self.facts['virtualization_type'] = 'virtualbox'
                        self.facts['virtualization_role'] = 'guest'
                    elif 'HVM domU' in line:
                        self.facts['virtualization_type'] = 'xen'
                        self.facts['virtualization_role'] = 'guest'
                    elif 'KVM' in line:
                        self.facts['virtualization_type'] = 'kvm'
                        self.facts['virtualization_role'] = 'guest'
class Ohai(Facts):
    """
    This is a subclass of Facts for including information gathered from Ohai.
    """
    def populate(self):
        """Run ohai and return the (possibly augmented) facts dict."""
        self.run_ohai()
        return self.facts
    def run_ohai(self):
        """Invoke the ``ohai`` binary, if installed, and merge its JSON
        output into ``self.facts``.

        Missing ohai, a failed run, or unparsable output are silently
        ignored — fact gathering here is best-effort.
        """
        ohai_path = self.module.get_bin_path('ohai')
        if ohai_path is None:
            return
        rc, out, err = self.module.run_command(ohai_path)
        try:
            self.facts.update(json.loads(out))
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; bad or empty
            # JSON from ohai is still deliberately ignored.
            pass
class Facter(Facts):
    """
    This is a subclass of Facts for including information gathered from Facter.
    """
    def populate(self):
        """Run facter and return the (possibly replaced) facts dict."""
        self.run_facter()
        return self.facts
    def run_facter(self):
        """Invoke facter (preferring cfacter when present) with
        ``--puppet --json`` and parse the output.

        NOTE: unlike Ohai above, this *replaces* ``self.facts`` entirely
        rather than updating it. Failures to run or parse are ignored.
        """
        facter_path = self.module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
        cfacter_path = self.module.get_bin_path('cfacter', opt_dirs=['/opt/puppetlabs/bin'])
        # Prefer to use cfacter if available
        if cfacter_path is not None:
            facter_path = cfacter_path
        if facter_path is None:
            return
        # if facter is installed, and we can use --json because
        # ruby-json is ALSO installed, include facter data in the JSON
        rc, out, err = self.module.run_command(facter_path + " --puppet --json")
        try:
            self.facts = json.loads(out)
        except Exception:
            # Narrowed from a bare ``except:``; invalid JSON (e.g. when
            # ruby-json is missing) is deliberately ignored.
            pass
def get_file_content(path, default=None, strip=True):
    """Return the contents of ``path`` as a string.

    Returns ``default`` when the file does not exist, is not readable,
    cannot actually be read (e.g. restricted /proc entries inside
    jails/containers), or — with ``strip=True`` — strips to the empty
    string.
    """
    data = default
    if os.path.exists(path) and os.access(path, os.R_OK):
        datafile = None
        try:
            try:
                datafile = open(path)
                data = datafile.read()
                if strip:
                    data = data.strip()
                if len(data) == 0:
                    data = default
            finally:
                # Guarding on None fixes a latent NameError: previously,
                # if open() itself raised, the finally clause referenced
                # an unbound ``datafile`` (masked by the bare except).
                if datafile is not None:
                    datafile.close()
        except Exception:
            # ignore errors as some jails/containers might have readable
            # permissions but not allow reads to proc
            # (narrowed from a bare ``except:`` so interpreter-exit
            # exceptions propagate; done in 2 blocks for 2.4 compat)
            pass
    return data
def get_uname_version(module):
    """Return the output of ``uname -v``, or None when the command fails."""
    rc, stdout, _err = module.run_command(['uname', '-v'])
    return stdout if rc == 0 else None
def get_partition_uuid(partname):
    """Return the filesystem UUID for ``/dev/<partname>``, or None.

    Scans /dev/disk/by-uuid symlinks; returns None when the directory is
    absent (no udev) or no symlink resolves to the partition.
    """
    try:
        candidates = os.listdir("/dev/disk/by-uuid")
    except OSError:
        return None
    target = "/dev/" + partname
    for candidate in candidates:
        if os.path.realpath("/dev/disk/by-uuid/" + candidate) == target:
            return candidate
    return None
def get_file_lines(path):
    '''get list of lines from file'''
    content = get_file_content(path)
    # get_file_content() returns None/default for unreadable or empty
    # files, so fall back to an empty list in that case.
    return content.splitlines() if content else []
def ansible_facts(module, gather_subset):
    """Collect the base facts plus every subset named in ``gather_subset``.

    Each subset collector receives the facts gathered so far via
    ``cached_facts`` so later collectors can build on earlier results.
    """
    facts = {'gather_subset': list(gather_subset)}
    facts.update(Facts(module).populate())
    for subset in gather_subset:
        collector = FACT_SUBSETS[subset](module,
                                         load_on_init=False,
                                         cached_facts=facts)
        facts.update(collector.populate())
    return facts
def get_all_facts(module):
    # Top-level entry point for the setup module: expand the user-supplied
    # gather_subset (supporting 'all' and '!'-prefixed exclusions), run the
    # selected collectors, and wrap everything in the standard
    # {'ansible_facts': {...}} result structure, honoring the 'filter' param.
    setup_options = dict(module_setup=True)
    # Retrieve module parameters
    gather_subset = module.params['gather_subset']
    # NOTE(review): GATHER_TIMEOUT appears to be consumed by collectors
    # defined elsewhere in this module — confirm before relying on it.
    global GATHER_TIMEOUT
    GATHER_TIMEOUT = module.params['gather_timeout']
    # Retrieve all facts elements
    additional_subsets = set()
    exclude_subsets = set()
    for subset in gather_subset:
        if subset == 'all':
            additional_subsets.update(VALID_SUBSETS)
            continue
        # A leading '!' excludes a subset ('!all' excludes everything).
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False
        if subset not in VALID_SUBSETS:
            raise TypeError("Bad subset '%s' given to Ansible. gather_subset options allowed: all, %s" % (subset, ", ".join(FACT_SUBSETS.keys())))
        if exclude:
            exclude_subsets.add(subset)
        else:
            additional_subsets.add(subset)
    # No explicit inclusions means "gather everything" (minus exclusions).
    if not additional_subsets:
        additional_subsets.update(VALID_SUBSETS)
    additional_subsets.difference_update(exclude_subsets)
    # facter and ohai are given a different prefix than other subsets
    if 'facter' in additional_subsets:
        additional_subsets.difference_update(('facter',))
        facter_ds = FACT_SUBSETS['facter'](module, load_on_init=False).populate()
        if facter_ds:
            for (k, v) in facter_ds.items():
                setup_options['facter_%s' % k.replace('-', '_')] = v
    if 'ohai' in additional_subsets:
        additional_subsets.difference_update(('ohai',))
        ohai_ds = FACT_SUBSETS['ohai'](module, load_on_init=False).populate()
        if ohai_ds:
            for (k, v) in ohai_ds.items():
                setup_options['ohai_%s' % k.replace('-', '_')] = v
    facts = ansible_facts(module, additional_subsets)
    for (k, v) in facts.items():
        setup_options["ansible_%s" % k.replace('-', '_')] = v
    setup_result = { 'ansible_facts': {} }
    # Apply the user's fnmatch-style 'filter' pattern to fact names.
    for (k,v) in setup_options.items():
        if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
            setup_result['ansible_facts'][k] = v
    return setup_result
# Allowed fact subset for gather_subset options and what classes they use
# Note: have to define this at the bottom as it references classes defined earlier in this file
FACT_SUBSETS = dict(
    hardware=Hardware,
    network=Network,
    virtual=Virtual,
    ohai=Ohai,
    facter=Facter,
)
# Immutable set of valid subset names; used for validation in get_all_facts().
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
| mensler/ansible | lib/ansible/module_utils/facts.py | Python | gpl-3.0 | 171,136 | 0.004494 |
from django import template
from go.base.utils import get_router_view_definition
register = template.Library()
@register.simple_tag
def router_screen(router, view_name='show'):
    """Template tag returning the URL of ``view_name`` for the given router.

    Defaults to the 'show' view when no view name is supplied.
    """
    definition = get_router_view_definition(router.router_type, router)
    return definition.get_view_url(view_name, router_key=router.key)
| praekelt/vumi-go | go/router/templatetags/router_tags.py | Python | bsd-3-clause | 319 | 0 |
#!/usr/bin/env python
# Capture a single photo, shrink it, and upload it to an FTP server.
# Runs alongside a separate timelapse script (hence the startup delay).
import camera
import resize
import ftpupload
import time
# wait 10s to not interfer with the timelaps script
time.sleep(10)
print("taking a picture")
imagePath = camera.capture()
print("captured %s" % imagePath)
smallImagePath = resize.resizeImg(imagePath)
print("resized image")
print("uploading....")
ftpupload.uploadFile(smallImagePath)
print("upload completed")
| robinwyss/raspberry-scripts | main.py | Python | mit | 390 | 0 |
"""audio driver subsystem"""
from os.path import exists
from os import environ
from subprocess import check_call
from functools import partial
from .drive import Driver
import click
DRIVE_QUEUE = 'a.drive'
CTL_PATH = '{}/.config/pianobar/ctl'.format(environ['HOME'])
COMMANDS = {'p', 'n', '^', '(', ')'}
def callback(ctl:'file_t', cmd:str) -> "IO ()":
    """forwards a recognized playback command to the pianobar ctl pipe;
    anything outside COMMANDS is silently dropped"""
    if cmd in COMMANDS:
        ctl.write(cmd)
        ctl.flush()
@click.command()
@click.option('--ctl_path', default=CTL_PATH)
@click.option('--queue', default=DRIVE_QUEUE)
def main(ctl_path:str, queue:str) -> "IO ()":
    """daemon for a.drive queue consumption"""
    # If the ctl path is missing, run pianoctl first (its stdout is
    # discarded) — presumably this creates the pianobar ctl pipe; confirm.
    if not exists(ctl_path):
        with open('/dev/null', 'w') as null:
            check_call(['pianoctl'], stdout=null)
    # Keep the ctl pipe open for the lifetime of the driver loop and
    # forward every queued command through callback().
    with open(ctl_path, 'w') as ctl:
        Driver(callback=partial(callback, ctl), queue=queue).drive()
| acm-uiuc/DJ-Roomba | dj_roomba/adrive.py | Python | gpl-2.0 | 906 | 0.005519 |
from myhdl import *
from UK101AddressDecode import UK101AddressDecode
def bench():
    """myhdl testbench: sweep the full 16-bit address space through the
    UK101 address decoder and return (dut, stimulus) for simulation."""
    # 16-bit address bus under test.
    AL = Signal(intbv(0)[16:])
    # One select line per decoded region.
    MonitorRom = Signal(bool(0))
    ACIA = Signal(bool(0))
    KeyBoardPort = Signal(bool(0))
    VideoMem = Signal(bool(0))
    BasicRom = Signal(bool(0))
    Ram = Signal(bool(0))
    dut = UK101AddressDecode(
        AL,
        MonitorRom,
        ACIA,
        KeyBoardPort,
        VideoMem,
        BasicRom,
        Ram)
    @instance
    def stimulus():
        # Drive every possible address, waiting 10 time units per step.
        for i in range(0, 2**16):
            AL.next = i
            yield delay(10)
        raise StopSimulation()
    return dut, stimulus
# Run the simulation with VCD tracing enabled (traceSignals writes a .vcd).
sim = Simulation(traceSignals(bench))
sim.run()
| jandecaluwe/myhdl-examples | crusty_UK101/UK101AddressDecode/bench.py | Python | mit | 691 | 0.013025 |
from django.db import models
from django.utils.html import format_html
from sorl.thumbnail import get_thumbnail
from sorl.thumbnail.fields import ImageField
from sno.models import Sno
class SnoGalleries(models.Model):
    # A photo belonging to the gallery of an SNO (student scientific society).
    class Meta:
        verbose_name = 'Фотография в галереи СНО'
        verbose_name_plural = 'Фотографии в галереи СНО'
    # Optional human-readable title of the photo.
    name = models.CharField('Название фото', max_length=255, blank=True, null=True)
    # The uploaded image itself (sorl-thumbnail ImageField).
    photo = ImageField(verbose_name='Фото', max_length=255)
    description = models.TextField('Описание', blank=True, null=True)
    # Owning SNO; deleting the SNO cascades to its photos.
    sno = models.ForeignKey(Sno, verbose_name='СНО', on_delete=models.CASCADE)
    date_created = models.DateField('Дата', auto_now_add=True)
    def photo_preview(self):
        # 75x75 center-cropped thumbnail that links to the full-size image;
        # presumably used as an admin list column (see short_description) — confirm.
        img = get_thumbnail(self.photo, '75x75', crop='center')
        return format_html('<a href="{}" target="_blank"><img style="width:75px; height:75px;" src="{}"></a>',
                           self.photo.url, img.url)
    photo_preview.short_description = 'Фото'
    def __str__(self):
        return '%s (%s)' % (self.name, self.sno.short_name)
| glad-web-developer/zab_sno | src/sno_galleries/models.py | Python | apache-2.0 | 1,164 | 0.00184 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to configure .deb packages.
(c) 2014, Brian Coca <briancoca+ansible@gmail.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: debconf
short_description: Configure a .deb package
description:
- Configure a .deb package using debconf-set-selections. Or just query
existing selections.
version_added: "1.6"
notes:
- This module requires the command line debconf tools.
- A number of questions have to be answered (depending on the package).
Use 'debconf-show <package>' on any Debian or derivative with the package
installed to see questions/settings available.
- Some distros will always record tasks involving the setting of passwords as changed. This is due to debconf-get-selections masking passwords.
requirements: [ debconf, debconf-utils ]
options:
name:
description:
- Name of package to configure.
required: true
default: null
aliases: ['pkg']
question:
description:
- A debconf configuration setting
required: false
default: null
aliases: ['setting', 'selection']
vtype:
description:
- The type of the value supplied.
- C(seen) was added in 2.2.
required: false
default: null
choices: [string, password, boolean, select, multiselect, note, error, title, text, seen]
value:
description:
- Value to set the configuration to
required: false
default: null
aliases: ['answer']
unseen:
description:
- Do not set 'seen' flag when pre-seeding
required: false
default: False
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# Set default locale to fr_FR.UTF-8
- debconf:
name: locales
question: locales/default_environment_locale
value: fr_FR.UTF-8
vtype: select
# set to generate locales:
- debconf:
name: locales
question: locales/locales_to_be_generated
value: en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8
vtype: multiselect
# Accept oracle license
- debconf:
name: oracle-java7-installer
question: shared/accepted-oracle-license-v1-1
value: true
vtype: select
# Specifying package you can register/return the list of questions and current values
- debconf:
name: tzdata
'''
def get_selections(module, pkg):
    """Return the current debconf selections for *pkg* as a dict mapping
    question name -> current value, using ``debconf-show``.

    Fails the module (via ``module.fail_json``) when debconf-show exits
    non-zero.
    """
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    # Pass the argv list directly instead of ' '.join()-ing it: joining
    # forces re-splitting of the command line and would break on package
    # names containing whitespace or shell metacharacters.
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg=err)
    selections = {}
    for line in out.splitlines():
        (key, value) = line.split(':', 1)
        # debconf-show marks already-"seen" questions with a leading '*'.
        selections[key.strip('*').strip()] = value.strip()
    return selections
def set_selection(module, pkg, question, vtype, value, unseen):
    """Apply one debconf selection via ``debconf-set-selections``.

    Returns the (rc, stdout, stderr) tuple from run_command. With
    ``unseen`` set, the question's 'seen' flag is left untouched (-u).
    """
    cmd = [module.get_bin_path('debconf-set-selections', True)]
    if unseen:
        cmd.append('-u')
    if vtype == 'boolean':
        # debconf expects lowercase booleans on stdin.
        value = {'True': 'true', 'False': 'false'}.get(value, value)
    data = ' '.join([pkg, question, vtype, value])
    return module.run_command(cmd, data=data)
def main():
    # Module entry point: read/compare/apply a single debconf selection.
    # Without a 'question' parameter this only reports current selections.
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True, aliases=['pkg'], type='str'),
            question = dict(required=False, aliases=['setting', 'selection'], type='str'),
            vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
            value = dict(required=False, type='str', aliases=['answer']),
            unseen = dict(required=False, type='bool'),
        ),
        required_together = ( ['question','vtype', 'value'],),
        supports_check_mode=True,
    )
    #TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]
    prev = get_selections(module, pkg)
    changed = False
    msg = ""
    if question is not None:
        if vtype is None or value is None:
            module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
        # Changed when the question is unset or holds a different value.
        if not question in prev or prev[question] != value:
            changed = True
        if changed:
            if not module.check_mode:
                rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen)
                if rc:
                    module.fail_json(msg=e)
            curr = { question: value }
            # Narrow 'previous' to just the affected question for the result.
            if question in prev:
                prev = {question: prev[question]}
            else:
                prev[question] = ''
            if module._diff:
                after = prev.copy()
                after.update(curr)
                diff_dict = {'before': prev, 'after': after}
            else:
                diff_dict = {}
            # exit_json terminates the module here on the changed path.
            module.exit_json(changed=changed, msg=msg, current=curr, previous=prev, diff=diff_dict)
    # Unchanged (or query-only) path: report the full current selections.
    module.exit_json(changed=changed, msg=msg, current=prev)
# import module snippets
# NOTE: the wildcard import supplies AnsibleModule (Ansible <2.1 convention).
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| Inspq/ansible | lib/ansible/modules/system/debconf.py | Python | gpl-3.0 | 5,880 | 0.006633 |
# -*- coding: utf-8 -*-
# TESPy example: model a flat-plate solar thermal collector, solve several
# design-point parametrisations, then map off-design efficiency over
# ambient temperature and absorbed radiation.
from tespy.networks import network
from tespy.components import sink, source, solar_collector
from tespy.connections import connection
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
# Imported for its side effect of registering the '3d' projection used below.
from mpl_toolkits.mplot3d import Axes3D
# %% network
fluid_list = ['H2O']
nw = network(fluids=fluid_list, p_unit='bar', T_unit='C')
# %% components
# sinks & sources
back = source('to collector')
feed = sink('from collector')
# collector
coll = solar_collector(label='solar thermal collector')
# %% connections
b_c = connection(back, 'out1', coll, 'in1')
c_f = connection(coll, 'out1', feed, 'in1')
nw.add_conns(b_c, c_f)
# %% component parameters
# set pressure ratio and heat flow, as well as dimensional parameters of
# the collector. E is missing, thus energy balance for radiation is not
# performed at this point
coll.set_attr(pr=0.99, Q=8e3)
# %% connection parameters
b_c.set_attr(p=5, T=35, fluid={'H2O': 1})
c_f.set_attr(p0=2, T=120)
# %% solving
# going through several parametrisation possibilities
print('###############')
print('simulation 1')
mode = 'design'
nw.solve(mode=mode)
nw.print_results()
# set absorption instead of outlet temperature
coll.set_attr(E=9e2, eta_opt=0.9, lkf_lin=1, lkf_quad=0.005, A=10, Tamb=10)
c_f.set_attr(T=np.nan)
print('###############')
print('simulation 2')
nw.solve(mode=mode)
nw.print_results()
# set outlet temperature and mass flow instead of heat flow and radiation
coll.set_attr(Q=np.nan, E=np.nan)
c_f.set_attr(T=100, m=1e-1)
print('###############')
print('design simulation')
nw.solve(mode=mode)
nw.print_results()
# Save this design point as the reference for the off-design sweep below.
nw.save('design')
# looping over different ambient temperatures and levels of absorption
# (of the inclined surface) assuming constant mass flow
# set print_level to none
mode = 'offdesign'
nw.set_attr(iterinfo=False)
c_f.set_attr(T=np.nan)
gridnum = 10
T_amb = np.linspace(-10, 30, gridnum, dtype=float)
E_glob = np.linspace(100, 1000, gridnum, dtype=float)
# Rows indexed by absorption level, columns by ambient temperature.
df = pd.DataFrame(columns=T_amb)
for E in E_glob:
    eta = []
    coll.set_attr(E=E)
    for T in T_amb:
        coll.set_attr(Tamb=T)
        nw.solve(mode=mode, design_path='design')
        # Efficiency = delivered heat / incident radiation on the area.
        eta += [coll.Q.val / (coll.E.val * coll.A.val)]
        # cut out efficiencies smaller than zero
        if eta[-1] < 0:
            eta[-1] = np.nan
    df.loc[E] = eta
print('###############')
print('offdesign performance map')
E, T = np.meshgrid(T_amb, E_glob)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(E, T, df.values)
# temperature difference -> mean collector temperature to ambient temperature
ax.set_xlabel('ambient temperature t_a in °C')
# absorption on the inclined surface
ax.set_ylabel('absorption E in $\mathrm{\\frac{W}{m^2}}$')
# thermal efficiency (no optical losses)
ax.set_zlabel('efficiency $\eta$')
plt.show()
| oemof/examples | oemof_examples/tespy/solar_collector/solar_collector.py | Python | gpl-3.0 | 2,858 | 0.0007 |
from __future__ import unicode_literals
from django.contrib.auth.models import Permission, User
from django.utils import six
from djblets.avatars.services.gravatar import GravatarService
from djblets.testing.decorators import add_fixtures
from djblets.webapi.testing.decorators import webapi_test_template
from kgb import SpyAgency
from reviewboard.accounts.backends import (AuthBackend,
get_enabled_auth_backends)
from reviewboard.accounts.models import Profile
from reviewboard.avatars import avatar_services
from reviewboard.avatars.testcase import AvatarServicesTestMixin
from reviewboard.site.models import LocalSite
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (user_item_mimetype,
user_list_mimetype)
from reviewboard.webapi.tests.mixins import BasicTestsMetaclass
from reviewboard.webapi.tests.urls import (get_user_item_url,
get_user_list_url)
@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(SpyAgency, BaseWebAPITestCase):
    """Testing the UserResource list API tests."""
    # BasicTestsMetaclass generates additional tests from the class
    # attributes below; SpyAgency provides spy_on() for the backend tests.
    fixtures = ['test_users']
    sample_api_url = 'users/'
    resource = resources.user
    test_http_methods = ('GET',)
    def setup_http_not_allowed_list_test(self, user):
        return get_user_list_url()
    def compare_item(self, item_rsp, obj):
        # Minimal payload check used by the metaclass-generated tests.
        self.assertEqual(item_rsp['id'], obj.pk)
        self.assertEqual(item_rsp['username'], obj.username)
    #
    # HTTP GET tests
    #
    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        if not populate_items:
            items = []
        elif with_local_site:
            local_site = self.get_local_site(name=local_site_name)
            items = list(local_site.users.all())
        else:
            items = list(User.objects.all())
        return (get_user_list_url(local_site_name),
                user_list_mimetype,
                items)
    @webapi_test_template
    def test_get_filter_inactive(self):
        """Testing the GET <URL> API filters out inactive users by default"""
        dopey = User.objects.get(username='dopey')
        dopey.is_active = False
        dopey.save()
        rsp = self.api_get(get_user_list_url(),
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        user_pks = [user['id'] for user in rsp['users']]
        returned_users = set(User.objects.filter(pk__in=user_pks))
        expected_users = set(User.objects.filter(is_active=True))
        self.assertEqual(returned_users, expected_users)
    @webapi_test_template
    def test_get_include_inactive(self):
        """Testing the GET <URL>/?include-inactive=1 API includes inactive
        users
        """
        dopey = User.objects.get(username='dopey')
        dopey.is_active = False
        dopey.save()
        rsp = self.api_get(get_user_list_url(), {'include-inactive': '1'},
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        user_pks = [user['id'] for user in rsp['users']]
        self.assertEqual(set(User.objects.filter(pk__in=user_pks)),
                         set(User.objects.all()))
    @webapi_test_template
    def test_get_include_inactive_true(self):
        """Testing the GET <URL>/?include-inactive=true API includes inactive
        users
        """
        dopey = User.objects.get(username='dopey')
        dopey.is_active = False
        dopey.save()
        rsp = self.api_get(get_user_list_url(), {'include-inactive': 'true'},
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        user_pks = [user['id'] for user in rsp['users']]
        self.assertEqual(set(User.objects.filter(pk__in=user_pks)),
                         set(User.objects.all()))
    def test_get_with_q(self):
        """Testing the GET users/?q= API"""
        rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(len(rsp['users']), 1)  # grumpy
    def test_query_users_auth_backend(self):
        """Testing the GET users/?q= API
        with AuthBackend.query_users failure
        """
        # The sandboxed backend raises; the API must swallow the error
        # and still return a successful (if unfiltered) response.
        class SandboxAuthBackend(AuthBackend):
            backend_id = 'test-id'
            name = 'test'
            def query_users(self, query, request):
                raise Exception
        backend = SandboxAuthBackend()
        self.spy_on(get_enabled_auth_backends, call_fake=lambda: [backend])
        self.spy_on(backend.query_users)
        rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertTrue(backend.query_users.called)
    def test_search_users_auth_backend(self):
        """Testing the GET users/?q= API
        with AuthBackend.search_users failure
        """
        class SandboxAuthBackend(AuthBackend):
            backend_id = 'test-id'
            name = 'test'
            def search_users(self, query, request):
                raise Exception
        backend = SandboxAuthBackend()
        self.spy_on(get_enabled_auth_backends, call_fake=lambda: [backend])
        self.spy_on(backend.search_users)
        rsp = self.api_get(get_user_list_url(), {'q': 'gru'},
                           expected_mimetype=user_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertTrue(backend.search_users.called)
    #
    # HTTP POST tests
    #
    @webapi_test_template
    def test_post_anonymous(self):
        """Testing the POST <URL> API as an anonymous user"""
        self.client.logout()
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'username',
                'password': 'password',
                'email': 'email@example.com',
            },
            expected_status=401)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertIn('err', rsp)
        self.assertIn('code', rsp['err'])
        self.assertEqual(rsp['err']['code'], 103)
    @webapi_test_template
    def test_post(self):
        """Testing the POST <URL> API as a regular user"""
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'username',
                'password': 'password',
                'email': 'email@example.com'
            },
            expected_status=403)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertIn('err', rsp)
        self.assertIn('code', rsp['err'])
        self.assertEqual(rsp['err']['code'], 101)
    @webapi_test_template
    def test_post_superuser(self):
        """Testing the POST <URL> API as a superuser"""
        self.client.login(username='admin', password='admin')
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'username',
                'password': 'password',
                'email': 'email@example.com',
            },
            expected_mimetype=user_item_mimetype)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.compare_item(rsp['user'], User.objects.get(username='username'))
    @webapi_test_template
    def test_post_auth_add_user_perm(self):
        """Testing the POST <URL> API as a user with the auth.add_user
        permission
        """
        self.user.user_permissions.add(
            Permission.objects.get(content_type__app_label='auth',
                                   codename='add_user'))
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'username',
                'password': 'password',
                'email': 'email@example.com',
            },
            expected_mimetype=user_item_mimetype)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.compare_item(rsp['user'], User.objects.get(username='username'))
    @webapi_test_template
    def test_post_local_site(self):
        """Testing the POST <URL> API with a local site"""
        # User creation is not allowed on local sites, even for superusers.
        local_site = LocalSite.objects.create(name='test', public=True)
        self.client.login(username='admin', password='admin')
        rsp = self.api_post(
            get_user_list_url(local_site.name),
            {
                'username': 'username',
                'password': 'password',
                'email': 'email@example.com'
            },
            expected_status=403)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertIn('err', rsp)
        self.assertIn('code', rsp['err'])
        self.assertEqual(rsp['err']['code'], 101)
    @webapi_test_template
    def test_post_duplicate_username(self):
        """Testing the POST <URL> API for a username that already exists"""
        self.client.login(username='admin', password='admin')
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'doc',
                'password': 'password',
                'email': 'doc@example.com'
            },
            expected_status=400)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertIn('fields', rsp)
        self.assertIn('username', rsp['fields'])
    @webapi_test_template
    def test_post_invalid_email(self):
        """Testing the POST <URL> API for an invalid e-mail address"""
        self.client.login(username='admin', password='admin')
        rsp = self.api_post(
            get_user_list_url(),
            {
                'username': 'username',
                'password': 'password',
                'email': 'invalid e-mail',
            },
            expected_status=400)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'fail')
        self.assertIn('fields', rsp)
        self.assertIn('email', rsp['fields'])
@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(AvatarServicesTestMixin, BaseWebAPITestCase):
    """Testing the UserResource item API tests."""
    fixtures = ['test_users']
    sample_api_url = 'users/<username>/'
    resource = resources.user
    def setUp(self):
        super(ResourceItemTests, self).setUp()
        # Enable Gravatar so avatar URL fields are present in responses.
        avatar_services.enable_service(GravatarService, save=False)
    def setup_http_not_allowed_item_test(self, user):
        return get_user_item_url(user.username)
    def compare_item(self, item_rsp, user):
        self.assertEqual(item_rsp['id'], user.pk)
        self.assertEqual(item_rsp['username'], user.username)
        self.assertEqual(item_rsp['first_name'], user.first_name)
        self.assertEqual(item_rsp['last_name'], user.last_name)
        self.assertEqual(item_rsp['email'], user.email)
        # There's no simple way to test the specific URLs that are returned,
        # but we can at least make sure everything we expect to be present is
        # present.
        self.assertIn('avatar_url', item_rsp)
        self.assertIn('1x', item_rsp['avatar_urls'])
        self.assertIn('2x', item_rsp['avatar_urls'])
    #
    # HTTP GET tests
    #
    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        return (get_user_item_url(user.username, local_site_name),
                user_item_mimetype,
                user)
    def test_get_not_modified(self):
        """Testing the GET users/<username>/ API with Not Modified response"""
        self._testHttpCaching(get_user_item_url('doc'),
                              check_etags=True)
    @add_fixtures(['test_site'])
    def test_get_with_site_and_profile_private(self):
        """Testing the GET users/<username>/ API
        with a local site and private profile
        """
        # Private profiles hide the name/e-mail fields from other users.
        self._login_user(local_site=True)
        username = 'admin'
        user = User.objects.get(username=username)
        profile, is_new = Profile.objects.get_or_create(user=user)
        profile.is_private = True
        profile.save()
        rsp = self.api_get(get_user_item_url(username, self.local_site_name),
                           expected_mimetype=user_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['user']['username'], user.username)
        self.assertNotIn('first_name', rsp['user'])
        self.assertNotIn('last_name', rsp['user'])
        self.assertNotIn('email', rsp['user'])
    @add_fixtures(['test_site'])
    def test_get_missing_user_with_site(self):
        """Testing the GET users/<username>/ API with a local site"""
        self._login_user(local_site=True)
        self.api_get(get_user_item_url('dopey', self.local_site_name),
                     expected_status=404)
    @webapi_test_template
    def test_get_with_profile_private_and_only_fields(self):
        """Testing the GET <URL> API with a private profile and ?only-fields=
        """
        username = 'dopey'
        user = User.objects.get(username=username)
        profile, is_new = Profile.objects.get_or_create(user=user)
        profile.is_private = True
        profile.save()
        rsp = self.api_get(
            '%s?only-fields=username' % get_user_item_url(username),
            expected_mimetype=user_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['user']['username'], user.username)
        self.assertNotIn('first_name', rsp['user'])
        self.assertNotIn('last_name', rsp['user'])
        self.assertNotIn('email', rsp['user'])
    @webapi_test_template
    def test_get_inactive_user(self):
        """Testing the GET <URL> API for an inactive user"""
        # Inactive users remain fetchable by item URL, flagged accordingly.
        dopey = User.objects.get(username='dopey')
        dopey.is_active = False
        dopey.save()
        rsp = self.api_get(get_user_item_url('dopey'),
                           expected_mimetype=user_item_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['user']['is_active'], False)
import logging
import colander
from grano.core import db, celery
from grano.model import Entity
from grano.logic import properties as properties_logic
from grano.logic.references import ProjectRef, AccountRef
from grano.logic.references import SchemaRef, EntityRef
from grano.plugins import notify_plugins
log = logging.getLogger(__name__)
class EntityBaseValidator(colander.MappingSchema):
    # Fields required for any entity write: the submitting account and
    # the project the entity belongs to (resolved via reference types).
    author = colander.SchemaNode(AccountRef())
    project = colander.SchemaNode(ProjectRef())
class MergeValidator(colander.MappingSchema):
    # Input schema for entity merges: 'orig' is folded into 'dest'.
    orig = colander.SchemaNode(EntityRef())
    dest = colander.SchemaNode(EntityRef())
def validate(data, entity):
    """ Due to some fairly weird interdependencies between the different
    elements of the model, validation of entities has to happen in three
    steps. """
    # Step 1: validate the base fields (author, project).
    validator = EntityBaseValidator()
    sane = validator.deserialize(data)
    project = sane.get('project')
    # Step 2: resolve the schema, which can only be validated once the
    # project is known (SchemaRef is scoped to the project).
    schema_validator = colander.SchemaNode(colander.Mapping())
    schema_validator.add(colander.SchemaNode(SchemaRef(project),
                                             name='schema'))
    sane.update(schema_validator.deserialize(data))
    # Step 3: validate the properties against the resolved schema.
    sane['properties'] = properties_logic.validate('entity', entity,
                                                   project, sane.get('schema'),
                                                   data.get('properties', []))
    return sane
@celery.task
def _entity_changed(entity_id, operation):
    """ Notify plugins about changes to an entity. """
    notify_plugins('grano.entity.change',
                   lambda handler: handler.entity_changed(entity_id, operation))
def save(data, files=None, entity=None):
    """ Save or update an entity. """
    data = validate(data, entity)
    operation = 'create' if entity is None else 'update'
    if entity is None:
        # Creating: project and author are fixed at creation time.
        entity = Entity()
        entity.project = data.get('project')
        entity.author = data.get('author')
        db.session.add(entity)
    entity.schema = data.get('schema')
    # Save every submitted property, tracking which names were present.
    prop_names = set()
    for name, prop in data.get('properties').items():
        prop_names.add(name)
        prop['project'] = entity.project
        prop['name'] = name
        prop['author'] = data.get('author')
        properties_logic.save(entity, prop, files=files)
    # Deactivate (rather than delete) properties omitted from this update.
    for prop in entity.properties:
        if prop.name not in prop_names:
            prop.active = False
    db.session.flush()
    # Async plugin notification (runs after the surrounding commit).
    _entity_changed.delay(entity.id, operation)
    return entity
def delete(entity):
    """ Delete the entity and its properties, as well as any associated
    relations. """
    # Cascades (properties/relations) are handled by the ORM mapping —
    # presumably configured on the Entity model; confirm.
    db.session.delete(entity)
    _entity_changed.delay(entity.id, 'delete')
def merge(source, dest):
    """ Copy all properties and relations from one entity onto another, then
    mark the source entity as an ID alias for the destionation entity. """
    # No-op short circuits: identical entities or an existing alias link
    # in either direction.
    if source.id == dest.id:
        return source
    if dest.same_as == source.id:
        return source
    if source.same_as == dest.id:
        return dest
    if dest.same_as is not None:
        # potential infinite recursion here.
        canonical = Entity.by_id(dest.same_as)
        if canonical is not None:
            return merge(source, canonical)
    # Prefer the more specific (child) schema of the two.
    if dest.schema.is_parent(source.schema):
        dest.schema = source.schema
    dest_valid = [a.name for a in dest.schema.attributes]
    dest_active = [p.name for p in dest.active_properties]
    # Move properties over; don't let a moved property shadow one that is
    # already active on the destination, and drop ones the destination
    # schema does not allow.
    for prop in source.properties:
        prop.entity = dest
        if prop.name in dest_active:
            prop.active = False
        if prop.name not in dest_valid:
            properties_logic.delete(prop)
    # Re-point all relations from the source to the destination.
    for rel in source.inbound:
        rel.target = dest
        db.session.add(rel)
    for rel in source.outbound:
        rel.source = dest
        db.session.add(rel)
    # Keep the source row as an ID alias so stale references still resolve.
    source.same_as = dest.id
    db.session.flush()
    _entity_changed.delay(dest.id, 'update')
    _entity_changed.delay(source.id, 'delete')
    return dest
def apply_alias(project, author, canonical_name, alias_name, source_url=None):
    """ Given two names, find out if there are existing entities for one or
    both of them. If so, merge them into a single entity - or, if only the
    entity associated with the alias exists - re-name the entity. """
    # Don't import meaningless aliases.
    if not len(canonical_name) or not len(alias_name):
        return log.info("Not an alias: %s", canonical_name)
    canonical = None
    # de-duplicate existing entities with the same name.
    known_names = set()
    for existing in Entity.by_name_many(project, canonical_name):
        for prop in existing.properties:
            if prop.name != 'name':
                continue
            known_names.add(prop.value)
            # make sure the canonical name is actually active
            if prop.value == canonical_name:
                prop.active = True
            else:
                prop.active = False
        if canonical is not None and canonical.id != existing.id:
            canonical = merge(existing, canonical)
        else:
            canonical = existing
    # Find aliases, i.e. entities with the alias name which are not
    # the canonical entity.
    q = Entity.by_name_many(project, alias_name)
    if canonical is not None:
        q = q.filter(Entity.id != canonical.id)
    aliases = q.all()
    # If there are no existing aliases with that name, add the alias
    # name to the canonical entity.
    if not len(aliases) and canonical is not None:
        if alias_name not in known_names:
            # Stored as an inactive 'name' property on the canonical entity.
            data = {
                'value': alias_name,
                'active': False,
                'name': 'name',
                'source_url': source_url
            }
            properties_logic.save(canonical, data)
            _entity_changed.delay(canonical.id, 'update')
        log.info("Alias: %s -> %s", alias_name, canonical_name)
    for alias in aliases:
        if canonical is None:
            # Rename an alias to its new, canonical name.
            data = {
                'value': canonical_name,
                'active': True,
                'name': 'name',
                'source_url': source_url
            }
            properties_logic.save(alias, data)
            _entity_changed.delay(alias.id, 'update')
            log.info("Renamed: %s -> %s", alias_name, canonical_name)
        else:
            # Merge two existing entities, declare one as "same_as"
            merge(alias, canonical)
            log.info("Mapped: %s -> %s", alias.id, canonical.id)
    db.session.commit()
| CodeForAfrica/grano | grano/logic/entities.py | Python | mit | 6,574 | 0 |
import os
from datetime import date
# Date stamped into data-source attributions (e.g. the SSURGO "Accessed ..."
# citation text below). Use an explicit format instead of "%D": that
# directive is a C-library extension absent from Python's documented
# strftime() set and unavailable on some platforms (notably the Windows
# CRT); "%m/%d/%y" is the portable equivalent.
TODAY_DATE = date.today().strftime("%m/%d/%y")
# Absolute path of the directory containing this settings module; used to
# build template, static-file, and test-fixture paths below.
LANDMAPPER_DIR = os.path.dirname(os.path.abspath(__file__))
###########################################
##            Keys                      ###
###########################################
# Placeholder only - the real Mapbox API token is supplied by
# local_settings (star-imported at the bottom of this file).
MAPBOX_TOKEN = 'set_in_landmapper_local_settings'
###########################################
##        Map Scales                    ###
###########################################
# Closest: 'fit' -- fits the property as close as possible
# Moderate: 'medium' -- approximately zoom level 12 unless the property is too big
# Regional Context: 'context' -- appx zoom 14 unless the property is larger
PROPERTY_OVERVIEW_SCALE = 'fit'
STREET_SCALE = 'context'
TOPO_SCALE = 'medium'
# Derived scales: contour maps track the topo scale; the overlay layers
# (taxlots, soils, forest types, streams) track the aerial/property view.
CONTOUR_SCALE = TOPO_SCALE
AERIAL_SCALE = PROPERTY_OVERVIEW_SCALE
TAXLOTS_SCALE = AERIAL_SCALE
SOIL_SCALE = AERIAL_SCALE
FOREST_TYPES_SCALE = AERIAL_SCALE
STREAM_SCALE = AERIAL_SCALE
# Bounding box of the supported study area in decimal degrees (covers
# Oregon: ~42-46.3 N, ~116.5-124.6 W).
STUDY_REGION = {
    'north': 46.292035,
    'south': 41.991794,
    'east': -116.463504,
    'west': -124.566244,
    # Suffix strings - presumably appended to geocoder queries to bias
    # matches toward Oregon; verify against the geocoding caller.
    'context': [
        ', OR',
        ', Oregon USA',
        # ', WA',
    ]
}
###########################################
## Basemaps ###
###########################################
BASEMAPS = {
'USGS_Aerial': {
'URL': 'https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': {'source': 'USGS', 'attribution': 'USGS The National Map: Orthoimagery. Data refreshed October, 2020.'}
# Can get updated attribution at https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer?f=pjson ['copyrightText']
},
'ESRI_Satellite': {
'URL': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': {'source': 'ESRI', 'attribution': 'Source: Esri, DigitalGlobe, GeoEye, Earthstar Geographics, CNES/Airbus DS, USDA, USGS, AeroGRID, IGN, and the GIS User Community'}
},
'ESRI_Topo': {
'URL': 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Topo_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': {'source': 'ESRI', 'attribution': 'Sources: Esri, HERE, Garmin, Intermap, increment P Corp., GEBCO, USGS, FAO, NPS, NRCAN, GeoBase, IGN, Kadaster NL, Ordnance Survey, Esri Japan, METI, Esri China (Hong Kong), (c) OpenStreetMap contributors, and the GIS User Community'}
},
'ESRI_Street': {
'URL': 'https://server.arcgisonline.com/arcgis/rest/services/World_Street_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'Sources: Esri, HERE, Garmin, USGS, Intermap, INCREMENT P, NRCan, Esri Japan, METI, Esri China (Hong Kong), Esri Korea, Esri (Thailand), NGCC, (c) OpenStreetMap contributors, and the GIS User Community'
},
'ESRI_NatGeo': {
'URL': 'https://server.arcgisonline.com/arcgis/rest/services/NatGeo_World_Map/MapServer/export',
'LAYERS': '0',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'Tiles © Esri — National Geographic, Esri, DeLorme, NAVTEQ, UNEP-WCMC, USGS, NASA, ESA, METI, NRCAN, GEBCO, NOAA, iPC'
},
'TNM_Aerial': {
'URL': 'https://services.nationalmap.gov/arcgis/rest/services/USGSNAIPPlus/MapServer/export',
'LAYERS': '8',
'TECHNOLOGY': 'arcgis_mapserver',
'ATTRIBUTION': 'USGS The National Map: Orthoimagery'
},
'TNM_NAIP': {
'URL': 'https://services.nationalmap.gov/arcgis/rest/services/USGSNAIPImagery/ImageServer/exportImage',
'TECHNOLOGY': 'arcgis_imageserver',
'ATTRIBUTION': {'source': 'USGS', 'attribution': 'USGS The National Map: Imagery'}
},
'Custom_Topo': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/cke0j10sj1gta19o9agb1w8pq/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
'ATTRIBUTION': 'Sources: MapBox',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'cke0j10sj1gta19o9agb1w8pq',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512,
'ZOOM_2X': False
},
'OSM': {
# 'URL': 'https://tile.openstreetmap.org/{zoom}/{lon}/{lat}.png',
'URL': 'https://maps.geoapify.com/v1/staticmap',
'TECHNOLOGY': 'static',
# 'ATTRIBUTION': 'Sources: MapBox',
'ATTRIBUTION': 'Powered by <a href="https://www.geoapify.com/">Geoapify</a>; © OpenStreetMap contributors',
'PARAMS': {},
'QS': [
# 'style=osm-bright-smooth',
'style=osm-carto',
'width={width}',
'height={height}',
'center=lonlat%3A{lon}%2C{lat}',
'zoom={zoom}', # float
'apiKey={apiKey}',
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'OSM_Mapnik': {
'URL': 'https://a.tile.openstreetmap.org/{zoom}/{lon}/{lat}.png',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': True
},
# https://tiles.wmflabs.org/hikebike/{z}/{x}/{y}.png
'Hike_Bike': {
'URL': 'https://tiles.wmflabs.org/hikebike/{zoom}/{lon}/{lat}.png',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'USGS_TopoImage': {
'URL': 'https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryTopo/MapServer/tile/{zoom}/{lat}/{lon}',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': 'Tiles courtesy of the <a href="https://usgs.gov/">U.S. Geological Survey</a>',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
'MAPBOX_Outdoor': {
'URL': 'https://api.mapbox.com/styles/v1/mapbox/outdoors-v11/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
'ATTRIBUTION': 'Sources: MapBox',
'PARAMS': {
# 'userid':'',
# 'layerid': '',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512,
'ZOOM_2X': False
},
'MAPBOX_Streets': {
'URL': 'https://api.mapbox.com/styles/v1/mapbox/streets-v11/tiles/256/{zoom}/{lon}/{lat}@2x?',
'TECHNOLOGY': 'mapbox',
'ATTRIBUTION': {'source': 'MapBox', 'attribution': None},
'PARAMS': {
# 'userid':'',
# 'layerid': '',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512,
'ZOOM_2X': False
},
'ESRI_NAIP': {
'URL': 'https://naip.maptiles.arcgis.com/arcgis/rest/services/NAIP/MapServer/tile/{zoom}/{lat}/{lon}',
'TECHNOLOGY': 'XYZ',
'ATTRIBUTION': 'Tiles courtesy of ESRI>',
'PARAMS': {
},
'QS': [
# 'access_token=%s' % MAPBOX_TOKEN,
],
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 256,
'TILE_IMAGE_WIDTH': 256,
'ZOOM_2X': False
},
}
AERIAL_DEFAULT = 'ESRI_Satellite'
AERIAL_UPDATED = 'ESRI_Satellite'
TOPO_DEFAULT = 'ESRI_Topo'
STREET_DEFAULT = 'MAPBOX_Streets'
# STREET_DEFAULT = 'ESRI_Street'
###########################################
## REPORTS ###
###########################################
SHOW_AERIAL_REPORT = True
SHOW_STREET_REPORT = True
SHOW_TERRAIN_REPORT = True
SHOW_STREAMS_REPORT = True
SHOW_SOILS_REPORT = True
SHOW_FOREST_TYPES_REPORT = True
# Based on map size on slide 4 in the XD Specs
# This assumes the 'landscape' report layout (image will feel like 'portrait')
# REPORT_MAP_WIDTH = 509
REPORT_MAP_WIDTH = 768
# REPORT_MAP_HEIGHT = 722
REPORT_MAP_HEIGHT = 816
REPORT_MAP_ALT_WIDTH = 509
REPORT_MAP_ALT_HEIGHT = 722
# What is this used for?
REPORT_CONTENT_WIDTH = 508
# REPORT_CONTENT_WIDTH = 616
REPORT_CONTENT_HEIGHT = REPORT_MAP_HEIGHT
REPORT_SUPPORT_ORIENTATION = False
REPORT_MAP_MIN_BUFFER = 0.1
# These values approximate zoom 12 & 14 at the Oregon/California border.
# MAX_METER_RESOLUTION_CONTEXT = 30.0 # ~15,000m/509px (current pixel width)
# MAX_METER_RESOLUTION_MEDIUM = 7.5 # 30/4 (or more illustratively: 30/2/2)
# MAX width resolution in 3857 degrees:
# MAX_WEB_MERCATOR_RESOLUTION_CONTEXT = 40 # ~20,000 degrees/509px (current pixel width)
MAX_WEB_MERCATOR_RESOLUTION_CONTEXT = 26 # ~20,000 degrees/768px (current pixel width)
# MAX_WEB_MERCATOR_RESOLUTION_MEDIUM = 10 # 40/4 (or more illustratively: 40/2/2)
MAX_WEB_MERCATOR_RESOLUTION_MEDIUM = 6.5 # 26/4 (or more illustratively: 26/2/2)
# Report Image Dots Per Inch
DPI = 300
PROPERTY_STYLE = {'lw':1, 'ec': '#FF00FF', 'fc': 'none'}
TAXLOT_STYLE = {'lw':0.2, 'ec': '#CCCCCC', 'fc': 'none'}
SOIL_STYLE = {
'lw':0.8,
'ec': '#EBAE33',
'fc': 'none',
'label': {
'fontsize': 3,
'halo': {
'size': 1,
'color': 'black'
},
'bbox': None,
# 'bbox': {
# 'facecolor': '#000000',
# 'alpha':0.4,
# 'pad': 0.2,
# 'edgecolor':'none'
# }
}
}
FOREST_TYPES_STYLE = {
'lw':0.8,
# 'ec': '#00FF44', # GREEN
# 'ec': '#E11845', # Strawberry
'ec': '#F2CA19', # Mustard
# 'ec': '#0057E9', # Blue
'fc': 'none',
'label': {
'fontsize': 3,
'halo': {
'size': 1,
'color': 'black'
},
'bbox': None,
# 'bbox': {
# 'facecolor': '#000000',
# 'alpha':0.4,
# 'pad': 0.2,
# 'edgecolor':'none'
# }
}
}
CONTOUR_STYLE = {
'fine_color': (32/255., 96/255., 0., 255/255.),
'fine_step': 40,
'fine_width': 0.05,
'bold_color': (32/255., 96/255., 0., 255/255.),
'bold_step': 200,
'bold_width': 0.25,
'font_size': 2.5,
'inline_spacing': -2,
'format_string': "{x:.0f}"
}
SCALEBAR_DEFAULT_WIDTH = 1.5
SCALEBAR_DEFAULT_HEIGHT = 0.2
# SCALEBAR_BG_W = 508
SCALEBAR_BG_W = 616
# SCALEBAR_BG_H = 70
SCALEBAR_BG_H = 77
MAXIMUM_BBOX_WIDTH = 30000
NO_RENDER_MESSAGE = "Unable to render some details. Please create smaller properties."
###########################################
## Properties ###
###########################################
# PROPERTY_OUTLINE_COLOR = (255,0,255,255)
PROPERTY_OUTLINE_COLOR = (1,0,1,1) # matplotlib does not understand 0-255, only hex or 0-1.0 vals
PROPERTY_OUTLINE_WIDTH = 1
###########################################
## Soils ###
###########################################
SOIL_BASE_LAYER = 'aerial'
# WMS (raster image tile)
# SOIL_WMS_URL = 'https://SDMDataAccess.sc.egov.usda.gov/Spatial/SDM.wms'
# SOIL_WMS_VERSION = '1.1.1'
# SOIL_TILE_LAYER = 'mapunitpoly'
SOIL_ZOOM_OVERLAY_2X = False
SOILS_URLS = {
'USDA_WMS': {
'URL': 'https://SDMDataAccess.sc.egov.usda.gov/Spatial/SDM.wms',
'WMS_VERSION': '1.1.1',
'TILE_LAYER': 'mapunitpoly',
'ZOOM_OVERLAY_2X': SOIL_ZOOM_OVERLAY_2X,
'ATTRIBUTION': ''.join([
"Soil Survey Staff, Natural Resources Conservation Service, ",
"United States Department of Agriculture. ",
"Soil Survey Geographic (SSURGO) Database. ",
"Available online at https://sdmdataaccess.sc.egov.usda.gov. ",
"Accessed %s" % TODAY_DATE
])
},
'USDA_WFS': {
'URL': 'https://sdmdataaccess.sc.egov.usda.gov/Spatial/SDMWGS84GEOGRAPHIC.wfs',
'WFS_VERSION': '1.1.0',
'DATA_LAYER': 'mapunitpolyextended',
'ID_FIELD': 'musym',
'ATTRIBUTION': ''.join([ # RDH 2020-10-20: I am not sure this is the correct acctibution for this service.
"Soil Survey Staff, Natural Resources Conservation Service, ",
"United States Department of Agriculture. ",
"U.S. General Soil Map (STATSGO2). ",
"Available online at https://sdmdataaccess.sc.egov.usda.gov. ",
"Accessed %s" % TODAY_DATE,
])
},
'MAPBOX': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/{layerid}/tiles/256/{zoom}/{lon}/{lat}@2x?',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'ckg85xmw7084119mpbf5a69sf',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
# 'ATTRIBUTION': 'Soil Survey Staff. The Gridded Soil Survey Geographic (gSSURGO) Database for Oregon. United States Department of Agriculture, Natural Resources Conservation Service. Available online at https://gdg.sc.egov.usda.gov/. October 12, 2020 (202007 official release).',
'ATTRIBUTION': {'source': 'NRCS', 'attribution': 'Soil Survey Staff. The Gridded Soil Survey Geographic (gSSURGO) Database for Oregon. United States Department of Agriculture, Natural Resources Conservation Service. Available online at https://gdg.sc.egov.usda.gov/. October 12, 2020 (202007 official release).'},
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512
}
}
# WFS (soil data)
# SOIL_WFS_URL = 'https://sdmdataaccess.sc.egov.usda.gov/Spatial/SDMWGS84GEOGRAPHIC.wfs'
# SOIL_WFS_VERSION = '1.1.0'
# SOIL_DATA_LAYER = 'mapunitpolyextended'
# SOIL_ID_FIELD = 'musym'
# https://sdmdataaccess.sc.egov.usda.gov/Citation.htm
SOIL_SSURGO_ATTRIBUTION = SOILS_URLS['USDA_WMS']['ATTRIBUTION']
SOIL_SOURCE = 'MAPBOX'
SOIL_ATTRIBUTION = SOILS_URLS[SOIL_SOURCE]['ATTRIBUTION']
# Reference: https://sdmdataaccess.nrcs.usda.gov/documents/TablesAndColumnsReport.pdf
SOIL_FIELDS = {
'areasymbol': {
'name': 'Area Symbol',
'display': False,
'format': 'string',
'UOM': ''
},
'musym': {
'name': 'Map Unit Symbol',
'display': True,
'format': 'string',
'UOM': ''
},
'nationalmusym': {
'name': 'National Map Unit Symbol',
'display': True,
'format': 'string',
'UOM': ''
},
'mukey': {
'name': 'Map Unit Key',
'display': False,
'format': 'integer',
'UOM': ''
},
'spatialversion': {
'name': 'Spatial Version',
'display': False,
'format': 'integer',
'UOM': ''
},
'muname': {
'name': 'Map Unit Name',
'display': True,
'format': 'string',
'UOM': ''
},
'mustatus': {
'name': 'Map Unit Status',
'display': True,
'format': 'string',
'UOM': ''
},
'slopegraddcp': {
'name': 'Slope Gradient - Dominant Component',
'display': True,
'format': 'float',
'UOM': '%'
},
'slopegradwta': {
'name': 'Slope Gradient - Weighted Average',
'display': True,
'format': 'float',
'UOM': '%'
},
'brockdepmin': {
'name': 'Bedrock Depth - Minimum',
'display': True,
'format': 'integer',
'UOM': 'cm'
},
'wtdepannmin': {
'name': 'Water Table Depth - Annual - Minimum',
'display': True,
'format': 'integer',
'UOM': 'cm'
},
'wtdepaprjunmin': {
'name': 'Water Table Depth - April - June - Minimum',
'display': True,
'format': 'integer',
'UOM': 'cm'
},
'flodfreqdcd': {
'name': 'Flooding Frequency - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'flodfreqmax': {
'name': 'Flooding Frequency - Maximum',
'display': True,
'format': 'string',
'UOM': ''
},
'pondfreqprs': {
'name': 'pondfrePonding Frequency - Presenceqprs',
'display': True,
'format': 'string',
'UOM': ''
},
'aws025wta': {
'name': 'Available Water Storage 0-25 cm - Weighted Average',
'display': True,
'format': 'float',
'UOM': 'cm'
},
'aws050wta': {
'name': 'Available Water Storage 0-50 cm - Weighted Average',
'display': True,
'format': 'float',
'UOM': 'cm'
},
'aws0100wta': {
'name': 'Available Water Storage 0-100 cm - Weighted Average',
'display': True,
'format': 'float',
'UOM': 'cm'
},
'aws0150wta': {
'name': 'Available Water Storage 0-150 cm - Weighted Average',
'display': True,
'format': 'float',
'UOM': 'cm'
},
'drclassdcd': {
'name': 'Drainage Class - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'drclasswettest': {
'name': 'Drainage Class - Wettest',
'display': True,
'format': 'string',
'UOM': ''
},
'hydgrpdcd': {
'name': 'Hydrologic Group - Dominant Conditions',
'display': True,
'format': 'string',
'UOM': ''
},
'iccdcd': {
'name': 'Irrigated Capability Class - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'iccdcdpct': {
'name': 'Irrigated Capability Class - Dominant Condition Aggregate Percent',
'display': True,
'format': 'integer',
'UOM': '%' # not listed in docs
},
'niccdcd': {
'name': 'Non-Irrigated Capability Class - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'niccdcdpct': {
'name': 'Non-Irrigated Capability Class - Dominant Condition Aggregate Percent',
'display': True,
'format': 'integer',
'UOM': '%' # not listed in docs
},
'engdwobdcd': {
'name': 'ENG - Dwellings W/O Basements - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engdwbdcd': {
'name': 'ENG - Dwellings with Basements - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engdwbll': {
'name': 'ENG - Dwellings with Basements - Least Limiting',
'display': True,
'format': 'string',
'UOM': ''
},
'engdwbml': {
'name': 'ENG - Dwellings with Basements - Most Limiting',
'display': True,
'format': 'string',
'UOM': ''
},
'engstafdcd': {
'name': 'ENG - Septic Tank Absorption Fields - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engstafll': {
'name': 'ENG - Septic Tank Absorption Fields - Least Limiting',
'display': True,
'format': 'string',
'UOM': ''
},
'engstafml': {
'name': 'ENG - Septic Tank Absorption Fields - Most Limiting',
'display': True,
'format': 'string',
'UOM': ''
},
'engsldcd': {
'name': 'ENG - Sewage Lagoons - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engsldcp': {
'name': 'ENG - Sewage Lagoons - Dominant Component',
'display': True,
'format': 'string',
'UOM': ''
},
'englrsdcd': {
'name': 'ENG - Local Roads and Streets - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engcmssdcd': {
'name': 'ENG - Construction Materials; Sand Source - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'engcmssmp': {
'name': 'ENG - Construction Materials; Sand Source - Most Probable',
'display': True,
'format': 'string',
'UOM': ''
},
'urbrecptdcd': {
'name': 'URB/REC - Paths and Trails - Dominant Condition',
'display': True,
'format': 'string',
'UOM': ''
},
'urbrecptwta': {
'name': 'URB/REC - Paths and Trails - Weighted Average',
'display': True,
'format': 'float',
'UOM': ''
},
'forpehrtdcp': {
'name': 'FOR - Potential Erosion Hazard (Road/Trail) - Dominant Component',
'display': True,
'format': 'string',
'UOM': ''
},
'hydclprs': {
'name': 'Hydric Classification - Presence',
'display': True,
'format': 'integer',
'UOM': ''
},
'awmmfpwwta': {
'name': 'AWM - Manure and Food Processing Waste - Weighted Average',
'display': True,
'format': 'float',
'UOM': ''
}
}
###########################################
## Streams ###
###########################################
STREAMS_BASE_LAYER = 'topo'
# STREAMS_BASE_LAYER = 'aerial'
# STREAMS_BASE_LAYER = 'ESRI_Topo'
STREAMS_URLS = {
'AGOL': {
'URL': [
'https://services.arcgis.com/P3ePLMYs2RVChkJx/arcgis/rest/services/USA_Detailed_Streams/FeatureServer/0/query?'
],
'PARAMS': {},
'QS':[
'f=geojson',
'returnGeometry=true',
'spatialRel=esriSpatialRelIntersects',
# 'maxAllowableOffset=76.43702828515632',
'geometryType=esriGeometryEnvelope',
'inSR=102100',
'outFields=*',
'returnCentroid=false',
'returnExceededLimitFeatures=false',
'maxRecordCountFactor=3',
'outSR=102100',
'resultType=tile',
],
'ATTRIBUTION': 'National Hydrography Dataset: USGS, Esri'
},
'MAPBOX_STATIC': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/{layerid}/static/{lon},{lat},{zoom}/{width}x{height}{retina}?',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'ckbv10von10qw1iqs1cgdccw7',
'lon': '',
'lat': '',
'zoom': '',
'width': '',
'height': '',
'retina': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
'attribution=false',
'logo=false'
],
'ATTRIBUTION': 'Oregon Department of Forestry'
},
'MAPBOX_TILE': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/{layerid}/tiles/256/{zoom}/{lon}/{lat}@2x?',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'ckbv10von10qw1iqs1cgdccw7',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
'ATTRIBUTION': {'source': 'ODF', 'attribution':'Oregon Department of Forestry'},
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512
}
}
STREAMS_SOURCE = 'MAPBOX_TILE'
STREAM_ZOOM_OVERLAY_2X = False
STREAMS_ATTRIBUTION = STREAMS_URLS[STREAMS_SOURCE]['ATTRIBUTION']
###########################################
## Taxlots ###
###########################################
TAXLOTS_URLS = {
'MAPBOX_TILE': {
'URL': 'https://api.mapbox.com/styles/v1/{userid}/{layerid}/tiles/256/{zoom}/{lon}/{lat}@2x?',
'PARAMS': {
'userid':'forestplanner',
'layerid': 'ckdgho51i084u1inx1a70iwim',
'lon': '',
'lat': '',
'zoom': '',
},
'QS': [
'access_token=%s' % MAPBOX_TOKEN,
],
'ATTRIBUTION': {'source': 'ORMAP', 'attribution': None},
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512
}
}
TAXLOTS_SOURCE = 'MAPBOX_TILE'
TAXLOT_ZOOM_OVERLAY_2X = False
TAXLOTS_ATTRIBUTION = TAXLOTS_URLS[TAXLOTS_SOURCE]['ATTRIBUTION']
###########################################
## Topo Conours ###
###########################################
CONTOUR_URLS = {
'TNM_TOPO': {
'URL': 'https://carto.nationalmap.gov/arcgis/rest/services/contours/MapServer/export',
'LAYERS': '21,25,26',
'TECHNOLOGY': 'arcgis_mapserver',
'SRID': 3857,
'ATTRIBUTION': 'USGS The National Map: 3D Elevation Program. Data Refreshed October, 2020.',
'INDEX_CONTOUR_SYMBOL': {
"type": "esriSLS",
"style": "esriSLSSolid",
"color": [32,96,0,255],
"width": 1.5
},
'INTERMEDIATE_CONTOUR_SYMBOL': {
"type": "esriSLS",
"style": "esriSLSSolid",
"color": [32,96,0,255],
"width": 0.5
},
'LABEL_SYMBOL': {
"type":"esriTS",
"color":[15,39,3,255],
"backgroundColor":None,
"outlineColor":None,
"verticalAlignment":"baseline",
"horizontalAlignment":"left",
"rightToLeft":False,
"angle":0,
"xoffset":0,
"yoffset":0,
"kerning":True,
"haloSize":2,
"haloColor":[255,255,255,255],
"font":{
"family":"Arial",
"size":12,
"style":"italic",
"weight":"normal",
"decoration":"none"
}
},
'STYLES': []
}
}
# CONTOUR_SOURCE = 'TNM_TOPO'
# Key into CONTOUR_URLS selecting a remote contour service, or False to
# disable the remote overlay (locally rendered contours via CONTOUR_STYLE
# are used instead).
CONTOUR_SOURCE = False
if CONTOUR_SOURCE:
    # Use the same {'source', 'attribution'} shape as the else-branch (and
    # the stream/taxlot/soil attribution constants) so that consumers of
    # ATTRIBUTION_KEYS['contours'] always see one structure. This branch
    # previously assigned the bare attribution string.
    CONTOUR_ATTRIBUTION = {
        'source': 'USGS',
        'attribution': CONTOUR_URLS[CONTOUR_SOURCE]['ATTRIBUTION']
    }
    # ArcGIS "dynamicLayers" style payload: restyle the index (25) and
    # intermediate (26) contour layers, and label layer 21 with elevation
    # values, using the symbol definitions configured on the source above.
    CONTOUR_URLS[CONTOUR_SOURCE]['STYLES'] = [
        {
            "id":25,
            "source":{"type":"mapLayer", "mapLayerId":25},
            "drawingInfo":{
                "renderer":{
                    "type":"simple",
                    "symbol":CONTOUR_URLS[CONTOUR_SOURCE]['INDEX_CONTOUR_SYMBOL'],
                },
            },
        },
        {
            "id":26,
            "source":{"type":"mapLayer", "mapLayerId":26},
            "drawingInfo":{
                "renderer":{
                    "type":"simple",
                    "symbol":CONTOUR_URLS[CONTOUR_SOURCE]['INTERMEDIATE_CONTOUR_SYMBOL'],
                },
            },
        },
        {
            "id":21,
            "source":{"type":"mapLayer", "mapLayerId":21},
            "drawingInfo":{
                "renderer":{
                    "type":"uniqueValue",
                    "field1":"FCODE",
                    "fieldDelimiter":",",
                },
                "labelingInfo":[
                    {
                        "labelPlacement":"esriServerLinePlacementCenterAlong",
                        "labelExpression":"[CONTOURELEVATION]",
                        "useCodedValues":True,
                        "symbol":CONTOUR_URLS[CONTOUR_SOURCE]['LABEL_SYMBOL'],
                        "minScale":0,
                        "maxScale":0
                    }
                ]
            }
        }
    ]
else:
    # No remote contour source configured; still credit USGS, whose 3DEP
    # data underlies the contours.
    CONTOUR_ATTRIBUTION = {'source': 'USGS', 'attribution': CONTOUR_URLS['TNM_TOPO']['ATTRIBUTION']}
###########################################
## Forest Types ###
###########################################
FOREST_TYPES_URLS = {
'LOCAL': {
'URL': None,
'PARAMS': {},
'QS': [],
'ATTRIBUTION': {'source': 'Ecotrust', 'attribution': 'Ecotrust 2021'},
# calculate tile assuming 256 px
'TILE_HEIGHT': 256,
'TILE_WIDTH': 256,
# retrieve image at 2x resolution
'TILE_IMAGE_HEIGHT': 512,
'TILE_IMAGE_WIDTH': 512
}
}
FOREST_TYPES_SOURCE = 'LOCAL'
FOREST_TYPES_ATTRIBUTION = FOREST_TYPES_URLS[FOREST_TYPES_SOURCE]['ATTRIBUTION']
###########################################
## Map Info ###
###########################################
# Per-layer attribution metadata stamped onto generated report map images.
# NOTE(review): the values are not a uniform shape - most resolve to
# {'source': ..., 'attribution': ...} dicts, but some BASEMAPS entries
# (e.g. ESRI_Street) are bare strings; confirm consumers handle both.
ATTRIBUTION_KEYS = {
    'aerial': BASEMAPS[AERIAL_DEFAULT]['ATTRIBUTION'],
    'topo': BASEMAPS[TOPO_DEFAULT]['ATTRIBUTION'],
    'streets': BASEMAPS[STREET_DEFAULT]['ATTRIBUTION'],
    'streams': STREAMS_ATTRIBUTION,
    'taxlot': TAXLOTS_ATTRIBUTION,
    'soil': SOIL_ATTRIBUTION,
    'foresttypes': FOREST_TYPES_ATTRIBUTION,
    'contours': CONTOUR_ATTRIBUTION
}
ATTRIBUTION_BOX_FILL_COLOR = (255, 255, 255, 190)
ATTRIBUTION_BOX_OUTLINE = None
ATTRIBUTION_TEXT_COLOR = "black"
# ATTRIBUTION_TEXT_FONT = 'Pillow/Tests/fonts/FreeMono.ttf'
# default UBUNTU Font
# ATTRIBUTION_TEXT_FONT = '/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf'
ATTRIBUTION_TEXT_FONT = '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
ATTRIBUTION_TEXT_FONT_SIZE = 10
ATTRIBUTION_TEXT_BUFFER = 3
ATTRIBUTION_TEXT_LINE_SPACING = 1
AERIAL_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/directions_aerial.png'
STREET_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/street_map_legend.png'
TERRAIN_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/topo.png'
STREAM_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/hydrology.png'
SOIL_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/soils.png'
FOREST_TYPE_MAP_LEGEND_URL = '/static/landmapper/img/legend_images/forest_types.png'
###########################################
## Site URLs ###
###########################################
# Site roots used when composing absolute links in generated output.
PRODUCTION_URL = 'http://landmapper.ecotrust.org/landmapper'
DEV_URL = 'http://localhost:8000/landmapper'
# Toggle for deployed environments.
LIVE_SITE = False
# Active root, selected by the deployment toggle above.
APP_URL = PRODUCTION_URL if LIVE_SITE else DEV_URL
###########################################
## Report creation and PDF access ###
###########################################
ALLOW_ANONYMOUS_DRAW = True
ANONYMOUS_USER_PK = 1
###########################################
## Flatblock content ###
###########################################
FLATBLOCK_IDS = [
'aside-home',
'aside-map-pin',
'aside-name'
]
###########################################
## Tests ###
###########################################
TESTING_DIR = os.path.join(LANDMAPPER_DIR, 'testing_files')
IMAGE_TEST_DIR = os.path.join(TESTING_DIR, 'image_test')
###########################################
## PDF Files ###
###########################################
PROPERTY_REPORT_PDF_TEMPLATE = LANDMAPPER_DIR + '/pdf_templates/LM_Form.pdf'
PROPERTY_REPORT_PDF_TEMPLATE_SANS_FOREST_TYPES = LANDMAPPER_DIR + '/pdf_templates/LM_Form_sans_forests.pdf'
PROPERTY_REPORT_PDF_DIR = LANDMAPPER_DIR + '/static/landmapper/report_pdf/'
PDF_PAGE_LOOKUP = {
'property_alt': 0,
'property': 1,
'aerial': 1,
'street': 2,
'terrain': 3,
'stream': 4,
'soil_types': [5,6,7,8],
'forest_types': [9,10,11,12],
}
# Machine-specific overrides (real MAPBOX_TOKEN, LIVE_SITE, etc.).
# Only a *missing* local_settings module is tolerated; any other failure
# (syntax error, bad name inside the overrides file) now surfaces
# immediately instead of being silently swallowed, as the previous
# blanket "except Exception: pass" allowed.
try:
    from .local_settings import *
except ImportError:
    pass
| Ecotrust/forestplanner | lot/landmapper/settings.py | Python | bsd-3-clause | 33,100 | 0.004683 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions call' command."""
from googlecloudsdk.api_lib.functions import util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class Call(base.Command):
  """Call function synchronously for testing."""

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    parser.add_argument(
        'name', help='Name of the function to be called.',
        type=util.ValidateFunctionNameOrRaise)
    parser.add_argument(
        '--data', default='',
        help='Data passed to the function (JSON string)')

  @util.CatchHTTPErrorRaiseHTTPException
  def Run(self, args):
    """This is what gets called when the user runs this command.
    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.
    Returns:
      Function call results (error or result with execution id)
    """
    client = self.context['functions_client']
    messages = self.context['functions_messages']
    # NOTE(review): args.region is not registered in Args above - it is
    # presumably contributed by a parent command group; verify before reuse.
    function_ref = self.context['registry'].Parse(
        args.name,
        params={
            'projectsId': properties.VALUES.core.project.Get(required=True),
            'locationsId': args.region,
        },
        collection='cloudfunctions.projects.locations.functions')
    # Build the synchronous call request and execute it against the API.
    call_request = messages.CloudfunctionsProjectsLocationsFunctionsCallRequest(
        name=function_ref.RelativeName(),
        callFunctionRequest=messages.CallFunctionRequest(data=args.data))
    return client.projects_locations_functions.Call(call_request)
| KaranToor/MA450 | google-cloud-sdk/lib/surface/functions/call.py | Python | apache-2.0 | 2,127 | 0.002351 |
from django_webtest import WebTest
from candidates.views.people import MERGE_FORM_ID, SUGGESTION_FORM_ID
import people.tests.factories
from candidates.tests import factories
from candidates.tests.auth import TestUserMixin
from candidates.tests.uk_examples import UK2015ExamplesMixin
from people.models import Person
from uk_results.models import CandidateResult, ResultSet
class TestUKResultsPreserved(TestUserMixin, UK2015ExamplesMixin, WebTest):
    """Check that CandidateResult vote counts survive merging two people.

    Person 3885 is the merge target ("primary"); person 10000 is absorbed
    into it ("secondary"). Whichever of the two carries the vote-count
    result, exactly one CandidateResult must remain after the merge,
    attached to the surviving person's membership.
    """

    def setUp(self):
        super().setUp()
        self.primary_person = people.tests.factories.PersonFactory.create(
            id="3885", name="Harriet Harman"
        )
        self.secondary_person = people.tests.factories.PersonFactory.create(
            id="10000", name="Harriet Ruth Harman"
        )

    def _merge_secondary_into_primary(self):
        """Drive the web UI to merge person 10000 into person 3885.

        Shared by both tests: GET the primary person's page, submit the
        merge *suggestion* form naming the secondary person, then (since
        the user has direct merge permission) submit the merge form itself.
        """
        response = self.app.get("/person/3885/", user=self.user_who_can_merge)
        # first submit the suggestion form
        suggestion_form = response.forms[SUGGESTION_FORM_ID]
        suggestion_form["other_person"] = "10000"
        response = suggestion_form.submit()
        # as user has permission to merge directly, submit merge form
        merge_form = response.forms[MERGE_FORM_ID]
        merge_form.submit()

    def test_uk_results_for_secondary_preserved(self):
        """A result attached to the absorbed person survives the merge."""
        self.assertTrue(Person.objects.filter(pk=10000).exists())
        factories.MembershipFactory.create(
            person=self.primary_person,
            post=self.camberwell_post,
            party=self.labour_party,
            ballot=self.camberwell_post_ballot_earlier,
        )
        factories.MembershipFactory.create(
            person=self.secondary_person,
            post=self.local_post,
            party=self.labour_party,
            ballot=self.local_election.ballot_set.get(post=self.local_post),
        )
        secondary_membership = factories.MembershipFactory.create(
            person=self.secondary_person,
            post=self.camberwell_post,
            party=self.labour_party,
            ballot=self.camberwell_post_ballot,
            elected=True,
        )
        # Now attach a vote count to the secondary person's candidacy:
        result_set = ResultSet.objects.create(
            ballot=self.camberwell_post_ballot,
            num_turnout_reported=51561,
            num_spoilt_ballots=42,
            ip_address="127.0.0.1",
        )
        CandidateResult.objects.create(
            result_set=result_set,
            membership=secondary_membership,
            num_ballots=32614,
        )
        # Now try the merge:
        self._merge_secondary_into_primary()
        self.assertEqual(CandidateResult.objects.count(), 1)
        # Now reget the original person and her candidacy - check it
        # has a result attached.
        after_merging = Person.objects.get(pk=3885)
        membership = after_merging.memberships.get(
            ballot__election=self.election
        )
        candidate_result = membership.result
        self.assertEqual(candidate_result.num_ballots, 32614)
        self.assertFalse(Person.objects.filter(pk=10000).exists())
        self.assertTrue(membership.elected)

    def test_uk_results_for_primary_preserved(self):
        """A result attached to the surviving person is untouched by the merge."""
        self.assertTrue(Person.objects.filter(pk=10000).exists())
        primary_membership = factories.MembershipFactory.create(
            person=self.primary_person,
            post=self.camberwell_post,
            party=self.labour_party,
            ballot=self.camberwell_post_ballot_earlier,
            elected=True,
        )
        factories.MembershipFactory.create(
            person=self.secondary_person,
            post=self.local_post,
            party=self.labour_party,
            ballot=self.local_election.ballot_set.get(post=self.local_post),
        )
        factories.MembershipFactory.create(
            person=self.secondary_person,
            post=self.camberwell_post,
            party=self.labour_party,
            ballot=self.camberwell_post_ballot,
        )
        # Now attach a vote count to the primary person's candidacy:
        result_set = ResultSet.objects.create(
            ballot=self.camberwell_post_ballot_earlier,
            num_turnout_reported=46659,
            num_spoilt_ballots=42,
            ip_address="127.0.0.1",
        )
        CandidateResult.objects.create(
            result_set=result_set,
            membership=primary_membership,
            num_ballots=27619,
        )
        # Now try the merge:
        self._merge_secondary_into_primary()
        self.assertEqual(CandidateResult.objects.count(), 1)
        # Now reget the original person and her candidacy - check it
        # has a result attached.
        after_merging = Person.objects.get(pk=3885)
        membership = after_merging.memberships.get(
            ballot__election=self.earlier_election
        )
        candidate_result = membership.result
        self.assertEqual(candidate_result.num_ballots, 27619)
        self.assertFalse(Person.objects.filter(pk=10000).exists())
        self.assertTrue(membership.elected)
| DemocracyClub/yournextrepresentative | ynr/apps/elections/uk/tests/test_custom_merge.py | Python | agpl-3.0 | 5,408 | 0 |
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovey (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <vrama91@vt.edu>
import numpy as np
import pdb
def my_lcs(string, sub):
    """
    Calculate the length of the longest common subsequence for a pair of
    tokenized strings.

    :param string: list of str : tokens from a string split using whitespace
    :param sub: list of str : shorter string, also split using whitespace
    :returns: int : length of the longest common subsequence between the
        two token lists

    Note: my_lcs only gives the length of the longest common subsequence,
    not the actual LCS.
    """
    # Ensure `sub` is the shorter sequence so the DP table is oriented
    # consistently regardless of argument order.
    if len(string) < len(sub):
        sub, string = string, sub

    # lengths[i][j] holds the LCS length of string[:i] and sub[:j];
    # row/column 0 represent the empty prefix and stay 0.
    lengths = [[0] * (len(sub) + 1) for _ in range(len(string) + 1)]

    for j in range(1, len(sub) + 1):
        for i in range(1, len(string) + 1):
            if string[i - 1] == sub[j - 1]:
                # Tokens match: extend the LCS of both prefixes by one.
                lengths[i][j] = lengths[i - 1][j - 1] + 1
            else:
                # No match: carry forward the best of dropping either token.
                lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])

    return lengths[len(string)][len(sub)]
class Rouge():
    '''
    Class for computing ROUGE-L score for a set of candidate sentences
    for the MS COCO test set.
    '''

    def __init__(self):
        # vrama91: updated the value below based on discussion with Hovey.
        # beta > 1 weights the F-measure towards recall.
        self.beta = 1.2

    def calc_score(self, candidate, refs):
        """
        Compute ROUGE-L score given one candidate and references for an image.

        :param candidate: list of str : single-element list holding the
            candidate sentence to be evaluated
        :param refs: list of str : COCO reference sentences for the
            particular image to be evaluated
        :returns: float : ROUGE-L score for the candidate evaluated
            against the references (0.0 when there is no token overlap)
        """
        assert(len(candidate) == 1)
        assert(len(refs) > 0)
        prec = []
        rec = []

        # Split the candidate into tokens once; it is compared against
        # every reference.
        token_c = candidate[0].split(" ")

        for reference in refs:
            # split into tokens
            token_r = reference.split(" ")
            # Longest common subsequence between candidate and reference.
            lcs = my_lcs(token_r, token_c)
            prec.append(lcs / float(len(token_c)))
            rec.append(lcs / float(len(token_r)))

        # ROUGE-L takes the best precision/recall over all references.
        prec_max = max(prec)
        rec_max = max(rec)

        if prec_max != 0 and rec_max != 0:
            # Weighted harmonic mean (F-measure) of max precision/recall.
            score = ((1 + self.beta ** 2) * prec_max * rec_max) / \
                float(rec_max + self.beta ** 2 * prec_max)
        else:
            score = 0.0
        return score

    def compute_score(self, gts, res):
        """
        Compute ROUGE-L given a set of reference and candidate sentences
        for the dataset. Invoked by evaluate_captions.py.

        :param gts: dict : reference MS-COCO sentences keyed by image id,
            each value a list of tokenized sentences
        :param res: dict : candidate / test sentences keyed by image id,
            each value a single-element list of tokenized sentences
        :returns: (float, np.ndarray) : mean ROUGE-L score over all images,
            and the per-image scores
        """
        assert(sorted(gts.keys()) == sorted(res.keys()))
        imgIds = gts.keys()

        score = []
        for id in imgIds:
            hypo = res[id]
            ref = gts[id]

            score.append(self.calc_score(hypo, ref))

            # Sanity check: one candidate, at least one reference per image.
            assert(type(hypo) is list)
            assert(len(hypo) == 1)
            assert(type(ref) is list)
            assert(len(ref) > 0)

        average_score = np.mean(np.array(score))
        return average_score, np.array(score)

    def method(self):
        # Name used by the COCO evaluation harness to label this scorer.
        return "Rouge"
| fukun07/neural-image-captioning | codes/pycoco/rouge/rouge.py | Python | mit | 3,659 | 0.008746 |
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Adapter database operations."""
import logging
import os
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
from compass.utils import setting_wrapper as setting
from compass.utils import util
def _add_installers(session, model, configs, exception_when_existing=True):
    """Create one installer row per config dict and return the new objects.

    Each config must provide 'INSTANCE_NAME' (the object id) and 'NAME';
    'SETTINGS' is optional and defaults to an empty dict.
    """
    return [
        utils.add_db_object(
            session,
            model,
            exception_when_existing,
            cfg['INSTANCE_NAME'],
            name=cfg['NAME'],
            settings=cfg.get('SETTINGS', {})
        )
        for cfg in configs
    ]
def add_os_installers_internal(session, exception_when_existing=True):
    """Load OS installer configs from disk and register them as
    OSInstaller rows, returning the created objects."""
    return _add_installers(
        session,
        models.OSInstaller,
        util.load_configs(setting.OS_INSTALLER_DIR),
        exception_when_existing=exception_when_existing
    )
def add_package_installers_internal(session, exception_when_existing=True):
    """Load package installer configs from disk and register them as
    PackageInstaller rows, returning the created objects."""
    return _add_installers(
        session,
        models.PackageInstaller,
        util.load_configs(setting.PACKAGE_INSTALLER_DIR),
        exception_when_existing=exception_when_existing
    )
| baigk/compass-core | compass/db/api/installer.py | Python | apache-2.0 | 1,791 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.