Dataset schema (one record per source file):
repo_name - string, 6 to 100 chars
path - string, 4 to 294 chars
copies - string, 1 to 5 chars
size - string, 4 to 6 chars
content - string, 606 to 896k chars
license - class label, one of 15 values
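Each record below carries these six fields in order. As a hedged illustration (the dataset identifier is a placeholder, not this dump's real name), a dump with this schema can be consumed with the Hugging Face datasets library:

# Minimal sketch, assuming the dump is published as a Hugging Face dataset;
# "example-org/python-code-dump" is a hypothetical identifier.
from datasets import load_dataset

ds = load_dataset("example-org/python-code-dump", split="train")
row = ds[0]
print(row["repo_name"], row["path"], row["license"])
print(len(row["content"]), "characters of source")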
varunagrawal/azure-services
varunagrawal/VarunWeb/env/Lib/site-packages/django/test/html.py
196
7967
""" Comparing two html documents. """ from __future__ import unicode_literals import re from django.utils.encoding import force_text from django.utils.html_parser import HTMLParser, HTMLParseError from django.utils import six from django.utils.encoding import python_2_unicode_compatible WHITESPACE = re.compile('\s+') def normalize_whitespace(string): return WHITESPACE.sub(' ', string) @python_2_unicode_compatible class Element(object): def __init__(self, name, attributes): self.name = name self.attributes = sorted(attributes) self.children = [] def append(self, element): if isinstance(element, six.string_types): element = force_text(element) element = normalize_whitespace(element) if self.children: if isinstance(self.children[-1], six.string_types): self.children[-1] += element self.children[-1] = normalize_whitespace(self.children[-1]) return elif self.children: # removing last children if it is only whitespace # this can result in incorrect dom representations since # whitespace between inline tags like <span> is significant if isinstance(self.children[-1], six.string_types): if self.children[-1].isspace(): self.children.pop() if element: self.children.append(element) def finalize(self): def rstrip_last_element(children): if children: if isinstance(children[-1], six.string_types): children[-1] = children[-1].rstrip() if not children[-1]: children.pop() children = rstrip_last_element(children) return children rstrip_last_element(self.children) for i, child in enumerate(self.children): if isinstance(child, six.string_types): self.children[i] = child.strip() elif hasattr(child, 'finalize'): child.finalize() def __eq__(self, element): if not hasattr(element, 'name'): return False if hasattr(element, 'name') and self.name != element.name: return False if len(self.attributes) != len(element.attributes): return False if self.attributes != element.attributes: # attributes without a value is same as attribute with value that # equals the attributes name: # <input checked> == <input checked="checked"> for i in range(len(self.attributes)): attr, value = self.attributes[i] other_attr, other_value = element.attributes[i] if value is None: value = attr if other_value is None: other_value = other_attr if attr != other_attr or value != other_value: return False if self.children != element.children: return False return True def __hash__(self): return hash((self.name,) + tuple(a for a in self.attributes)) def __ne__(self, element): return not self.__eq__(element) def _count(self, element, count=True): if not isinstance(element, six.string_types): if self == element: return 1 i = 0 for child in self.children: # child is text content and element is also text content, then # make a simple "text" in "text" if isinstance(child, six.string_types): if isinstance(element, six.string_types): if count: i += child.count(element) elif element in child: return 1 else: i += child._count(element, count=count) if not count and i: return i return i def __contains__(self, element): return self._count(element, count=False) > 0 def count(self, element): return self._count(element, count=True) def __getitem__(self, key): return self.children[key] def __str__(self): output = '<%s' % self.name for key, value in self.attributes: if value: output += ' %s="%s"' % (key, value) else: output += ' %s' % key if self.children: output += '>\n' output += ''.join(six.text_type(c) for c in self.children) output += '\n</%s>' % self.name else: output += ' />' return output def __repr__(self): return six.text_type(self) 
@python_2_unicode_compatible class RootElement(Element): def __init__(self): super(RootElement, self).__init__(None, ()) def __str__(self): return ''.join(six.text_type(c) for c in self.children) class Parser(HTMLParser): SELF_CLOSING_TAGS = ('br' , 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base', 'col') def __init__(self): HTMLParser.__init__(self) self.root = RootElement() self.open_tags = [] self.element_positions = {} def error(self, msg): raise HTMLParseError(msg, self.getpos()) def format_position(self, position=None, element=None): if not position and element: position = self.element_positions[element] if position is None: position = self.getpos() if hasattr(position, 'lineno'): position = position.lineno, position.offset return 'Line %d, Column %d' % position @property def current(self): if self.open_tags: return self.open_tags[-1] else: return self.root def handle_startendtag(self, tag, attrs): self.handle_starttag(tag, attrs) if tag not in self.SELF_CLOSING_TAGS: self.handle_endtag(tag) def handle_starttag(self, tag, attrs): # Special case handling of 'class' attribute, so that comparisons of DOM # instances are not sensitive to ordering of classes. attrs = [ (name, " ".join(sorted(value.split(" ")))) if name == "class" else (name, value) for name, value in attrs ] element = Element(tag, attrs) self.current.append(element) if tag not in self.SELF_CLOSING_TAGS: self.open_tags.append(element) self.element_positions[element] = self.getpos() def handle_endtag(self, tag): if not self.open_tags: self.error("Unexpected end tag `%s` (%s)" % ( tag, self.format_position())) element = self.open_tags.pop() while element.name != tag: if not self.open_tags: self.error("Unexpected end tag `%s` (%s)" % ( tag, self.format_position())) element = self.open_tags.pop() def handle_data(self, data): self.current.append(data) def handle_charref(self, name): self.current.append('&%s;' % name) def handle_entityref(self, name): self.current.append('&%s;' % name) def parse_html(html): """ Takes a string that contains *valid* HTML and turns it into a Python object structure that can be easily compared against other HTML on semantic equivalence. Syntactical differences like which quotation is used on arguments will be ignored. """ parser = Parser() parser.feed(html) parser.close() document = parser.root document.finalize() # Removing ROOT element if it's not necessary if len(document.children) == 1: if not isinstance(document.children[0], six.string_types): document = document.children[0] return document
gpl-2.0
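A minimal usage sketch for the module above, assuming it is importable as django.test.html as the record path suggests; the assertions mirror the documented normalization (attribute quoting, class order, and whitespace are all ignored):

from django.test.html import parse_html

# Semantically equal despite quoting, class order, and whitespace noise.
dom1 = parse_html('<p class="a b">hello   world</p>')
dom2 = parse_html("<p class='b a'>hello world</p>")
assert dom1 == dom2
assert 'hello world' in dom1     # text containment via Element.__contains__
assert dom1.count('hello') == 1  # substring counting via Element.count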
hortonworks/hortonworks-sandbox
desktop/core/ext-py/Django-1.2.3/tests/modeltests/invalid_models/models.py
10
24416
""" 26. Invalid models This example exists purely to point out errors in models. """ from django.db import models class FieldErrors(models.Model): charfield = models.CharField() charfield2 = models.CharField(max_length=-1) charfield3 = models.CharField(max_length="bad") decimalfield = models.DecimalField() decimalfield2 = models.DecimalField(max_digits=-1, decimal_places=-1) decimalfield3 = models.DecimalField(max_digits="bad", decimal_places="bad") filefield = models.FileField() choices = models.CharField(max_length=10, choices='bad') choices2 = models.CharField(max_length=10, choices=[(1,2,3),(1,2,3)]) index = models.CharField(max_length=10, db_index='bad') field_ = models.CharField(max_length=10) nullbool = models.BooleanField(null=True) class Target(models.Model): tgt_safe = models.CharField(max_length=10) clash1 = models.CharField(max_length=10) clash2 = models.CharField(max_length=10) clash1_set = models.CharField(max_length=10) class Clash1(models.Model): src_safe = models.CharField(max_length=10) foreign = models.ForeignKey(Target) m2m = models.ManyToManyField(Target) class Clash2(models.Model): src_safe = models.CharField(max_length=10) foreign_1 = models.ForeignKey(Target, related_name='id') foreign_2 = models.ForeignKey(Target, related_name='src_safe') m2m_1 = models.ManyToManyField(Target, related_name='id') m2m_2 = models.ManyToManyField(Target, related_name='src_safe') class Target2(models.Model): clash3 = models.CharField(max_length=10) foreign_tgt = models.ForeignKey(Target) clashforeign_set = models.ForeignKey(Target) m2m_tgt = models.ManyToManyField(Target) clashm2m_set = models.ManyToManyField(Target) class Clash3(models.Model): src_safe = models.CharField(max_length=10) foreign_1 = models.ForeignKey(Target2, related_name='foreign_tgt') foreign_2 = models.ForeignKey(Target2, related_name='m2m_tgt') m2m_1 = models.ManyToManyField(Target2, related_name='foreign_tgt') m2m_2 = models.ManyToManyField(Target2, related_name='m2m_tgt') class ClashForeign(models.Model): foreign = models.ForeignKey(Target2) class ClashM2M(models.Model): m2m = models.ManyToManyField(Target2) class SelfClashForeign(models.Model): src_safe = models.CharField(max_length=10) selfclashforeign = models.CharField(max_length=10) selfclashforeign_set = models.ForeignKey("SelfClashForeign") foreign_1 = models.ForeignKey("SelfClashForeign", related_name='id') foreign_2 = models.ForeignKey("SelfClashForeign", related_name='src_safe') class ValidM2M(models.Model): src_safe = models.CharField(max_length=10) validm2m = models.CharField(max_length=10) # M2M fields are symmetrical by default. Symmetrical M2M fields # on self don't require a related accessor, so many potential # clashes are avoided. validm2m_set = models.ManyToManyField("self") m2m_1 = models.ManyToManyField("self", related_name='id') m2m_2 = models.ManyToManyField("self", related_name='src_safe') m2m_3 = models.ManyToManyField('self') m2m_4 = models.ManyToManyField('self') class SelfClashM2M(models.Model): src_safe = models.CharField(max_length=10) selfclashm2m = models.CharField(max_length=10) # Non-symmetrical M2M fields _do_ have related accessors, so # there is potential for clashes. 
selfclashm2m_set = models.ManyToManyField("self", symmetrical=False) m2m_1 = models.ManyToManyField("self", related_name='id', symmetrical=False) m2m_2 = models.ManyToManyField("self", related_name='src_safe', symmetrical=False) m2m_3 = models.ManyToManyField('self', symmetrical=False) m2m_4 = models.ManyToManyField('self', symmetrical=False) class Model(models.Model): "But it's valid to call a model Model." year = models.PositiveIntegerField() #1960 make = models.CharField(max_length=10) #Aston Martin name = models.CharField(max_length=10) #DB 4 GT class Car(models.Model): colour = models.CharField(max_length=5) model = models.ForeignKey(Model) class MissingRelations(models.Model): rel1 = models.ForeignKey("Rel1") rel2 = models.ManyToManyField("Rel2") class MissingManualM2MModel(models.Model): name = models.CharField(max_length=5) missing_m2m = models.ManyToManyField(Model, through="MissingM2MModel") class Person(models.Model): name = models.CharField(max_length=5) class Group(models.Model): name = models.CharField(max_length=5) primary = models.ManyToManyField(Person, through="Membership", related_name="primary") secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary") tertiary = models.ManyToManyField(Person, through="RelationshipDoubleFK", related_name="tertiary") class GroupTwo(models.Model): name = models.CharField(max_length=5) primary = models.ManyToManyField(Person, through="Membership") secondary = models.ManyToManyField(Group, through="MembershipMissingFK") class Membership(models.Model): person = models.ForeignKey(Person) group = models.ForeignKey(Group) not_default_or_null = models.CharField(max_length=5) class MembershipMissingFK(models.Model): person = models.ForeignKey(Person) class PersonSelfRefM2M(models.Model): name = models.CharField(max_length=5) friends = models.ManyToManyField('self', through="Relationship") too_many_friends = models.ManyToManyField('self', through="RelationshipTripleFK") class PersonSelfRefM2MExplicit(models.Model): name = models.CharField(max_length=5) friends = models.ManyToManyField('self', through="ExplicitRelationship", symmetrical=True) class Relationship(models.Model): first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set") second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set") date_added = models.DateTimeField() class ExplicitRelationship(models.Model): first = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_from_set") second = models.ForeignKey(PersonSelfRefM2MExplicit, related_name="rel_to_set") date_added = models.DateTimeField() class RelationshipTripleFK(models.Model): first = models.ForeignKey(PersonSelfRefM2M, related_name="rel_from_set_2") second = models.ForeignKey(PersonSelfRefM2M, related_name="rel_to_set_2") third = models.ForeignKey(PersonSelfRefM2M, related_name="too_many_by_far") date_added = models.DateTimeField() class RelationshipDoubleFK(models.Model): first = models.ForeignKey(Person, related_name="first_related_name") second = models.ForeignKey(Person, related_name="second_related_name") third = models.ForeignKey(Group, related_name="rel_to_set") date_added = models.DateTimeField() class AbstractModel(models.Model): name = models.CharField(max_length=10) class Meta: abstract = True class AbstractRelationModel(models.Model): fk1 = models.ForeignKey('AbstractModel') fk2 = models.ManyToManyField('AbstractModel') class UniqueM2M(models.Model): """ Model to test for unique ManyToManyFields, which are invalid. 
""" unique_people = models.ManyToManyField(Person, unique=True) class NonUniqueFKTarget1(models.Model): """ Model to test for non-unique FK target in yet-to-be-defined model: expect an error """ tgt = models.ForeignKey('FKTarget', to_field='bad') class UniqueFKTarget1(models.Model): """ Model to test for unique FK target in yet-to-be-defined model: expect no error """ tgt = models.ForeignKey('FKTarget', to_field='good') class FKTarget(models.Model): bad = models.IntegerField() good = models.IntegerField(unique=True) class NonUniqueFKTarget2(models.Model): """ Model to test for non-unique FK target in previously seen model: expect an error """ tgt = models.ForeignKey(FKTarget, to_field='bad') class UniqueFKTarget2(models.Model): """ Model to test for unique FK target in previously seen model: expect no error """ tgt = models.ForeignKey(FKTarget, to_field='good') model_errors = """invalid_models.fielderrors: "charfield": CharFields require a "max_length" attribute that is a positive integer. invalid_models.fielderrors: "charfield2": CharFields require a "max_length" attribute that is a positive integer. invalid_models.fielderrors: "charfield3": CharFields require a "max_length" attribute that is a positive integer. invalid_models.fielderrors: "decimalfield": DecimalFields require a "decimal_places" attribute that is a non-negative integer. invalid_models.fielderrors: "decimalfield": DecimalFields require a "max_digits" attribute that is a positive integer. invalid_models.fielderrors: "decimalfield2": DecimalFields require a "decimal_places" attribute that is a non-negative integer. invalid_models.fielderrors: "decimalfield2": DecimalFields require a "max_digits" attribute that is a positive integer. invalid_models.fielderrors: "decimalfield3": DecimalFields require a "decimal_places" attribute that is a non-negative integer. invalid_models.fielderrors: "decimalfield3": DecimalFields require a "max_digits" attribute that is a positive integer. invalid_models.fielderrors: "filefield": FileFields require an "upload_to" attribute. invalid_models.fielderrors: "choices": "choices" should be iterable (e.g., a tuple or list). invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples. invalid_models.fielderrors: "choices2": "choices" should be a sequence of two-tuples. invalid_models.fielderrors: "index": "db_index" should be either None, True or False. invalid_models.fielderrors: "field_": Field names cannot end with underscores, because this would lead to ambiguous queryset filters. invalid_models.fielderrors: "nullbool": BooleanFields do not accept null values. Use a NullBooleanField instead. invalid_models.clash1: Accessor for field 'foreign' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'. invalid_models.clash1: Accessor for field 'foreign' clashes with related m2m field 'Target.clash1_set'. Add a related_name argument to the definition for 'foreign'. invalid_models.clash1: Reverse query name for field 'foreign' clashes with field 'Target.clash1'. Add a related_name argument to the definition for 'foreign'. invalid_models.clash1: Accessor for m2m field 'm2m' clashes with field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'. invalid_models.clash1: Accessor for m2m field 'm2m' clashes with related field 'Target.clash1_set'. Add a related_name argument to the definition for 'm2m'. invalid_models.clash1: Reverse query name for m2m field 'm2m' clashes with field 'Target.clash1'. 
Add a related_name argument to the definition for 'm2m'. invalid_models.clash2: Accessor for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash2: Accessor for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash2: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash2: Accessor for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash2: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target.src_safe'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash2: Accessor for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash2: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash2: Accessor for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clash2: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target.src_safe'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clash3: Accessor for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash3: Accessor for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash3: Reverse query name for field 'foreign_1' clashes with related m2m field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.clash3: Accessor for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash3: Accessor for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash3: Reverse query name for field 'foreign_2' clashes with related m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'. 
invalid_models.clash3: Accessor for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash3: Reverse query name for m2m field 'm2m_1' clashes with related field 'Target2.foreign_tgt'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clash3: Accessor for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with m2m field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clash3: Reverse query name for m2m field 'm2m_2' clashes with related field 'Target2.m2m_tgt'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.clashforeign: Accessor for field 'foreign' clashes with field 'Target2.clashforeign_set'. Add a related_name argument to the definition for 'foreign'. invalid_models.clashm2m: Accessor for m2m field 'm2m' clashes with m2m field 'Target2.clashm2m_set'. Add a related_name argument to the definition for 'm2m'. invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'. invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'. invalid_models.target2: Accessor for field 'foreign_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'foreign_tgt'. invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'. invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'. invalid_models.target2: Accessor for field 'clashforeign_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashforeign_set'. invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'. invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'. invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'. invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'. invalid_models.target2: Accessor for m2m field 'm2m_tgt' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'm2m_tgt'. invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. 
Add a related_name argument to the definition for 'clashm2m_set'. invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'. invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'. invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'. invalid_models.target2: Accessor for m2m field 'clashm2m_set' clashes with related m2m field 'Target.target2_set'. Add a related_name argument to the definition for 'clashm2m_set'. invalid_models.selfclashforeign: Accessor for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign_set'. Add a related_name argument to the definition for 'selfclashforeign_set'. invalid_models.selfclashforeign: Reverse query name for field 'selfclashforeign_set' clashes with field 'SelfClashForeign.selfclashforeign'. Add a related_name argument to the definition for 'selfclashforeign_set'. invalid_models.selfclashforeign: Accessor for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.selfclashforeign: Reverse query name for field 'foreign_1' clashes with field 'SelfClashForeign.id'. Add a related_name argument to the definition for 'foreign_1'. invalid_models.selfclashforeign: Accessor for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.selfclashforeign: Reverse query name for field 'foreign_2' clashes with field 'SelfClashForeign.src_safe'. Add a related_name argument to the definition for 'foreign_2'. invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'. invalid_models.selfclashm2m: Reverse query name for m2m field 'selfclashm2m_set' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'selfclashm2m_set'. invalid_models.selfclashm2m: Accessor for m2m field 'selfclashm2m_set' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'selfclashm2m_set'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_1' clashes with field 'SelfClashM2M.id'. Add a related_name argument to the definition for 'm2m_1'. invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_2' clashes with field 'SelfClashM2M.src_safe'. Add a related_name argument to the definition for 'm2m_2'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'. 
invalid_models.selfclashm2m: Accessor for m2m field 'm2m_3' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_3'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'. invalid_models.selfclashm2m: Accessor for m2m field 'm2m_4' clashes with related m2m field 'SelfClashM2M.selfclashm2m_set'. Add a related_name argument to the definition for 'm2m_4'. invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_3' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_3'. invalid_models.selfclashm2m: Reverse query name for m2m field 'm2m_4' clashes with field 'SelfClashM2M.selfclashm2m'. Add a related_name argument to the definition for 'm2m_4'. invalid_models.missingrelations: 'rel1' has a relation with model Rel1, which has either not been installed or is abstract. invalid_models.missingrelations: 'rel2' has an m2m relation with model Rel2, which has either not been installed or is abstract. invalid_models.grouptwo: 'primary' is a manually-defined m2m relation through model Membership, which does not have foreign keys to Person and GroupTwo invalid_models.grouptwo: 'secondary' is a manually-defined m2m relation through model MembershipMissingFK, which does not have foreign keys to Group and GroupTwo invalid_models.missingmanualm2mmodel: 'missing_m2m' specifies an m2m relation through model MissingM2MModel, which has not been installed invalid_models.group: The model Group has two manually-defined m2m relations through the model Membership, which is not permitted. Please consider using an extra field on your intermediary model instead. invalid_models.group: Intermediary model RelationshipDoubleFK has more than one foreign key to Person, which is ambiguous and is not permitted. invalid_models.personselfrefm2m: Many-to-many fields with intermediate tables cannot be symmetrical. invalid_models.personselfrefm2m: Intermediary model RelationshipTripleFK has more than two foreign keys to PersonSelfRefM2M, which is ambiguous and is not permitted. invalid_models.personselfrefm2mexplicit: Many-to-many fields with intermediate tables cannot be symmetrical. invalid_models.abstractrelationmodel: 'fk1' has a relation with model AbstractModel, which has either not been installed or is abstract. invalid_models.abstractrelationmodel: 'fk2' has an m2m relation with model AbstractModel, which has either not been installed or is abstract. invalid_models.uniquem2m: ManyToManyFields cannot be unique. Remove the unique argument on 'unique_people'. invalid_models.nonuniquefktarget1: Field 'bad' under model 'FKTarget' must have a unique=True constraint. invalid_models.nonuniquefktarget2: Field 'bad' under model 'FKTarget' must have a unique=True constraint. """
apache-2.0
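Almost every expected message in the fixture above prescribes the same remedy: pass an explicit related_name. A minimal sketch of that fix, with illustrative model and accessor names (Django 1.2-era ForeignKey signature, matching the file):

from django.db import models

class Target(models.Model):
    clash1 = models.CharField(max_length=10)

class Fixed(models.Model):
    # Explicit related_name keeps the reverse accessor and reverse query
    # name clear of Target's own field names.
    foreign = models.ForeignKey(Target, related_name='fixed_fks')
    m2m = models.ManyToManyField(Target, related_name='fixed_m2ms')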
Arafatk/sympy
sympy/liealgebras/type_a.py
76
4403
from __future__ import print_function, division from sympy.core.compatibility import range from sympy.liealgebras.cartan_type import Standard_Cartan from sympy.matrices import eye class TypeA(Standard_Cartan): """ This class contains the information about the A series of simple Lie algebras. """ def __new__(cls, n): if n < 1: raise ValueError("n can not be less than 1") return Standard_Cartan.__new__(cls, "A", n) def dimension(self): """Dimension of the vector space V underlying the Lie algebra Examples ======== >>> from sympy.liealgebras.cartan_type import CartanType >>> c = CartanType("A4") >>> c.dimension() 5 """ return self.n+1 def basic_root(self, i, j): """ This is a method just to generate roots with a 1 in the ith position and a -1 in the jth position. """ n = self.n root = [0]*(n+1) root[i] = 1 root[j] = -1 return root def simple_root(self, i): """ Every Lie algebra has a unique root system. Given a root system Q, there is a subset of the roots such that an element of Q is called a simple root if it cannot be written as the sum of two elements in Q. If we let D denote the set of simple roots, then it is clear that every element of Q can be written as a linear combination of elements of D with all coefficients non-negative. In A_n the ith simple root is the root which has a 1 in the ith position, a -1 in the (i+1)th position, and zeroes elsewhere. This method returns the ith simple root for the A series. Examples ======== >>> from sympy.liealgebras.cartan_type import CartanType >>> c = CartanType("A4") >>> c.simple_root(1) [1, -1, 0, 0, 0] """ return self.basic_root(i-1, i) def positive_roots(self): """ This method generates all the positive roots of A_n. This is half of all of the roots of A_n; by multiplying all the positive roots by -1 we get the negative roots. Examples ======== >>> from sympy.liealgebras.cartan_type import CartanType >>> c = CartanType("A3") >>> c.positive_roots() {1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0], 5: [0, 1, 0, -1], 6: [0, 0, 1, -1]} """ n = self.n posroots = {} k = 0 for i in range(0, n): for j in range(i+1, n+1): k += 1 posroots[k] = self.basic_root(i, j) return posroots def highest_root(self): """ Returns the highest weight root for A_n """ return self.basic_root(0, self.n) def roots(self): """ Returns the total number of roots for A_n """ n = self.n return n*(n+1) def cartan_matrix(self): """ Returns the Cartan matrix for A_n. The Cartan matrix for a Lie algebra is generated by assigning an ordering to the simple roots, (alpha[1], ...., alpha[l]). Then the ijth entry of the Cartan matrix is (<alpha[i],alpha[j]>). Examples ======== >>> from sympy.liealgebras.cartan_type import CartanType >>> c = CartanType('A4') >>> c.cartan_matrix() Matrix([ [ 2, -1, 0, 0], [-1, 2, -1, 0], [ 0, -1, 2, -1], [ 0, 0, -1, 2]]) """ n = self.n m = 2 * eye(n) i = 1 while i < n-1: m[i, i+1] = -1 m[i, i-1] = -1 i += 1 m[0,1] = -1 m[n-1, n-2] = -1 return m def basis(self): """ Returns the number of independent generators of A_n """ n = self.n return n**2 - 1 def lie_algebra(self): """ Returns the Lie algebra associated with A_n """ n = self.n return "su(" + str(n + 1) + ")" def dynkin_diagram(self): n = self.n diag = "---".join("0" for i in range(1, n+1)) + "\n" diag += " ".join(str(i) for i in range(1, n+1)) return diag
bsd-3-clause
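Stitching the doctests above into one session for A_3 (every printed value follows directly from the methods shown: dimension is n+1, there are n(n+1) roots, and the Cartan matrix is tridiagonal):

from sympy.liealgebras.cartan_type import CartanType

c = CartanType("A3")
print(c.dimension())     # 4, i.e. n + 1
print(c.simple_root(2))  # [0, 1, -1, 0]
print(c.highest_root())  # [1, 0, 0, -1]
print(c.roots())         # 12, i.e. n*(n + 1)
print(c.lie_algebra())   # su(4)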
proversity-org/edx-platform
lms/djangoapps/bulk_reset_attempts/tests/test_views.py
1
6441
""" Tests for the Bulk Enrollment views. """ import ddt import json from django.core.urlresolvers import reverse from rest_framework.test import \ APIRequestFactory, APITestCase, force_authenticate from bulk_reset_attempts.serializers import BulkResetStudentAttemptsSerializer from bulk_reset_attempts.views import BulkResetStudentAttemptsView from student.tests.factories import UserFactory from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory @ddt.ddt class BulkResetStudentAttemptsViewTest(SharedModuleStoreTestCase, APITestCase): """ Test the bulk reset attempts endpoint """ USERNAME = "Bob" EMAIL = "bob@example.com" PASSWORD = "edx" @classmethod def setUpClass(cls): super(BulkResetStudentAttemptsViewTest, cls).setUpClass() cls.course =\ CourseFactory.create( display_name='Read My Mind', run="2017", org="KIL", number="KIL001" ) def setUp(self): super(BulkResetStudentAttemptsViewTest, self).setUp() self.view = BulkResetStudentAttemptsView.as_view() self.request_factory = APIRequestFactory() self.url = reverse('bulk_reset_student_attempts') self.staff = UserFactory.create( username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD, is_staff=True, ) def request_bulk_reset_attempts(self, data=None, use_json=False, **extra): """ Make an authenticated request to the bulk enrollment API. """ content_type = None if use_json: content_type = 'application/json' data = json.dumps(data) request = self.request_factory.post( self.url, data=data, content_type=content_type, **extra ) force_authenticate(request, user=self.staff) response = self.view(request) response.render() return response @ddt.data( (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="staff", problems="") ), (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="staff", problems="problem_id") ), (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="", problems="problem_id") ), (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="staff", problems="", email_extension="@example.com") ), (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="", problems="problem_id", email_extension="@example.com") ), (dict( course_id="course-v1:KIL+KIL001+2017", identifiers="staff", problems="problem_id", email_extension="@example.com") ), ) def test_serializer(self, data): """ Test that passing valid data the serializer works. """ serializer = BulkResetStudentAttemptsSerializer(data=data) self.assertTrue(serializer.is_valid()) def test_non_staff(self): """ Test that non global staff users are forbidden from API use. """ self.staff.is_staff = False self.staff.save() response = self.request_bulk_reset_attempts() self.assertEqual(response.status_code, 403) @ddt.data( ( dict(), dict( course_id=["This field is required."], identifiers=["This field is required."], problems=["This field is required."] ) ), ( dict(identifiers="", problems=""), dict(course_id=["This field is required."]) ), ( dict(course_id="course-v1:KIL+KIL001+2017", problems=""), dict(identifiers=["This field is required."]) ), ( dict(course_id="course-v1:KIL+KIL001+2017", identifiers=""), dict(problems=["This field is required."]) ), ) @ddt.unpack def test_missing_data(self, data, error): """ Test that we are not passing the required fields. 
""" response = self.request_bulk_reset_attempts(data, use_json=True) self.assertEqual(response.status_code, 400) self.assertEqual(json.loads(response.content), error) @ddt.data( ( dict( course_id="course-v1:edX", identifiers="", problems="" ), dict(error="Invalid course id: 'course-v1:edX'") ), ) @ddt.unpack def test_wrong_course_id(self, data, error): """ Test that we are passing an invalid course id. """ response = self.request_bulk_reset_attempts(data, use_json=True) self.assertEqual(response.status_code, 400) self.assertEqual(json.loads(response.content), error) @ddt.data( ( dict( course_id="course-v1:KIL+KIL001+2017", identifiers="user1, user2, user3", problems="" ), dict( user1="User does not exist", user2="User does not exist", user3="User does not exist" ) ), ) @ddt.unpack def test_user_does_not_exist(self, data, error): """ Test that we are passing users that don't exists. """ response = self.request_bulk_reset_attempts(data, use_json=True) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content)["errors"], error) @ddt.data( ( dict( course_id="course-v1:KIL+KIL001+2017", identifiers="Bob", problems="problem_id" ), dict(problem_id="The problem does not exist") ), ) @ddt.unpack def test_problem_does_not_exist(self, data, error): """ Test that we are passing problems that don't exists. """ response = self.request_bulk_reset_attempts(data, use_json=True) self.assertEqual(response.status_code, 200) self.assertEqual(json.loads(response.content)["errors"], error)
agpl-3.0
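For reference, the ddt machinery the tests above rely on, in a self-contained mini-example: each @ddt.data tuple becomes its own generated test method, and @ddt.unpack spreads the tuple across the positional arguments.

import unittest
import ddt

@ddt.ddt
class AdditionTest(unittest.TestCase):
    @ddt.data((1, 2, 3), (2, 2, 4))
    @ddt.unpack
    def test_add(self, a, b, expected):
        self.assertEqual(a + b, expected)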
atopuzov/nitro-python
nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnsessionpolicy_binding.py
3
4362
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class vpnsessionpolicy_binding(base_resource): """ Binding class showing the resources that can be bound to vpnsessionpolicy_binding. """ def __init__(self) : self._name = "" self.vpnsessionpolicy_aaauser_binding = [] self.vpnsessionpolicy_vpnglobal_binding = [] self.vpnsessionpolicy_vpnvserver_binding = [] self.vpnsessionpolicy_aaagroup_binding = [] @property def name(self) : ur"""Name of the session policy to display.<br/>Minimum length = 1. """ try : return self._name except Exception as e: raise e @name.setter def name(self, name) : ur"""Name of the session policy to display.<br/>Minimum length = 1 """ try : self._name = name except Exception as e: raise e @property def vpnsessionpolicy_aaauser_bindings(self) : ur"""aaauser that can be bound to vpnsessionpolicy. """ try : return self._vpnsessionpolicy_aaauser_binding except Exception as e: raise e @property def vpnsessionpolicy_vpnglobal_bindings(self) : ur"""vpnglobal that can be bound to vpnsessionpolicy. """ try : return self._vpnsessionpolicy_vpnglobal_binding except Exception as e: raise e @property def vpnsessionpolicy_vpnvserver_bindings(self) : ur"""vpnvserver that can be bound to vpnsessionpolicy. """ try : return self._vpnsessionpolicy_vpnvserver_binding except Exception as e: raise e @property def vpnsessionpolicy_aaagroup_bindings(self) : ur"""aaagroup that can be bound to vpnsessionpolicy. """ try : return self._vpnsessionpolicy_aaagroup_binding except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. """ try : result = service.payload_formatter.string_to_resource(vpnsessionpolicy_binding_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.vpnsessionpolicy_binding except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def get(self, service, name) : ur""" Use this API to fetch vpnsessionpolicy_binding resource. 
""" try : if type(name) is not list : obj = vpnsessionpolicy_binding() obj.name = name response = obj.get_resource(service) else : if name and len(name) > 0 : obj = [vpnsessionpolicy_binding() for _ in range(len(name))] for i in range(len(name)) : obj[i].name = name[i]; response[i] = obj[i].get_resource(service) return response except Exception as e: raise e class vpnsessionpolicy_binding_response(base_response) : def __init__(self, length=1) : self.vpnsessionpolicy_binding = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.vpnsessionpolicy_binding = [vpnsessionpolicy_binding() for _ in range(length)]
apache-2.0
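A hedged usage sketch for the binding class; the appliance address and credentials are placeholders, and nitro_service is assumed to be the SDK's session object per the package layout above:

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service

client = nitro_service("192.0.2.10", "http")  # placeholder NetScaler address
client.login("nsroot", "nsroot")              # placeholder credentials
binding = vpnsessionpolicy_binding.get(client, "my_session_policy")
print(binding.vpnsessionpolicy_vpnvserver_bindings)
client.logout()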
Averroes/statsmodels
statsmodels/genmod/families/varfuncs.py
38
5845
""" Variance functions for use with the link functions in statsmodels.family.links """ __docformat__ = 'restructuredtext' import numpy as np FLOAT_EPS = np.finfo(float).eps class VarianceFunction(object): """ Relates the variance of a random variable to its mean. Defaults to 1. Methods ------- call Returns an array of ones that is the same shape as `mu` Notes ----- After a variance function is initialized, its call method can be used. Alias for VarianceFunction: constant = VarianceFunction() See also -------- statsmodels.family.family """ def __call__(self, mu): """ Default variance function Parameters ----------- mu : array-like mean parameters Returns ------- v : array ones(mu.shape) """ mu = np.asarray(mu) return np.ones(mu.shape, np.float64) def deriv(self, mu): """ Derivative of the variance function v'(mu) """ from statsmodels.tools.numdiff import approx_fprime_cs # TODO: diag workaround proplem with numdiff for 1d return np.diag(approx_fprime_cs(mu, self)) constant = VarianceFunction() constant.__doc__ = """ The call method of constant returns a constant variance, i.e., a vector of ones. constant is an alias of VarianceFunction() """ class Power(object): """ Power variance function Parameters ---------- power : float exponent used in power variance function Methods ------- call Returns the power variance Formulas -------- V(mu) = numpy.fabs(mu)**power Notes ----- Aliases for Power: mu = Power() mu_squared = Power(power=2) mu_cubed = Power(power=3) """ def __init__(self, power=1.): self.power = power def __call__(self, mu): """ Power variance function Parameters ---------- mu : array-like mean parameters Returns ------- variance : array numpy.fabs(mu)**self.power """ return np.power(np.fabs(mu), self.power) def deriv(self, mu): """ Derivative of the variance function v'(mu) """ from statsmodels.tools.numdiff import approx_fprime_cs, approx_fprime #return approx_fprime_cs(mu, self) # TODO fix breaks in `fabs # TODO: diag is workaround problem with numdiff for 1d return np.diag(approx_fprime(mu, self)) mu = Power() mu.__doc__ = """ Returns np.fabs(mu) Notes ----- This is an alias of Power() """ mu_squared = Power(power=2) mu_squared.__doc__ = """ Returns np.fabs(mu)**2 Notes ----- This is an alias of statsmodels.family.links.Power(power=2) """ mu_cubed = Power(power=3) mu_cubed.__doc__ = """ Returns np.fabs(mu)**3 Notes ----- This is an alias of statsmodels.family.links.Power(power=3) """ class Binomial(object): """ Binomial variance function Parameters ---------- n : int, optional The number of trials for a binomial variable. 
The default is 1 for p in (0,1) Methods ------- call Returns the binomial variance Formulas -------- V(mu) = p * (1 - p) * n where p = mu / n Notes ----- Alias for Binomial: binary = Binomial() A private method _clean trims the data by machine epsilon so that p is in (0,1) """ def __init__(self, n=1): self.n = n def _clean(self, p): return np.clip(p, FLOAT_EPS, 1 - FLOAT_EPS) def __call__(self, mu): """ Binomial variance function Parameters ---------- mu : array-like mean parameters Returns ------- variance : array variance = mu/n * (1 - mu/n) * self.n """ p = self._clean(mu / self.n) return p * (1 - p) * self.n #TODO: inherit from super def deriv(self, mu): """ Derivative of the variance function v'(mu) """ from statsmodels.tools.numdiff import approx_fprime_cs, approx_fprime # TODO: diag is a workaround for a problem with numdiff for 1d return np.diag(approx_fprime_cs(mu, self)) binary = Binomial() binary.__doc__ = """ The binomial variance function for n = 1 Notes ----- This is an alias of Binomial(n=1) """ class NegativeBinomial(object): ''' Negative binomial variance function Parameters ---------- alpha : float The ancillary parameter for the negative binomial variance function. `alpha` is assumed to be nonstochastic. The default is 1. Methods ------- call Returns the negative binomial variance Formulas -------- V(mu) = mu + alpha*mu**2 Notes ----- Alias for NegativeBinomial: nbinom = NegativeBinomial() A private method _clean trims the data by machine epsilon so that p is in (0,inf) ''' def __init__(self, alpha=1.): self.alpha = alpha def _clean(self, p): return np.clip(p, FLOAT_EPS, np.inf) def __call__(self, mu): """ Negative binomial variance function Parameters ---------- mu : array-like mean parameters Returns ------- variance : array variance = mu + alpha*mu**2 """ p = self._clean(mu) return p + self.alpha*p**2 def deriv(self, mu): """ Derivative of the negative binomial variance function. """ p = self._clean(mu) return 1 + 2 * self.alpha * p nbinom = NegativeBinomial() nbinom.__doc__ = """ Negative Binomial variance function. Notes ----- This is an alias of NegativeBinomial(alpha=1.) """
bsd-3-clause
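Evaluating the variance functions above on a small array makes the formulas concrete (the values follow V(mu) = p(1-p), |mu|**2, and mu + alpha*mu**2 respectively):

import numpy as np
from statsmodels.genmod.families.varfuncs import (
    Binomial, NegativeBinomial, mu_squared)

mu = np.array([0.2, 0.5, 0.8])
print(Binomial()(mu))                  # [0.16 0.25 0.16]
print(mu_squared(mu))                  # [0.04 0.25 0.64]
print(NegativeBinomial(alpha=1.)(mu))  # [0.24 0.75 1.44]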
undefx/delphi-epidata
src/acquisition/fluview/fluview_notify.py
2
1963
""" =============== === Purpose === =============== Checks the `fluview` table to see if new data is available. If so, it queues the automation flow 'New FluView Available'. This will cause, at least: - impute state ILI - predict next issue wILI (sar3 and arch) - score the last epicast round - send epicast email notifications ================= === Changelog === ================= 2017-02-22 * use secrets 2016-04-07 + Initial version """ # standard library import argparse # third party import mysql.connector # first party import delphi.operations.secrets as secrets if __name__ == '__main__': # Args and usage parser = argparse.ArgumentParser() parser.add_argument('-t', '--test', action='store_const', const=True, default=False, help="do dry run only, don't update the database") args = parser.parse_args() # connect u, p = secrets.db.epi cnx = mysql.connector.connect(user=u, password=p, database='epidata') cur = cnx.cursor() # get the last known issue from the automation table `variables` cur.execute('SELECT `value` FROM automation.`variables` WHERE `name` = %s', ('most_recent_issue',)) for (issue1,) in cur: issue1 = int(issue1) print('last known issue:', issue1) # get the most recent issue from the epidata table `fluview` cur.execute('SELECT max(`issue`) FROM `fluview`') for (issue2,) in cur: issue2 = int(issue2) print('most recent issue:', issue2) if issue2 > issue1: print('new data is available!') if args.test: print('test mode - not making any changes') else: # update the variable cur.execute('UPDATE automation.`variables` SET `value` = %s WHERE `name` = %s', (issue2, 'most_recent_issue')) # queue the 'New FluView Available' flow cur.execute('CALL automation.RunStep(36)') elif issue2 < issue2: raise Exception('most recent issue is older than the last known issue') # cleanup cnx.commit() cur.close() cnx.close()
mit
sumitsourabh/opencog
opencog/python/pln/examples/temporal/composition_agent.py
31
1096
""" A MindAgent to test the application of the temporal rules, specifically the composition of Allen Interval relations """ from opencog.cogserver import MindAgent from pln.chainers import Chainer from pln.rules.temporal_rules import create_composition_rules __author__ = 'Sebastian Ruder' composition_table = "composition_table.txt" class CompositionAgent(MindAgent): def __init__(self): self.chainer = None def create_chainer(self, atomspace): self.chainer = Chainer(atomspace, stimulateAtoms=False, preferAttentionalFocus=False, allow_output_with_variables=True, delete_temporary_variables=True) for rule in create_composition_rules(self.chainer, composition_table): self.chainer.add_rule(rule) print(rule.name) def run(self, atomspace): if self.chainer is None: self.create_chainer(atomspace) return result = self.chainer.forward_step() return result
agpl-3.0
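Outside the CogServer the agent can be driven by hand. A sketch under the assumption that the era's Python bindings expose AtomSpace as below; run() builds the chainer on the first call and steps it afterwards:

from opencog.atomspace import AtomSpace

atomspace = AtomSpace()
agent = CompositionAgent()
agent.run(atomspace)           # first call only creates the chainer
result = agent.run(atomspace)  # subsequent calls do one forward step
print(result)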
listen-lavender/pholcus
gdc/task/news/toutiao.py
1
2955
#!/usr/bin/env python # coding=utf-8 from datetime import * from webcrawl.character import * from webcrawl.task import * from webcrawl.urlkit import URLParse from newsspider import * from webcrawl import request #_print, logger = logprint(modulename(__file__), modulepath(__file__)) class SpiderToutiao(SpiderNewsOrigin): """ Toutiao ("今日头条") news data crawler """ def __init__(self, worknum=6, queuetype='P', worktype='COROUTINE', timeout=-1, tid=0, settings={}, callback=None): super(SpiderToutiao, self).__init__(worknum=worknum, queuetype=queuetype, worktype=worktype, timeout=timeout, tid=tid, settings=settings, callback=callback) self.clsname = self.__class__.__name__ self.tid = tid @store(withData(datacfg.W), Data.insert, update=True, method='MANY') @index('url') @timelimit(20) @initflow('www') def fetchList(self, url, additions={}, timeout=TIMEOUT, implementor=None): result = request.get(url, timeout=timeout, format='JSON') news = result['data'] if not 'next' in result: nextpage = None else: urlobj, params = URLParse.decode(url) if str(params['max_behot_time']) == str(result['next']['max_behot_time']): nextpage = None else: params['max_behot_time'] = result['next']['max_behot_time'] nextpage = URLParse.encode(urlobj, params) nextpage = None yield nextpage for one in news: name = one['title'] if 'image_url' in one: icon = one['image_url'] elif 'image_list' in one and one['image_list']: icon = one['image_list'][0]['url'] else: icon = '' detail_link = one['display_url'] desc = one['source'] + ',' + one['abstract'] src = '今日头条' category = additions.get('category', '') # e.g. '财经' (finance) atime = datetime.strptime(one['datetime'], '%Y-%m-%d %H:%M') create_time = datetime.now() update_time = datetime.now() data = {"name":name, "icon":icon, "detail_link":detail_link, "desc":desc, "src":src, "category":category, 'group':'text', 'content':'', "atime":atime, "create_time":create_time, "update_time":update_time, 'tid':self.tid} yield Data(**data) if __name__ == '__main__': import time spider = SpiderToutiao(worknum=2, queuetype='P', worktype='THREAD') spider.fetchDatas('www', 0, datetime.now().strftime('%Y%m%d'), 'http://www.toutiao.com/api/article/recent/?source=2&count=2&category=news_finance&max_behot_time=%s&utm_source=toutiao&offset=0&max_create_time=1464674041&_=1464682558376' % str(int(time.time())))
mit
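The pagination contract the spider depends on (feed the previous response's next.max_behot_time back as a query parameter) can be sketched with requests; the endpoint and fields mirror the URL in __main__, but this is illustrative plumbing, not the crawler's own:

import time
import requests

url = ('http://www.toutiao.com/api/article/recent/'
       '?source=2&count=2&category=news_finance&max_behot_time=%d'
       % int(time.time()))
result = requests.get(url).json()
for one in result.get('data', []):
    print(one['title'], one['display_url'])
cursor = result.get('next', {}).get('max_behot_time')  # pass back to paginate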
rdblue/Impala
thirdparty/gtest-1.6.0/scripts/pump.py
603
23316
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """pump v0.2.0 - Pretty Useful for Meta Programming. A tool for preprocessor meta programming. Useful for generating repetitive boilerplate code. Especially useful for writing C++ classes, functions, macros, and templates that need to work with various number of arguments. USAGE: pump.py SOURCE_FILE EXAMPLES: pump.py foo.cc.pump Converts foo.cc.pump to foo.cc. GRAMMAR: CODE ::= ATOMIC_CODE* ATOMIC_CODE ::= $var ID = EXPRESSION | $var ID = [[ CODE ]] | $range ID EXPRESSION..EXPRESSION | $for ID SEPARATOR [[ CODE ]] | $($) | $ID | $(EXPRESSION) | $if EXPRESSION [[ CODE ]] ELSE_BRANCH | [[ CODE ]] | RAW_CODE SEPARATOR ::= RAW_CODE | EMPTY ELSE_BRANCH ::= $else [[ CODE ]] | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH | EMPTY EXPRESSION has Python syntax. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sys TOKEN_TABLE = [ (re.compile(r'\$var\s+'), '$var'), (re.compile(r'\$elif\s+'), '$elif'), (re.compile(r'\$else\s+'), '$else'), (re.compile(r'\$for\s+'), '$for'), (re.compile(r'\$if\s+'), '$if'), (re.compile(r'\$range\s+'), '$range'), (re.compile(r'\$[_A-Za-z]\w*'), '$id'), (re.compile(r'\$\(\$\)'), '$($)'), (re.compile(r'\$'), '$'), (re.compile(r'\[\[\n?'), '[['), (re.compile(r'\]\]\n?'), ']]'), ] class Cursor: """Represents a position (line and column) in a text file.""" def __init__(self, line=-1, column=-1): self.line = line self.column = column def __eq__(self, rhs): return self.line == rhs.line and self.column == rhs.column def __ne__(self, rhs): return not self == rhs def __lt__(self, rhs): return self.line < rhs.line or ( self.line == rhs.line and self.column < rhs.column) def __le__(self, rhs): return self < rhs or self == rhs def __gt__(self, rhs): return rhs < self def __ge__(self, rhs): return rhs <= self def __str__(self): if self == Eof(): return 'EOF' else: return '%s(%s)' % (self.line + 1, self.column) def __add__(self, offset): return Cursor(self.line, self.column + offset) def __sub__(self, offset): return Cursor(self.line, self.column - offset) def Clone(self): """Returns a copy of self.""" return Cursor(self.line, self.column) # Special cursor to indicate the end-of-file. def Eof(): """Returns the special cursor to denote the end-of-file.""" return Cursor(-1, -1) class Token: """Represents a token in a Pump source file.""" def __init__(self, start=None, end=None, value=None, token_type=None): if start is None: self.start = Eof() else: self.start = start if end is None: self.end = Eof() else: self.end = end self.value = value self.token_type = token_type def __str__(self): return 'Token @%s: \'%s\' type=%s' % ( self.start, self.value, self.token_type) def Clone(self): """Returns a copy of self.""" return Token(self.start.Clone(), self.end.Clone(), self.value, self.token_type) def StartsWith(lines, pos, string): """Returns True iff the given position in lines starts with 'string'.""" return lines[pos.line][pos.column:].startswith(string) def FindFirstInLine(line, token_table): best_match_start = -1 for (regex, token_type) in token_table: m = regex.search(line) if m: # We found regex in lines if best_match_start < 0 or m.start() < best_match_start: best_match_start = m.start() best_match_length = m.end() - m.start() best_match_token_type = token_type if best_match_start < 0: return None return (best_match_start, best_match_length, best_match_token_type) def FindFirst(lines, token_table, cursor): """Finds the first occurrence of any string in strings in lines.""" start = cursor.Clone() cur_line_number = cursor.line for line in lines[start.line:]: if cur_line_number == start.line: line = line[start.column:] m = FindFirstInLine(line, token_table) if m: # We found a regex in line. 
(start_column, length, token_type) = m if cur_line_number == start.line: start_column += start.column found_start = Cursor(cur_line_number, start_column) found_end = found_start + length return MakeToken(lines, found_start, found_end, token_type) cur_line_number += 1 # We failed to find str in lines return None def SubString(lines, start, end): """Returns a substring in lines.""" if end == Eof(): end = Cursor(len(lines) - 1, len(lines[-1])) if start >= end: return '' if start.line == end.line: return lines[start.line][start.column:end.column] result_lines = ([lines[start.line][start.column:]] + lines[start.line + 1:end.line] + [lines[end.line][:end.column]]) return ''.join(result_lines) def StripMetaComments(str): """Strip meta comments from each line in the given string.""" # First, completely remove lines containing nothing but a meta # comment, including the trailing \n. str = re.sub(r'^\s*\$\$.*\n', '', str) # Then, remove meta comments from contentful lines. return re.sub(r'\s*\$\$.*', '', str) def MakeToken(lines, start, end, token_type): """Creates a new instance of Token.""" return Token(start, end, SubString(lines, start, end), token_type) def ParseToken(lines, pos, regex, token_type): line = lines[pos.line][pos.column:] m = regex.search(line) if m and not m.start(): return MakeToken(lines, pos, pos + m.end(), token_type) else: print 'ERROR: %s expected at %s.' % (token_type, pos) sys.exit(1) ID_REGEX = re.compile(r'[_A-Za-z]\w*') EQ_REGEX = re.compile(r'=') REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)') OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*') WHITE_SPACE_REGEX = re.compile(r'\s') DOT_DOT_REGEX = re.compile(r'\.\.') def Skip(lines, pos, regex): line = lines[pos.line][pos.column:] m = re.search(regex, line) if m and not m.start(): return pos + m.end() else: return pos def SkipUntil(lines, pos, regex, token_type): line = lines[pos.line][pos.column:] m = re.search(regex, line) if m: return pos + m.start() else: print ('ERROR: %s expected on line %s after column %s.' 
% (token_type, pos.line + 1, pos.column)) sys.exit(1) def ParseExpTokenInParens(lines, pos): def ParseInParens(pos): pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX) pos = Skip(lines, pos, r'\(') pos = Parse(pos) pos = Skip(lines, pos, r'\)') return pos def Parse(pos): pos = SkipUntil(lines, pos, r'\(|\)', ')') if SubString(lines, pos, pos + 1) == '(': pos = Parse(pos + 1) pos = Skip(lines, pos, r'\)') return Parse(pos) else: return pos start = pos.Clone() pos = ParseInParens(pos) return MakeToken(lines, start, pos, 'exp') def RStripNewLineFromToken(token): if token.value.endswith('\n'): return Token(token.start, token.end, token.value[:-1], token.token_type) else: return token def TokenizeLines(lines, pos): while True: found = FindFirst(lines, TOKEN_TABLE, pos) if not found: yield MakeToken(lines, pos, Eof(), 'code') return if found.start == pos: prev_token = None prev_token_rstripped = None else: prev_token = MakeToken(lines, pos, found.start, 'code') prev_token_rstripped = RStripNewLineFromToken(prev_token) if found.token_type == '$var': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) eq_token = ParseToken(lines, pos, EQ_REGEX, '=') yield eq_token pos = Skip(lines, eq_token.end, r'\s*') if SubString(lines, pos, pos + 2) != '[[': exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp') yield exp_token pos = Cursor(exp_token.end.line + 1, 0) elif found.token_type == '$for': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX) elif found.token_type == '$range': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..') yield MakeToken(lines, pos, dots_pos, 'exp') yield MakeToken(lines, dots_pos, dots_pos + 2, '..') pos = dots_pos + 2 new_pos = Cursor(pos.line + 1, 0) yield MakeToken(lines, pos, new_pos, 'exp') pos = new_pos elif found.token_type == '$': if prev_token: yield prev_token yield found exp_token = ParseExpTokenInParens(lines, found.end) yield exp_token pos = exp_token.end elif (found.token_type == ']]' or found.token_type == '$if' or found.token_type == '$elif' or found.token_type == '$else'): if prev_token_rstripped: yield prev_token_rstripped yield found pos = found.end else: if prev_token: yield prev_token yield found pos = found.end def Tokenize(s): """A generator that yields the tokens in the given string.""" if s != '': lines = s.splitlines(True) for token in TokenizeLines(lines, Cursor(0, 0)): yield token class CodeNode: def __init__(self, atomic_code_list=None): self.atomic_code = atomic_code_list class VarNode: def __init__(self, identifier=None, atomic_code=None): self.identifier = identifier self.atomic_code = atomic_code class RangeNode: def __init__(self, identifier=None, exp1=None, exp2=None): self.identifier = identifier self.exp1 = exp1 self.exp2 = exp2 class ForNode: def __init__(self, identifier=None, sep=None, code=None): self.identifier = identifier self.sep = sep self.code = code class ElseNode: def __init__(self, else_branch=None): self.else_branch = else_branch class IfNode: def __init__(self, exp=None, then_branch=None, else_branch=None): self.exp = exp self.then_branch = then_branch 
self.else_branch = else_branch class RawCodeNode: def __init__(self, token=None): self.raw_code = token class LiteralDollarNode: def __init__(self, token): self.token = token class ExpNode: def __init__(self, token, python_exp): self.token = token self.python_exp = python_exp def PopFront(a_list): head = a_list[0] a_list[:1] = [] return head def PushFront(a_list, elem): a_list[:0] = [elem] def PopToken(a_list, token_type=None): token = PopFront(a_list) if token_type is not None and token.token_type != token_type: print 'ERROR: %s expected at %s' % (token_type, token.start) print 'ERROR: %s found instead' % (token,) sys.exit(1) return token def PeekToken(a_list): if not a_list: return None return a_list[0] def ParseExpNode(token): python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value) return ExpNode(token, python_exp) def ParseElseNode(tokens): def Pop(token_type=None): return PopToken(tokens, token_type) next = PeekToken(tokens) if not next: return None if next.token_type == '$else': Pop('$else') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return code_node elif next.token_type == '$elif': Pop('$elif') exp = Pop('code') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') inner_else_node = ParseElseNode(tokens) return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)]) elif not next.value.strip(): Pop('code') return ParseElseNode(tokens) else: return None def ParseAtomicCodeNode(tokens): def Pop(token_type=None): return PopToken(tokens, token_type) head = PopFront(tokens) t = head.token_type if t == 'code': return RawCodeNode(head) elif t == '$var': id_token = Pop('id') Pop('=') next = PeekToken(tokens) if next.token_type == 'exp': exp_token = Pop() return VarNode(id_token, ParseExpNode(exp_token)) Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return VarNode(id_token, code_node) elif t == '$for': id_token = Pop('id') next_token = PeekToken(tokens) if next_token.token_type == 'code': sep_token = next_token Pop('code') else: sep_token = None Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return ForNode(id_token, sep_token, code_node) elif t == '$if': exp_token = Pop('code') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') else_node = ParseElseNode(tokens) return IfNode(ParseExpNode(exp_token), code_node, else_node) elif t == '$range': id_token = Pop('id') exp1_token = Pop('exp') Pop('..') exp2_token = Pop('exp') return RangeNode(id_token, ParseExpNode(exp1_token), ParseExpNode(exp2_token)) elif t == '$id': return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id')) elif t == '$($)': return LiteralDollarNode(head) elif t == '$': exp_token = Pop('exp') return ParseExpNode(exp_token) elif t == '[[': code_node = ParseCodeNode(tokens) Pop(']]') return code_node else: PushFront(tokens, head) return None def ParseCodeNode(tokens): atomic_code_list = [] while True: if not tokens: break atomic_code_node = ParseAtomicCodeNode(tokens) if atomic_code_node: atomic_code_list.append(atomic_code_node) else: break return CodeNode(atomic_code_list) def ParseToAST(pump_src_text): """Convert the given Pump source text into an AST.""" tokens = list(Tokenize(pump_src_text)) code_node = ParseCodeNode(tokens) return code_node class Env: def __init__(self): self.variables = [] self.ranges = [] def Clone(self): clone = Env() clone.variables = self.variables[:] clone.ranges = self.ranges[:] return clone def PushVariable(self, var, value): # If value looks like an int, store it as an int. 
try: int_value = int(value) if ('%s' % int_value) == value: value = int_value except Exception: pass self.variables[:0] = [(var, value)] def PopVariable(self): self.variables[:1] = [] def PushRange(self, var, lower, upper): self.ranges[:0] = [(var, lower, upper)] def PopRange(self): self.ranges[:1] = [] def GetValue(self, identifier): for (var, value) in self.variables: if identifier == var: return value print 'ERROR: meta variable %s is undefined.' % (identifier,) sys.exit(1) def EvalExp(self, exp): try: result = eval(exp.python_exp) except Exception, e: print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e) print ('ERROR: failed to evaluate meta expression %s at %s' % (exp.python_exp, exp.token.start)) sys.exit(1) return result def GetRange(self, identifier): for (var, lower, upper) in self.ranges: if identifier == var: return (lower, upper) print 'ERROR: range %s is undefined.' % (identifier,) sys.exit(1) class Output: def __init__(self): self.string = '' def GetLastLine(self): index = self.string.rfind('\n') if index < 0: return '' return self.string[index + 1:] def Append(self, s): self.string += s def RunAtomicCode(env, node, output): if isinstance(node, VarNode): identifier = node.identifier.value.strip() result = Output() RunAtomicCode(env.Clone(), node.atomic_code, result) value = result.string env.PushVariable(identifier, value) elif isinstance(node, RangeNode): identifier = node.identifier.value.strip() lower = int(env.EvalExp(node.exp1)) upper = int(env.EvalExp(node.exp2)) env.PushRange(identifier, lower, upper) elif isinstance(node, ForNode): identifier = node.identifier.value.strip() if node.sep is None: sep = '' else: sep = node.sep.value (lower, upper) = env.GetRange(identifier) for i in range(lower, upper + 1): new_env = env.Clone() new_env.PushVariable(identifier, i) RunCode(new_env, node.code, output) if i != upper: output.Append(sep) elif isinstance(node, RawCodeNode): output.Append(node.raw_code.value) elif isinstance(node, IfNode): cond = env.EvalExp(node.exp) if cond: RunCode(env.Clone(), node.then_branch, output) elif node.else_branch is not None: RunCode(env.Clone(), node.else_branch, output) elif isinstance(node, ExpNode): value = env.EvalExp(node) output.Append('%s' % (value,)) elif isinstance(node, LiteralDollarNode): output.Append('$') elif isinstance(node, CodeNode): RunCode(env.Clone(), node, output) else: print 'BAD' print node sys.exit(1) def RunCode(env, code_node, output): for atomic_code in code_node.atomic_code: RunAtomicCode(env, atomic_code, output) def IsComment(cur_line): return '//' in cur_line def IsInPreprocessorDirevative(prev_lines, cur_line): if cur_line.lstrip().startswith('#'): return True return prev_lines != [] and prev_lines[-1].endswith('\\') def WrapComment(line, output): loc = line.find('//') before_comment = line[:loc].rstrip() if before_comment == '': indent = loc else: output.append(before_comment) indent = len(before_comment) - len(before_comment.lstrip()) prefix = indent*' ' + '// ' max_len = 80 - len(prefix) comment = line[loc + 2:].strip() segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != ''] cur_line = '' for seg in segs: if len((cur_line + seg).rstrip()) < max_len: cur_line += seg else: if cur_line.strip() != '': output.append(prefix + cur_line.rstrip()) cur_line = seg.lstrip() if cur_line.strip() != '': output.append(prefix + cur_line.strip()) def WrapCode(line, line_concat, output): indent = len(line) - len(line.lstrip()) prefix = indent*' ' # Prefix of the current line max_len = 80 - indent - 
len(line_concat) # Maximum length of the current line new_prefix = prefix + 4*' ' # Prefix of a continuation line new_max_len = max_len - 4 # Maximum length of a continuation line # Prefers to wrap a line after a ',' or ';'. segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != ''] cur_line = '' # The current line without leading spaces. for seg in segs: # If the line is still too long, wrap at a space. while cur_line == '' and len(seg.strip()) > max_len: seg = seg.lstrip() split_at = seg.rfind(' ', 0, max_len) output.append(prefix + seg[:split_at].strip() + line_concat) seg = seg[split_at + 1:] prefix = new_prefix max_len = new_max_len if len((cur_line + seg).rstrip()) < max_len: cur_line = (cur_line + seg).lstrip() else: output.append(prefix + cur_line.rstrip() + line_concat) prefix = new_prefix max_len = new_max_len cur_line = seg.lstrip() if cur_line.strip() != '': output.append(prefix + cur_line.strip()) def WrapPreprocessorDirevative(line, output): WrapCode(line, ' \\', output) def WrapPlainCode(line, output): WrapCode(line, '', output) def IsHeaderGuardOrInclude(line): return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or re.match(r'^#include\s', line)) def WrapLongLine(line, output): line = line.rstrip() if len(line) <= 80: output.append(line) elif IsComment(line): if IsHeaderGuardOrInclude(line): # The style guide made an exception to allow long header guard lines # and includes. output.append(line) else: WrapComment(line, output) elif IsInPreprocessorDirevative(output, line): if IsHeaderGuardOrInclude(line): # The style guide made an exception to allow long header guard lines # and includes. output.append(line) else: WrapPreprocessorDirevative(line, output) else: WrapPlainCode(line, output) def BeautifyCode(string): lines = string.splitlines() output = [] for line in lines: WrapLongLine(line, output) output2 = [line.rstrip() for line in output] return '\n'.join(output2) + '\n' def ConvertFromPumpSource(src_text): """Return the text generated from the given Pump source text.""" ast = ParseToAST(StripMetaComments(src_text)) output = Output() RunCode(Env(), ast, output) return BeautifyCode(output.string) def main(argv): if len(argv) == 1: print __doc__ sys.exit(1) file_path = argv[-1] output_str = ConvertFromPumpSource(file(file_path, 'r').read()) if file_path.endswith('.pump'): output_file_path = file_path[:-5] else: output_file_path = '-' if output_file_path == '-': print output_str, else: output_file = file(output_file_path, 'w') output_file.write('// This file was GENERATED by command:\n') output_file.write('// %s %s\n' % (os.path.basename(__file__), os.path.basename(file_path))) output_file.write('// DO NOT EDIT BY HAND!!!\n\n') output_file.write(output_str) output_file.close() if __name__ == '__main__': main(sys.argv)
apache-2.0
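A note on the record above: ParseExpNode turns a Pump meta expression into Python by rewriting every identifier into an environment lookup before eval(). The snippet below reproduces that single substitution in isolation (the regex is copied verbatim from ParseExpNode) so the transformation is easy to see.

import re

# The identifier-to-lookup rewrite performed by ParseExpNode, in isolation.
exp = 'i * 2 + j'
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', exp)
print(python_exp)
# Prints: self.GetValue("i") * 2 + self.GetValue("j")

Numbers are left alone because the regex requires a leading letter or underscore; at evaluation time each GetValue() call resolves the identifier against the variables pushed by $var and $for.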
timkofu/gitstars
stars/migrations/0001_initial.py
1
1853
# Generated by Django 3.0.8 on 2020-07-04 10:01 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='ProgrammingLanguage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=64)), ], ), migrations.CreateModel( name='Project', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=255)), ('full_name', models.CharField(max_length=255, unique=True)), ('description', models.TextField()), ('url', models.URLField(unique=True)), ('initial_stars', models.IntegerField()), ('current_stars', models.IntegerField(default=0)), ('add_date', models.DateField(auto_now_add=True)), ('notes', models.TextField(blank=True, null=True)), ('language', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='stars.ProgrammingLanguage')), ], options={ 'verbose_name_plural': 'Stars', }, ), migrations.AddIndex( model_name='programminglanguage', index=models.Index(fields=['name'], name='stars_progr_name_95f3ba_idx'), ), migrations.AddIndex( model_name='project', index=models.Index(fields=['name', 'full_name', 'description'], name='stars_proje_name_c5f214_idx'), ), ]
mit
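For reference, here is a models.py that would produce the initial migration above. The field options and Meta values are taken directly from the migration's own operations, so they are exact; the module layout and class ordering are assumptions.

from django.db import models


class ProgrammingLanguage(models.Model):
    name = models.CharField(max_length=64)

    class Meta:
        indexes = [models.Index(fields=['name'])]


class Project(models.Model):
    name = models.CharField(max_length=255)
    full_name = models.CharField(max_length=255, unique=True)
    description = models.TextField()
    url = models.URLField(unique=True)
    initial_stars = models.IntegerField()
    current_stars = models.IntegerField(default=0)
    add_date = models.DateField(auto_now_add=True)
    notes = models.TextField(blank=True, null=True)
    language = models.ForeignKey(
        ProgrammingLanguage,
        on_delete=models.CASCADE,
        related_name='projects',
    )

    class Meta:
        verbose_name_plural = 'Stars'
        indexes = [models.Index(fields=['name', 'full_name', 'description'])]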
cortedeltimo/SickRage
lib/requests/adapters.py
69
20836
# -*- coding: utf-8 -*- """ requests.adapters ~~~~~~~~~~~~~~~~~ This module contains the transport adapters that Requests uses to define and maintain connections. """ import os.path import socket from urllib3.poolmanager import PoolManager, proxy_from_url from urllib3.response import HTTPResponse from urllib3.util import Timeout as TimeoutSauce from urllib3.util.retry import Retry from urllib3.exceptions import ClosedPoolError from urllib3.exceptions import ConnectTimeoutError from urllib3.exceptions import HTTPError as _HTTPError from urllib3.exceptions import MaxRetryError from urllib3.exceptions import NewConnectionError from urllib3.exceptions import ProxyError as _ProxyError from urllib3.exceptions import ProtocolError from urllib3.exceptions import ReadTimeoutError from urllib3.exceptions import SSLError as _SSLError from urllib3.exceptions import ResponseError from .models import Response from .compat import urlparse, basestring from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, prepend_scheme_if_needed, get_auth_from_url, urldefragauth, select_proxy) from .structures import CaseInsensitiveDict from .cookies import extract_cookies_to_jar from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, ProxyError, RetryError, InvalidSchema) from .auth import _basic_auth_str try: from urllib3.contrib.socks import SOCKSProxyManager except ImportError: def SOCKSProxyManager(*args, **kwargs): raise InvalidSchema("Missing dependencies for SOCKS support.") DEFAULT_POOLBLOCK = False DEFAULT_POOLSIZE = 10 DEFAULT_RETRIES = 0 DEFAULT_POOL_TIMEOUT = None class BaseAdapter(object): """The Base Transport Adapter""" def __init__(self): super(BaseAdapter, self).__init__() def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. """ raise NotImplementedError def close(self): """Cleans up adapter specific items.""" raise NotImplementedError class HTTPAdapter(BaseAdapter): """The built-in HTTP Adapter for urllib3. Provides a general-case interface for Requests sessions to contact HTTP and HTTPS urls by implementing the Transport Adapter interface. This class will usually be created by the :class:`Session <Session>` class under the covers. :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. :param max_retries: The maximum number of retries each connection should attempt. Note, this applies only to failed DNS lookups, socket connections and connection timeouts, never to requests where data has made it to the server. By default, Requests does not retry failed connections. If you need granular control over the conditions under which we retry a request, import urllib3's ``Retry`` class and pass that instead. 
:param pool_block: Whether the connection pool should block for connections. Usage:: >>> import requests >>> s = requests.Session() >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', '_pool_block'] def __init__(self, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK): if max_retries == DEFAULT_RETRIES: self.max_retries = Retry(0, read=False) else: self.max_retries = Retry.from_int(max_retries) self.config = {} self.proxy_manager = {} super(HTTPAdapter, self).__init__() self._pool_connections = pool_connections self._pool_maxsize = pool_maxsize self._pool_block = pool_block self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) def __getstate__(self): return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__) def __setstate__(self, state): # Can't handle by adding 'proxy_manager' to self.__attrs__ because # self.poolmanager uses a lambda function, which isn't pickleable. self.proxy_manager = {} self.config = {} for attr, value in state.items(): setattr(self, attr, value) self.init_poolmanager(self._pool_connections, self._pool_maxsize, block=self._pool_block) def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): """Initializes a urllib3 PoolManager. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param connections: The number of urllib3 connection pools to cache. :param maxsize: The maximum number of connections to save in the pool. :param block: Block when no free connections are available. :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. """ # save these values for pickling self._pool_connections = connections self._pool_maxsize = maxsize self._pool_block = block self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, block=block, strict=True, **pool_kwargs) def proxy_manager_for(self, proxy, **proxy_kwargs): """Return urllib3 ProxyManager for the given proxy. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxy: The proxy to return a urllib3 ProxyManager for. :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. :returns: ProxyManager :rtype: urllib3.ProxyManager """ if proxy in self.proxy_manager: manager = self.proxy_manager[proxy] elif proxy.lower().startswith('socks'): username, password = get_auth_from_url(proxy) manager = self.proxy_manager[proxy] = SOCKSProxyManager( proxy, username=username, password=password, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs ) else: proxy_headers = self.proxy_headers(proxy) manager = self.proxy_manager[proxy] = proxy_from_url( proxy, proxy_headers=proxy_headers, num_pools=self._pool_connections, maxsize=self._pool_maxsize, block=self._pool_block, **proxy_kwargs) return manager def cert_verify(self, conn, url, verify, cert): """Verify a SSL certificate. This method should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param conn: The urllib3 connection object associated with the cert. :param url: The requested URL. 
:param verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: The SSL certificate to verify. """ if url.lower().startswith('https') and verify: cert_loc = None # Allow self-specified cert location. if verify is not True: cert_loc = verify if not cert_loc: cert_loc = DEFAULT_CA_BUNDLE_PATH if not cert_loc or not os.path.exists(cert_loc): raise IOError("Could not find a suitable TLS CA certificate bundle, " "invalid path: {0}".format(cert_loc)) conn.cert_reqs = 'CERT_REQUIRED' if not os.path.isdir(cert_loc): conn.ca_certs = cert_loc else: conn.ca_cert_dir = cert_loc else: conn.cert_reqs = 'CERT_NONE' conn.ca_certs = None conn.ca_cert_dir = None if cert: if not isinstance(cert, basestring): conn.cert_file = cert[0] conn.key_file = cert[1] else: conn.cert_file = cert conn.key_file = None if conn.cert_file and not os.path.exists(conn.cert_file): raise IOError("Could not find the TLS certificate file, " "invalid path: {0}".format(conn.cert_file)) if conn.key_file and not os.path.exists(conn.key_file): raise IOError("Could not find the TLS key file, " "invalid path: {0}".format(conn.key_file)) def build_response(self, req, resp): """Builds a :class:`Response <requests.Response>` object from a urllib3 response. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>` :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response. :param resp: The urllib3 response object. :rtype: requests.Response """ response = Response() # Fallback to None if there's no status_code, for whatever reason. response.status_code = getattr(resp, 'status', None) # Make headers case-insensitive. response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) # Set encoding. response.encoding = get_encoding_from_headers(response.headers) response.raw = resp response.reason = response.raw.reason if isinstance(req.url, bytes): response.url = req.url.decode('utf-8') else: response.url = req.url # Add new cookies from the server. extract_cookies_to_jar(response.cookies, req, resp) # Give the Response some context. response.request = req response.connection = self return response def get_connection(self, url, proxies=None): """Returns a urllib3 connection for the given URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param url: The URL to connect to. :param proxies: (optional) A Requests-style dictionary of proxies used on this request. :rtype: urllib3.ConnectionPool """ proxy = select_proxy(url, proxies) if proxy: proxy = prepend_scheme_if_needed(proxy, 'http') proxy_manager = self.proxy_manager_for(proxy) conn = proxy_manager.connection_from_url(url) else: # Only scheme should be lower case parsed = urlparse(url) url = parsed.geturl() conn = self.poolmanager.connection_from_url(url) return conn def close(self): """Disposes of any internal state. Currently, this closes the PoolManager and any active ProxyManager, which closes any pooled connections. """ self.poolmanager.clear() for proxy in self.proxy_manager.values(): proxy.clear() def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. 
This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. :rtype: str """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme is_proxied_http_request = (proxy and scheme != 'https') using_socks_proxy = False if proxy: proxy_scheme = urlparse(proxy).scheme.lower() using_socks_proxy = proxy_scheme.startswith('socks') url = request.path_url if is_proxied_http_request and not using_socks_proxy: url = urldefragauth(request.url) return url def add_headers(self, request, **kwargs): """Add any headers needed by the connection. As of v2.0 this does nothing by default, but is left for overriding by users that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to. :param kwargs: The keyword arguments from the call to send(). """ pass def proxy_headers(self, proxy): """Returns a dictionary of the headers to add to any request sent through a proxy. This works with urllib3 magic to ensure that they are correctly sent to the proxy, rather than in a tunnelled request if CONNECT is being used. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param proxies: The url of the proxy being used for this request. :rtype: dict """ headers = {} username, password = get_auth_from_url(proxy) if username: headers['Proxy-Authorization'] = _basic_auth_str(username, password) return headers def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): """Sends PreparedRequest object. Returns Response object. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param stream: (optional) Whether to stream the request content. :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a :ref:`(connect timeout, read timeout) <timeouts>` tuple. :type timeout: float or tuple or urllib3 Timeout object :param verify: (optional) Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use :param cert: (optional) Any user-provided SSL certificate to be trusted. :param proxies: (optional) The proxies dictionary to apply to the request. :rtype: requests.Response """ conn = self.get_connection(request.url, proxies) self.cert_verify(conn, request.url, verify, cert) url = self.request_url(request, proxies) self.add_headers(request) chunked = not (request.body is None or 'Content-Length' in request.headers) if isinstance(timeout, tuple): try: connect, read = timeout timeout = TimeoutSauce(connect=connect, read=read) except ValueError as e: # this may raise a string formatting error. err = ("Invalid timeout {0}. 
Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout)) raise ValueError(err) elif isinstance(timeout, TimeoutSauce): pass else: timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: resp = conn.urlopen( method=request.method, url=url, body=request.body, headers=request.headers, redirect=False, assert_same_host=False, preload_content=False, decode_content=False, retries=self.max_retries, timeout=timeout ) # Send the request. else: if hasattr(conn, 'proxy_pool'): conn = conn.proxy_pool low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) try: low_conn.putrequest(request.method, url, skip_accept_encoding=True) for header, value in request.headers.items(): low_conn.putheader(header, value) low_conn.endheaders() for i in request.body: low_conn.send(hex(len(i))[2:].encode('utf-8')) low_conn.send(b'\r\n') low_conn.send(i) low_conn.send(b'\r\n') low_conn.send(b'0\r\n\r\n') # Receive the response from the server try: # For Python 2.7+ versions, use buffering of HTTP # responses r = low_conn.getresponse(buffering=True) except TypeError: # For compatibility with Python 2.6 versions and back r = low_conn.getresponse() resp = HTTPResponse.from_httplib( r, pool=conn, connection=low_conn, preload_content=False, decode_content=False ) except: # If we hit any problems here, clean up the connection. # Then, reraise so that we can handle the actual exception. low_conn.close() raise except (ProtocolError, socket.error) as err: raise ConnectionError(err, request=request) except MaxRetryError as e: if isinstance(e.reason, ConnectTimeoutError): # TODO: Remove this in 3.0.0: see #2811 if not isinstance(e.reason, NewConnectionError): raise ConnectTimeout(e, request=request) if isinstance(e.reason, ResponseError): raise RetryError(e, request=request) if isinstance(e.reason, _ProxyError): raise ProxyError(e, request=request) if isinstance(e.reason, _SSLError): # This branch is for urllib3 v1.22 and later. raise SSLError(e, request=request) raise ConnectionError(e, request=request) except ClosedPoolError as e: raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): # This branch is for urllib3 versions earlier than v1.22 raise SSLError(e, request=request) elif isinstance(e, ReadTimeoutError): raise ReadTimeout(e, request=request) else: raise return self.build_response(request, resp)
gpl-3.0
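The HTTPAdapter docstring in the record above shows the basic mount pattern; the sketch below fills it out with an explicit urllib3 Retry policy and pool sizing, as the docstring suggests for granular retry control. All calls are public requests/urllib3 APIs used by this module; the target URL is a placeholder.

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()

# Retry idempotent failures at the urllib3 level instead of passing an int.
retries = Retry(total=3, backoff_factor=0.5,
                status_forcelist=(500, 502, 503, 504))
adapter = HTTPAdapter(pool_connections=4, pool_maxsize=8,
                      max_retries=retries, pool_block=True)

# Every HTTPS request made through this session now uses the tuned adapter.
session.mount('https://', adapter)
# resp = session.get('https://example.com/')  # placeholder URL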
samuelfd/harpia
harpia/bpGUI/xor.py
1
5854
# -*- coding: utf-8 -*- # [HARPIA PROJECT] # # # S2i - Intelligent Industrial Systems # DAS - Automation and Systems Department # UFSC - Federal University of Santa Catarina # Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org), # Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br) # 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br) # # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License version 3, as published # by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranties of # MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR # PURPOSE. See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. # # For further information, check the COPYING file distributed with this software. # # ---------------------------------------------------------------------- import gtk from harpia.GladeWindow import GladeWindow from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR # i18n import os import gettext from harpia.utils.XMLUtils import XMLParser _ = gettext.gettext gettext.bindtextdomain(APP, DIR) gettext.textdomain(APP) # ---------------------------------------------------------------------- class Properties(GladeWindow, S2iCommonProperties): # ---------------------------------------------------------------------- def __init__(self, PropertiesXML, S2iBlockProperties): self.m_sDataDir = os.environ['HARPIA_DATA_DIR'] filename = self.m_sDataDir + 'glade/xor.ui' self.m_oPropertiesXML = PropertiesXML self.m_oS2iBlockProperties = S2iBlockProperties widget_list = [ 'Properties', 'BackgroundColor', 'BorderColor', 'HelpView' ] handlers = [ 'on_cancel_clicked', 'on_xor_confirm_clicked', 'on_BackColorButton_clicked', 'on_BorderColorButton_clicked' ] top_window = 'Properties' GladeWindow.__init__(self, filename, top_window, widget_list, handlers) self.configure() # load help text t_oS2iHelp = XMLParser(self.m_sDataDir + "help/xor" + _("_en.help")) t_oTextBuffer = gtk.TextBuffer() t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent()))) self.widgets['HelpView'].set_buffer(t_oTextBuffer) # ---------------------------------------------------------------------- def __del__(self): pass # ---------------------------------------------------------------------- def on_xor_confirm_clicked(self, *args): self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor) self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor) self.widgets['Properties'].destroy() # ---------------------------------------------------------------------- # XorProperties = Properties() # XorProperties.show( center=0 ) # ------------------------------------------------------------------------------ # Code generation # ------------------------------------------------------------------------------ def generate(blockTemplate): import harpia.gerador blockTemplate.imagesIO = \ 'IplImage * block' + blockTemplate.blockNumber + '_img_i1 = NULL;\n' + \ 'IplImage * block' + blockTemplate.blockNumber + '_img_i2 = NULL;\n' + \ 'IplImage * block' + blockTemplate.blockNumber + '_img_o1 = NULL;\n' blockTemplate.functionCall = 
'\nif(block' + blockTemplate.blockNumber + '_img_i1){\n' + \ 'block' + blockTemplate.blockNumber + '_img_o1 = cvCreateImage(cvSize(block' + blockTemplate.blockNumber + \ '_img_i1->width,block' + blockTemplate.blockNumber + '_img_i1->height),block' + blockTemplate.blockNumber + \ '_img_i1->depth,block' + blockTemplate.blockNumber + '_img_i1->nChannels);\n' + \ harpia.gerador.inputSizeComply(2, blockTemplate.blockNumber) + 'cvXor(block' + \ blockTemplate.blockNumber + '_img_i1, block' + blockTemplate.blockNumber + '_img_i2, block' + \ blockTemplate.blockNumber + '_img_o1,0);\n cvResetImageROI(block' + blockTemplate.blockNumber + '_img_o1);}\n' blockTemplate.dealloc = 'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_o1);\n' + \ 'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_i1);\n' + \ 'cvReleaseImage(&block' + blockTemplate.blockNumber + '_img_i2);\n' # ------------------------------------------------------------------------------ # Block Setup # ------------------------------------------------------------------------------ def getBlock(): return {"Label": _("Xor"), "Path": {"Python": "xor", "Glade": "glade/xor.ui", "Xml": "xml/xor.xml"}, "Inputs": 2, "Outputs": 1, "Icon": "images/xor.png", "Color": "10:180:10:150", "InTypes": {0: "HRP_IMAGE", 1: "HRP_IMAGE"}, "OutTypes": {0: "HRP_IMAGE"}, "Description": _("Logical XOR (exclusive-or) operation between two images."), "TreeGroup": _("Arithmetic and logical operations") }
gpl-2.0
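generate() in the record above emits OpenCV C code purely by string concatenation around blockTemplate.blockNumber. The fragment below shows the idea with a hypothetical stand-in for blockTemplate (the real object comes from Harpia's code generator and is not reconstructed here), rendering the image-declaration section for block number 7.

class FakeBlockTemplate(object):
    """Hypothetical stand-in exposing the attributes generate() writes."""
    def __init__(self, block_number):
        self.blockNumber = block_number
        self.imagesIO = ''
        self.functionCall = ''
        self.dealloc = ''

bt = FakeBlockTemplate('7')
# Same string assembly generate() performs for its imagesIO section.
bt.imagesIO = (
    'IplImage * block' + bt.blockNumber + '_img_i1 = NULL;\n'
    'IplImage * block' + bt.blockNumber + '_img_i2 = NULL;\n'
    'IplImage * block' + bt.blockNumber + '_img_o1 = NULL;\n'
)
print(bt.imagesIO)
# IplImage * block7_img_i1 = NULL;
# IplImage * block7_img_i2 = NULL;
# IplImage * block7_img_o1 = NULL;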
kmolab/kmolab.github.io
data/Brython-3.3.4/Lib/unittest/test/testmock/testmagicmethods.py
737
12145
import unittest import inspect import sys from unittest.mock import Mock, MagicMock, _magics class TestMockingMagicMethods(unittest.TestCase): def test_deleting_magic_methods(self): mock = Mock() self.assertFalse(hasattr(mock, '__getitem__')) mock.__getitem__ = Mock() self.assertTrue(hasattr(mock, '__getitem__')) del mock.__getitem__ self.assertFalse(hasattr(mock, '__getitem__')) def test_magicmock_del(self): mock = MagicMock() # before using getitem del mock.__getitem__ self.assertRaises(TypeError, lambda: mock['foo']) mock = MagicMock() # this time use it first mock['foo'] del mock.__getitem__ self.assertRaises(TypeError, lambda: mock['foo']) def test_magic_method_wrapping(self): mock = Mock() def f(self, name): return self, 'fish' mock.__getitem__ = f self.assertFalse(mock.__getitem__ is f) self.assertEqual(mock['foo'], (mock, 'fish')) self.assertEqual(mock.__getitem__('foo'), (mock, 'fish')) mock.__getitem__ = mock self.assertTrue(mock.__getitem__ is mock) def test_magic_methods_isolated_between_mocks(self): mock1 = Mock() mock2 = Mock() mock1.__iter__ = Mock(return_value=iter([])) self.assertEqual(list(mock1), []) self.assertRaises(TypeError, lambda: list(mock2)) def test_repr(self): mock = Mock() self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock)) mock.__repr__ = lambda s: 'foo' self.assertEqual(repr(mock), 'foo') def test_str(self): mock = Mock() self.assertEqual(str(mock), object.__str__(mock)) mock.__str__ = lambda s: 'foo' self.assertEqual(str(mock), 'foo') def test_dict_methods(self): mock = Mock() self.assertRaises(TypeError, lambda: mock['foo']) def _del(): del mock['foo'] def _set(): mock['foo'] = 3 self.assertRaises(TypeError, _del) self.assertRaises(TypeError, _set) _dict = {} def getitem(s, name): return _dict[name] def setitem(s, name, value): _dict[name] = value def delitem(s, name): del _dict[name] mock.__setitem__ = setitem mock.__getitem__ = getitem mock.__delitem__ = delitem self.assertRaises(KeyError, lambda: mock['foo']) mock['foo'] = 'bar' self.assertEqual(_dict, {'foo': 'bar'}) self.assertEqual(mock['foo'], 'bar') del mock['foo'] self.assertEqual(_dict, {}) def test_numeric(self): original = mock = Mock() mock.value = 0 self.assertRaises(TypeError, lambda: mock + 3) def add(self, other): mock.value += other return self mock.__add__ = add self.assertEqual(mock + 3, mock) self.assertEqual(mock.value, 3) del mock.__add__ def iadd(mock): mock += 3 self.assertRaises(TypeError, iadd, mock) mock.__iadd__ = add mock += 6 self.assertEqual(mock, original) self.assertEqual(mock.value, 9) self.assertRaises(TypeError, lambda: 3 + mock) mock.__radd__ = add self.assertEqual(7 + mock, mock) self.assertEqual(mock.value, 16) def test_hash(self): mock = Mock() # test delegation self.assertEqual(hash(mock), Mock.__hash__(mock)) def _hash(s): return 3 mock.__hash__ = _hash self.assertEqual(hash(mock), 3) def test_nonzero(self): m = Mock() self.assertTrue(bool(m)) m.__bool__ = lambda s: False self.assertFalse(bool(m)) def test_comparison(self): mock = Mock() def comp(s, o): return True mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp self. assertTrue(mock < 3) self. assertTrue(mock > 3) self. assertTrue(mock <= 3) self. 
assertTrue(mock >= 3) self.assertRaises(TypeError, lambda: MagicMock() < object()) self.assertRaises(TypeError, lambda: object() < MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() < MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() > object()) self.assertRaises(TypeError, lambda: object() > MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() > MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() <= object()) self.assertRaises(TypeError, lambda: object() <= MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() >= object()) self.assertRaises(TypeError, lambda: object() >= MagicMock()) self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock()) def test_equality(self): for mock in Mock(), MagicMock(): self.assertEqual(mock == mock, True) self.assertIsInstance(mock == mock, bool) self.assertEqual(mock != mock, False) self.assertIsInstance(mock != mock, bool) self.assertEqual(mock == object(), False) self.assertEqual(mock != object(), True) def eq(self, other): return other == 3 mock.__eq__ = eq self.assertTrue(mock == 3) self.assertFalse(mock == 4) def ne(self, other): return other == 3 mock.__ne__ = ne self.assertTrue(mock != 3) self.assertFalse(mock != 4) mock = MagicMock() mock.__eq__.return_value = True self.assertIsInstance(mock == 3, bool) self.assertEqual(mock == 3, True) mock.__ne__.return_value = False self.assertIsInstance(mock != 3, bool) self.assertEqual(mock != 3, False) def test_len_contains_iter(self): mock = Mock() self.assertRaises(TypeError, len, mock) self.assertRaises(TypeError, iter, mock) self.assertRaises(TypeError, lambda: 'foo' in mock) mock.__len__ = lambda s: 6 self.assertEqual(len(mock), 6) mock.__contains__ = lambda s, o: o == 3 self.assertTrue(3 in mock) self.assertFalse(6 in mock) mock.__iter__ = lambda s: iter('foobarbaz') self.assertEqual(list(mock), list('foobarbaz')) def test_magicmock(self): mock = MagicMock() mock.__iter__.return_value = iter([1, 2, 3]) self.assertEqual(list(mock), [1, 2, 3]) getattr(mock, '__bool__').return_value = False self.assertFalse(hasattr(mock, '__nonzero__')) self.assertFalse(bool(mock)) for entry in _magics: self.assertTrue(hasattr(mock, entry)) self.assertFalse(hasattr(mock, '__imaginery__')) def test_magic_mock_equality(self): mock = MagicMock() self.assertIsInstance(mock == object(), bool) self.assertIsInstance(mock != object(), bool) self.assertEqual(mock == object(), False) self.assertEqual(mock != object(), True) self.assertEqual(mock == mock, True) self.assertEqual(mock != mock, False) def test_magicmock_defaults(self): mock = MagicMock() self.assertEqual(int(mock), 1) self.assertEqual(complex(mock), 1j) self.assertEqual(float(mock), 1.0) self.assertNotIn(object(), mock) self.assertEqual(len(mock), 0) self.assertEqual(list(mock), []) self.assertEqual(hash(mock), object.__hash__(mock)) self.assertEqual(str(mock), object.__str__(mock)) self.assertTrue(bool(mock)) # in Python 3 oct and hex use __index__ # so these tests are for __index__ in py3k self.assertEqual(oct(mock), '0o1') self.assertEqual(hex(mock), '0x1') # how to test __sizeof__ ? 
def test_magic_methods_and_spec(self): class Iterable(object): def __iter__(self): pass mock = Mock(spec=Iterable) self.assertRaises(AttributeError, lambda: mock.__iter__) mock.__iter__ = Mock(return_value=iter([])) self.assertEqual(list(mock), []) class NonIterable(object): pass mock = Mock(spec=NonIterable) self.assertRaises(AttributeError, lambda: mock.__iter__) def set_int(): mock.__int__ = Mock(return_value=iter([])) self.assertRaises(AttributeError, set_int) mock = MagicMock(spec=Iterable) self.assertEqual(list(mock), []) self.assertRaises(AttributeError, set_int) def test_magic_methods_and_spec_set(self): class Iterable(object): def __iter__(self): pass mock = Mock(spec_set=Iterable) self.assertRaises(AttributeError, lambda: mock.__iter__) mock.__iter__ = Mock(return_value=iter([])) self.assertEqual(list(mock), []) class NonIterable(object): pass mock = Mock(spec_set=NonIterable) self.assertRaises(AttributeError, lambda: mock.__iter__) def set_int(): mock.__int__ = Mock(return_value=iter([])) self.assertRaises(AttributeError, set_int) mock = MagicMock(spec_set=Iterable) self.assertEqual(list(mock), []) self.assertRaises(AttributeError, set_int) def test_setting_unsupported_magic_method(self): mock = MagicMock() def set_setattr(): mock.__setattr__ = lambda self, name: None self.assertRaisesRegex(AttributeError, "Attempting to set unsupported magic method '__setattr__'.", set_setattr ) def test_attributes_and_return_value(self): mock = MagicMock() attr = mock.foo def _get_type(obj): # the type of every mock (or magicmock) is a custom subclass # so the real type is the second in the mro return type(obj).__mro__[1] self.assertEqual(_get_type(attr), MagicMock) returned = mock() self.assertEqual(_get_type(returned), MagicMock) def test_magic_methods_are_magic_mocks(self): mock = MagicMock() self.assertIsInstance(mock.__getitem__, MagicMock) mock[1][2].__getitem__.return_value = 3 self.assertEqual(mock[1][2][3], 3) def test_magic_method_reset_mock(self): mock = MagicMock() str(mock) self.assertTrue(mock.__str__.called) mock.reset_mock() self.assertFalse(mock.__str__.called) def test_dir(self): # overriding the default implementation for mock in Mock(), MagicMock(): def _dir(self): return ['foo'] mock.__dir__ = _dir self.assertEqual(dir(mock), ['foo']) @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy") def test_bound_methods(self): m = Mock() # XXXX should this be an expected failure instead? # this seems like it should work, but is hard to do without introducing # other api inconsistencies. Failure message could be better though. m.__iter__ = [3].__iter__ self.assertRaises(TypeError, iter, m) def test_magic_method_type(self): class Foo(MagicMock): pass foo = Foo() self.assertIsInstance(foo.__int__, Foo) def test_descriptor_from_class(self): m = MagicMock() type(m).__str__.return_value = 'foo' self.assertEqual(str(m), 'foo') def test_iterable_as_iter_return_value(self): m = MagicMock() m.__iter__.return_value = [1, 2, 3] self.assertEqual(list(m), [1, 2, 3]) self.assertEqual(list(m), [1, 2, 3]) m.__iter__.return_value = iter([4, 5, 6]) self.assertEqual(list(m), [4, 5, 6]) self.assertEqual(list(m), []) if __name__ == '__main__': unittest.main()
agpl-3.0
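A condensed illustration of the behaviors the test file above pins down: a plain Mock supports a magic method only after you assign one, while MagicMock preconfigures them all with sensible defaults.

from unittest.mock import Mock, MagicMock

m = Mock()
m.__getitem__ = Mock(return_value='fish')  # must be set on the class-side name
print(m['anything'])           # fish

mm = MagicMock()
mm.__iter__.return_value = iter([1, 2, 3])
print(list(mm))                # [1, 2, 3]
print(int(MagicMock()))        # 1 -- one of MagicMock's numeric defaults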
dursk/django
tests/custom_pk/fields.py
379
1731
import random import string from django.db import models from django.utils import six from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class MyWrapper(object): def __init__(self, value): self.value = value def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.value) def __str__(self): return self.value def __eq__(self, other): if isinstance(other, self.__class__): return self.value == other.value return self.value == other class MyAutoField(models.CharField): def __init__(self, *args, **kwargs): kwargs['max_length'] = 10 super(MyAutoField, self).__init__(*args, **kwargs) def pre_save(self, instance, add): value = getattr(instance, self.attname, None) if not value: value = MyWrapper(''.join(random.sample(string.ascii_lowercase, 10))) setattr(instance, self.attname, value) return value def to_python(self, value): if not value: return if not isinstance(value, MyWrapper): value = MyWrapper(value) return value def from_db_value(self, value, expression, connection, context): if not value: return return MyWrapper(value) def get_db_prep_save(self, value, connection): if not value: return if isinstance(value, MyWrapper): return six.text_type(value) return value def get_db_prep_value(self, value, connection, prepared=False): if not value: return if isinstance(value, MyWrapper): return six.text_type(value) return value
bsd-3-clause
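These fields are meant to be plugged into a model as a custom primary key; a hypothetical model using them could look like this (Django's own tests/custom_pk suite wires MyAutoField up the same way, though the model below is illustrative and the import path is assumed):

from django.db import models

from .fields import MyAutoField  # module path assumed


class Employee(models.Model):
    # pre_save() fills this with a random 10-letter MyWrapper on first save;
    # get_db_prep_save() serializes the wrapper back to text for the database.
    code = MyAutoField(primary_key=True, db_index=True)
    name = models.CharField(max_length=50)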
vladikr/nova_drafts
nova/openstack/common/service.py
16
15336
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import errno import logging as std_logging import os import random import signal import sys import time try: # Importing just the symbol here because the io module does not # exist in Python 2.6. from io import UnsupportedOperation # noqa except ImportError: # Python 2.6 UnsupportedOperation = None import eventlet from eventlet import event from oslo.config import cfg from nova.openstack.common import eventlet_backdoor from nova.openstack.common.gettextutils import _LE, _LI, _LW from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import systemd from nova.openstack.common import threadgroup rpc = importutils.try_import('nova.openstack.common.rpc') CONF = cfg.CONF LOG = logging.getLogger(__name__) def _sighup_supported(): return hasattr(signal, 'SIGHUP') def _is_daemon(): # The process group for a foreground process will match the # process group of the controlling terminal. If those values do # not match, or ioctl() fails on the stdout file handle, we assume # the process is running in the background as a daemon. # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics try: is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) except OSError as err: if err.errno == errno.ENOTTY: # Assume we are a daemon because there is no terminal. is_daemon = True else: raise except UnsupportedOperation: # Could not get the fileno for stdout, so we must be a daemon. is_daemon = True return is_daemon def _is_sighup_and_daemon(signo): if not (_sighup_supported() and signo == signal.SIGHUP): # Avoid checking if we are a daemon, because the signal isn't # SIGHUP. return False return _is_daemon() def _signo_to_signame(signo): signals = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'} if _sighup_supported(): signals[signal.SIGHUP] = 'SIGHUP' return signals[signo] def _set_signals_handler(handler): signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) if _sighup_supported(): signal.signal(signal.SIGHUP, handler) class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self.services = Services() self.backdoor_port = eventlet_backdoor.initialize_if_enabled() def launch_service(self, service): """Load and start the given service. :param service: The service you would like to start. :returns: None """ service.backdoor_port = self.backdoor_port self.services.add(service) def stop(self): """Stop all services which are currently running. :returns: None """ self.services.stop() def wait(self): """Waits until all services have been stopped, and then returns. 
:returns: None """ self.services.wait() def restart(self): """Reload config files and restart service. :returns: None """ cfg.CONF.reload_config_files() self.services.restart() class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) raise SignalExit(signo) def handle_signal(self): _set_signals_handler(self._handle_signal) def _wait_for_exit_or_signal(self, ready_callback=None): status = None signo = 0 LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) try: if ready_callback: ready_callback() super(ServiceLauncher, self).wait() except SignalExit as exc: signame = _signo_to_signame(exc.signo) LOG.info(_LI('Caught %s, exiting'), signame) status = exc.code signo = exc.signo except SystemExit as exc: status = exc.code finally: self.stop() if rpc: try: rpc.cleanup() except Exception: # We're shutting down, so it doesn't matter at this point. LOG.exception(_LE('Exception during rpc cleanup.')) return status, signo def wait(self, ready_callback=None): while True: self.handle_signal() status, signo = self._wait_for_exit_or_signal(ready_callback) if not _is_sighup_and_daemon(signo): return status self.restart() class ServiceWrapper(object): def __init__(self, service, workers): self.service = service self.workers = workers self.children = set() self.forktimes = [] class ProcessLauncher(object): def __init__(self, wait_interval=0.01): """Constructor. :param wait_interval: The interval to sleep for between checks of child process exit. """ self.children = {} self.sigcaught = None self.running = True self.wait_interval = wait_interval rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') self.handle_signal() def handle_signal(self): _set_signals_handler(self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() LOG.info(_LI('Parent process has died unexpectedly, exiting')) sys.exit(1) def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) def _sighup(*args): signal.signal(signal.SIGHUP, signal.SIG_DFL) raise SignalExit(signal.SIGHUP) signal.signal(signal.SIGTERM, _sigterm) if _sighup_supported(): signal.signal(signal.SIGHUP, _sighup) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) def _child_wait_for_exit_or_signal(self, launcher): status = 0 signo = 0 # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. 
try: launcher.wait() except SignalExit as exc: signame = _signo_to_signame(exc.signo) LOG.info(_LI('Caught %s, exiting'), signame) status = exc.code signo = exc.signo except SystemExit as exc: status = exc.code except BaseException: LOG.exception(_LE('Unhandled exception')) status = 2 finally: launcher.stop() return status, signo def _child_process(self, service): self._child_process_handle_signal() # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() # Close write to ensure only parent has it open os.close(self.writepipe) # Create greenthread to watch for parent to close pipe eventlet.spawn_n(self._pipe_watcher) # Reseed random number generator random.seed() launcher = Launcher() launcher.launch_service(service) return launcher def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: # Limit ourselves to one process a second (over the period of # number of workers * 1 second). This will allow workers to # start up quickly but ensure we don't fork off children that # die instantly too quickly. if time.time() - wrap.forktimes[0] < wrap.workers: LOG.info(_LI('Forking too fast, sleeping')) time.sleep(1) wrap.forktimes.pop(0) wrap.forktimes.append(time.time()) pid = os.fork() if pid == 0: launcher = self._child_process(wrap.service) while True: self._child_process_handle_signal() status, signo = self._child_wait_for_exit_or_signal(launcher) if not _is_sighup_and_daemon(signo): break launcher.restart() os._exit(status) LOG.info(_LI('Started child %d'), pid) wrap.children.add(pid) self.children[pid] = wrap return pid def launch_service(self, service, workers=1): wrap = ServiceWrapper(service, workers) LOG.info(_LI('Starting %d workers'), wrap.workers) while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) LOG.info(_LI('Child %(pid)s exited with status %(code)d'), dict(pid=pid, code=code)) if pid not in self.children: LOG.warning(_LW('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap def _respawn_children(self): while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) eventlet.greenthread.sleep(self.wait_interval) continue while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def wait(self): """Loop waiting on children to die and respawning as necessary.""" LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) try: while True: self.handle_signal() self._respawn_children() if self.sigcaught: signame = _signo_to_signame(self.sigcaught) LOG.info(_LI('Caught %s, stopping children'), signame) if not _is_sighup_and_daemon(self.sigcaught): break for pid in self.children: os.kill(pid, signal.SIGHUP) self.running = True self.sigcaught = None except eventlet.greenlet.GreenletExit: LOG.info(_LI("Wait called after thread killed. 
Cleaning up.")) for pid in self.children: try: os.kill(pid, signal.SIGTERM) except OSError as exc: if exc.errno != errno.ESRCH: raise # Wait for children to die if self.children: LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) while self.children: self._wait_child() class Service(object): """Service object for binaries running on hosts.""" def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) # signal that the service is done shutting itself down: self._done = event.Event() def reset(self): # NOTE(Fengqian): docs for Event.reset() recommend against using it self._done = event.Event() def start(self): pass def stop(self): self.tg.stop() self.tg.wait() # Signal that service cleanup is done: if not self._done.ready(): self._done.send() def wait(self): self._done.wait() class Services(object): def __init__(self): self.services = [] self.tg = threadgroup.ThreadGroup() self.done = event.Event() def add(self, service): self.services.append(service) self.tg.add_thread(self.run_service, service, self.done) def stop(self): # wait for graceful shutdown of services: for service in self.services: service.stop() service.wait() # Each service has performed cleanup, now signal that the run_service # wrapper threads can now die: if not self.done.ready(): self.done.send() # reap threads: self.tg.stop() def wait(self): self.tg.wait() def restart(self): self.stop() self.done = event.Event() for restart_service in self.services: restart_service.reset() self.tg.add_thread(self.run_service, restart_service, self.done) @staticmethod def run_service(service, done): """Service start wrapper. :param service: service to run :param done: event to wait on until a shutdown is triggered :returns: None """ service.start() systemd.notify_once() done.wait() def launch(service, workers=1): if workers is None or workers == 1: launcher = ServiceLauncher() launcher.launch_service(service) else: launcher = ProcessLauncher() launcher.launch_service(service, workers=workers) return launcher
apache-2.0
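As a usage illustration for the launcher API in the file above — a minimal sketch, not part of the original module: it assumes the file is importable as `service`, and that the oslo `threadgroup` used by `Service` provides `add_timer(interval, callback)`. Per `launch()` above, `workers > 1` selects `ProcessLauncher` and `workers` of 1 or None selects `ServiceLauncher`.

import service  # hypothetical import name for the module above


class HeartbeatService(service.Service):
    """Toy service: periodically prints from the thread group."""

    def start(self):
        # self.tg is the ThreadGroup created in Service.__init__();
        # add_timer() is assumed from the oslo threadgroup API.
        self.tg.add_timer(10, self._beat)

    def _beat(self):
        print('still alive')


if __name__ == '__main__':
    # workers=2 forks two children, each running its own Launcher;
    # wait() respawns children that exit, as in ProcessLauncher above.
    launcher = service.launch(HeartbeatService(), workers=2)
    launcher.wait()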
Communities-Communications/cc-odoo
addons/sale/report/__init__.py
370
1086
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import sale_report
import invoice_report

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
2014c2g19/2014c2g19
exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/logging/handlers.py
736
55579
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ Additional handlers for the logging package for Python. The core package is based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved. To use, simply 'import logging.handlers' and log away! """ import errno, logging, socket, os, pickle, struct, time, re from codecs import BOM_UTF8 from stat import ST_DEV, ST_INO, ST_MTIME import queue try: import threading except ImportError: #pragma: no cover threading = None # # Some constants... # DEFAULT_TCP_LOGGING_PORT = 9020 DEFAULT_UDP_LOGGING_PORT = 9021 DEFAULT_HTTP_LOGGING_PORT = 9022 DEFAULT_SOAP_LOGGING_PORT = 9023 SYSLOG_UDP_PORT = 514 SYSLOG_TCP_PORT = 514 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day class BaseRotatingHandler(logging.FileHandler): """ Base class for handlers that rotate log files at a certain point. Not meant to be instantiated directly. Instead, use RotatingFileHandler or TimedRotatingFileHandler. """ def __init__(self, filename, mode, encoding=None, delay=False): """ Use the specified filename for streamed logging """ logging.FileHandler.__init__(self, filename, mode, encoding, delay) self.mode = mode self.encoding = encoding self.namer = None self.rotator = None def emit(self, record): """ Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() logging.FileHandler.emit(self, record) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def rotation_filename(self, default_name): """ Modify the filename of a log file when rotating. This is provided so that a custom filename can be provided. The default implementation calls the 'namer' attribute of the handler, if it's callable, passing the default name to it. If the attribute isn't callable (the default is None), the name is returned unchanged. :param default_name: The default name for the log file. """ if not callable(self.namer): result = default_name else: result = self.namer(default_name) return result def rotate(self, source, dest): """ When rotating, rotate the current log. The default implementation calls the 'rotator' attribute of the handler, if it's callable, passing the source and dest arguments to it. If the attribute isn't callable (the default is None), the source is simply renamed to the destination. :param source: The source filename. This is normally the base filename, e.g. 'test.log' :param dest: The destination filename. This is normally what the source is rotated to, e.g. 
'test.log.1'. """ if not callable(self.rotator): # Issue 18940: A file may not have been created if delay is True. if os.path.exists(source): os.rename(source, dest) else: self.rotator(source, dest) class RotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a set of files, which switches from one file to the next when the current file reaches a certain size. """ def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False): """ Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. If maxBytes is zero, rollover never occurs. """ # If rotation/rollover is wanted, it doesn't make sense to use another # mode. If for example 'w' were specified, then if there were multiple # runs of the calling application, the logs from previous runs would be # lost if the 'w' is respected, because the log file would be truncated # on each run. if maxBytes > 0: mode = 'a' BaseRotatingHandler.__init__(self, filename, mode, encoding, delay) self.maxBytes = maxBytes self.backupCount = backupCount def doRollover(self): """ Do a rollover, as described in __init__(). """ if self.stream: self.stream.close() self.stream = None if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1)) if os.path.exists(sfn): if os.path.exists(dfn): os.remove(dfn) os.rename(sfn, dfn) dfn = self.rotation_filename(self.baseFilename + ".1") if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if not self.delay: self.stream = self._open() def shouldRollover(self, record): """ Determine if rollover should occur. Basically, see if the supplied record would cause the file to exceed the size limit we have. """ if self.stream is None: # delay was set... self.stream = self._open() if self.maxBytes > 0: # are we rolling over? msg = "%s\n" % self.format(record) self.stream.seek(0, 2) #due to non-posix-compliant Windows feature if self.stream.tell() + len(msg) >= self.maxBytes: return 1 return 0 class TimedRotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a file, rotating the log file at certain timed intervals. If backupCount is > 0, when rollover is done, no more than backupCount files are kept - the oldest ones are deleted. """ def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False): BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay) self.when = when.upper() self.backupCount = backupCount self.utc = utc # Calculate the real rollover interval, which is just the number of # seconds between rollovers. Also set the filename suffix used when # a rollover occurs. 
Current 'when' events supported: # S - Seconds # M - Minutes # H - Hours # D - Days # midnight - roll over at midnight # W{0-6} - roll over on a certain day; 0 - Monday # # Case of the 'when' specifier is not important; lower or upper case # will work. if self.when == 'S': self.interval = 1 # one second self.suffix = "%Y-%m-%d_%H-%M-%S" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" elif self.when == 'M': self.interval = 60 # one minute self.suffix = "%Y-%m-%d_%H-%M" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" elif self.when == 'H': self.interval = 60 * 60 # one hour self.suffix = "%Y-%m-%d_%H" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" elif self.when == 'D' or self.when == 'MIDNIGHT': self.interval = 60 * 60 * 24 # one day self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" elif self.when.startswith('W'): self.interval = 60 * 60 * 24 * 7 # one week if len(self.when) != 2: raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) if self.when[1] < '0' or self.when[1] > '6': raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) self.dayOfWeek = int(self.when[1]) self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" else: raise ValueError("Invalid rollover interval specified: %s" % self.when) self.extMatch = re.compile(self.extMatch, re.ASCII) self.interval = self.interval * interval # multiply by units requested if os.path.exists(filename): t = os.stat(filename)[ST_MTIME] else: t = int(time.time()) self.rolloverAt = self.computeRollover(t) def computeRollover(self, currentTime): """ Work out the rollover time based on the specified time. """ result = currentTime + self.interval # If we are rolling over at midnight or weekly, then the interval is already known. # What we need to figure out is WHEN the next interval is. In other words, # if you are rolling over at midnight, then your base interval is 1 day, # but you want to start that one day clock at midnight, not now. So, we # have to fudge the rolloverAt value in order to trigger the first rollover # at the right time. After that, the regular interval will take care of # the rest. Note that this code doesn't care about leap seconds. :) if self.when == 'MIDNIGHT' or self.when.startswith('W'): # This could be done with less code, but I wanted it to be clear if self.utc: t = time.gmtime(currentTime) else: t = time.localtime(currentTime) currentHour = t[3] currentMinute = t[4] currentSecond = t[5] # r is the number of seconds left between now and midnight r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + currentSecond) result = currentTime + r # If we are rolling over on a certain day, add in the number of days until # the next rollover, but offset by 1 since we just calculated the time # until the next day starts. There are three cases: # Case 1) The day to rollover is today; in this case, do nothing # Case 2) The day to rollover is further in the interval (i.e., today is # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to # next rollover is simply 6 - 2 - 1, or 3. # Case 3) The day to rollover is behind us in the interval (i.e., today # is day 5 (Saturday) and rollover is on day 3 (Thursday). # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the # number of days left in the current week (1) plus the number # of days in the next week until the rollover day (3). # The calculations described in 2) and 3) above need to have a day added. 
# This is because the above time calculation takes us to midnight on this # day, i.e. the start of the next day. if self.when.startswith('W'): day = t[6] # 0 is Monday if day != self.dayOfWeek: if day < self.dayOfWeek: daysToWait = self.dayOfWeek - day else: daysToWait = 6 - day + self.dayOfWeek + 1 newRolloverAt = result + (daysToWait * (60 * 60 * 24)) if not self.utc: dstNow = t[-1] dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend result = newRolloverAt return result def shouldRollover(self, record): """ Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ t = int(time.time()) if t >= self.rolloverAt: return 1 return 0 def getFilesToDelete(self): """ Determine the files to delete when rolling over. More specific than the earlier method, which just used glob.glob(). """ dirName, baseName = os.path.split(self.baseFilename) fileNames = os.listdir(dirName) result = [] prefix = baseName + "." plen = len(prefix) for fileName in fileNames: if fileName[:plen] == prefix: suffix = fileName[plen:] if self.extMatch.match(suffix): result.append(os.path.join(dirName, fileName)) result.sort() if len(result) < self.backupCount: result = [] else: result = result[:len(result) - self.backupCount] return result def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. """ if self.stream: self.stream.close() self.stream = None # get the time that this sequence started at and make it a TimeTuple currentTime = int(time.time()) dstNow = time.localtime(currentTime)[-1] t = self.rolloverAt - self.interval if self.utc: timeTuple = time.gmtime(t) else: timeTuple = time.localtime(t) dstThen = timeTuple[-1] if dstNow != dstThen: if dstNow: addend = 3600 else: addend = -3600 timeTuple = time.localtime(t + addend) dfn = self.rotation_filename(self.baseFilename + "." + time.strftime(self.suffix, timeTuple)) if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if self.backupCount > 0: for s in self.getFilesToDelete(): os.remove(s) if not self.delay: self.stream = self._open() newRolloverAt = self.computeRollover(currentTime) while newRolloverAt <= currentTime: newRolloverAt = newRolloverAt + self.interval #If DST changes and midnight or weekly rollover, adjust for this. if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 newRolloverAt += addend self.rolloverAt = newRolloverAt class WatchedFileHandler(logging.FileHandler): """ A handler for logging to a file, which watches the file to see if it has changed while in use. This can happen because of usage of programs such as newsyslog and logrotate which perform log file rotation. 
This handler, intended for use under Unix, watches the file to see if it has changed since the last emit. (A file has changed if its device or inode have changed.) If it has changed, the old file stream is closed, and the file opened to get a new stream. This handler is not appropriate for use under Windows, because under Windows open files cannot be moved or renamed - logging opens the files with exclusive locks - and so there is no need for such a handler. Furthermore, ST_INO is not supported under Windows; stat always returns zero for this value. This handler is based on a suggestion and patch by Chad J. Schroeder. """ def __init__(self, filename, mode='a', encoding=None, delay=False): logging.FileHandler.__init__(self, filename, mode, encoding, delay) self.dev, self.ino = -1, -1 self._statstream() def _statstream(self): if self.stream: sres = os.fstat(self.stream.fileno()) self.dev, self.ino = sres[ST_DEV], sres[ST_INO] def emit(self, record): """ Emit a record. First check if the underlying file has changed, and if it has, close the old stream and reopen the file to get the current stream. """ # Reduce the chance of race conditions by stat'ing by path only # once and then fstat'ing our new fd if we opened a new log stream. # See issue #14632: Thanks to John Mulligan for the problem report # and patch. try: # stat the file by path, checking for existence sres = os.stat(self.baseFilename) except OSError as err: if err.errno == errno.ENOENT: sres = None else: raise # compare file system stat with that of our stream file handle if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: if self.stream is not None: # we have an open file handle, clean it up self.stream.flush() self.stream.close() # open a new file handle and get new stat info from that fd self.stream = self._open() self._statstream() logging.FileHandler.emit(self, record) class SocketHandler(logging.Handler): """ A handler class which writes logging records, in pickle format, to a streaming socket. The socket is kept open across logging calls. If the peer resets it, an attempt is made to reconnect on the next call. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. When the attribute *closeOnError* is set to True - if a socket error occurs, the socket is silently closed and then reopened on the next logging call. """ logging.Handler.__init__(self) self.host = host self.port = port self.sock = None self.closeOnError = False self.retryTime = None # # Exponential backoff parameters. # self.retryStart = 1.0 self.retryMax = 30.0 self.retryFactor = 2.0 def makeSocket(self, timeout=1): """ A factory method which allows subclasses to define the precise type of socket they want. """ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if hasattr(s, 'settimeout'): s.settimeout(timeout) try: s.connect((self.host, self.port)) return s except socket.error: s.close() raise def createSocket(self): """ Try to create a socket, using an exponential backoff with a max retry time. Thanks to Robert Olson for the original patch (SF #815911) which has been slightly refactored. 
""" now = time.time() # Either retryTime is None, in which case this # is the first time back after a disconnect, or # we've waited long enough. if self.retryTime is None: attempt = True else: attempt = (now >= self.retryTime) if attempt: try: self.sock = self.makeSocket() self.retryTime = None # next time, no delay before trying except socket.error: #Creation failed, so set the retry time and return. if self.retryTime is None: self.retryPeriod = self.retryStart else: self.retryPeriod = self.retryPeriod * self.retryFactor if self.retryPeriod > self.retryMax: self.retryPeriod = self.retryMax self.retryTime = now + self.retryPeriod def send(self, s): """ Send a pickled string to the socket. This function allows for partial sends which can happen when the network is busy. """ if self.sock is None: self.createSocket() #self.sock can be None either because we haven't reached the retry #time yet, or because we have reached the retry time and retried, #but are still unable to connect. if self.sock: try: if hasattr(self.sock, "sendall"): self.sock.sendall(s) else: #pragma: no cover sentsofar = 0 left = len(s) while left > 0: sent = self.sock.send(s[sentsofar:]) sentsofar = sentsofar + sent left = left - sent except socket.error: #pragma: no cover self.sock.close() self.sock = None # so we can call createSocket next time def makePickle(self, record): """ Pickles the record in binary format with a length prefix, and returns it ready for transmission across the socket. """ ei = record.exc_info if ei: # just to get traceback text into record.exc_text ... dummy = self.format(record) # See issue #14436: If msg or args are objects, they may not be # available on the receiving end. So we convert the msg % args # to a string, save it as msg and zap the args. d = dict(record.__dict__) d['msg'] = record.getMessage() d['args'] = None d['exc_info'] = None s = pickle.dumps(d, 1) slen = struct.pack(">L", len(s)) return slen + s def handleError(self, record): """ Handle an error during logging. An error has occurred during logging. Most likely cause - connection lost. Close the socket so that we can retry on the next event. """ if self.closeOnError and self.sock: self.sock.close() self.sock = None #try to reconnect next time else: logging.Handler.handleError(self, record) def emit(self, record): """ Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket. """ try: s = self.makePickle(record) self.send(s) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def close(self): """ Closes the socket. """ self.acquire() try: if self.sock: self.sock.close() self.sock = None logging.Handler.close(self) finally: self.release() class DatagramHandler(SocketHandler): """ A handler class which writes logging records, in pickle format, to a datagram socket. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. """ SocketHandler.__init__(self, host, port) self.closeOnError = False def makeSocket(self): """ The factory method of SocketHandler is here overridden to create a UDP socket (SOCK_DGRAM). 
""" s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return s def send(self, s): """ Send a pickled string to a socket. This function no longer allows for partial sends which can happen when the network is busy - UDP does not guarantee delivery and can deliver packets out of sequence. """ if self.sock is None: self.createSocket() self.sock.sendto(s, (self.host, self.port)) class SysLogHandler(logging.Handler): """ A handler class which sends formatted logging records to a syslog server. Based on Sam Rushing's syslog module: http://www.nightmare.com/squirl/python-ext/misc/syslog.py Contributed by Nicolas Untz (after which minor refactoring changes have been made). """ # from <linux/sys/syslog.h>: # ====================================================================== # priorities/facilities are encoded into a single 32-bit quantity, where # the bottom 3 bits are the priority (0-7) and the top 28 bits are the # facility (0-big number). Both the priorities and the facilities map # roughly one-to-one to strings in the syslogd(8) source code. This # mapping is included in this file. # # priorities (these are ordered) LOG_EMERG = 0 # system is unusable LOG_ALERT = 1 # action must be taken immediately LOG_CRIT = 2 # critical conditions LOG_ERR = 3 # error conditions LOG_WARNING = 4 # warning conditions LOG_NOTICE = 5 # normal but significant condition LOG_INFO = 6 # informational LOG_DEBUG = 7 # debug-level messages # facility codes LOG_KERN = 0 # kernel messages LOG_USER = 1 # random user-level messages LOG_MAIL = 2 # mail system LOG_DAEMON = 3 # system daemons LOG_AUTH = 4 # security/authorization messages LOG_SYSLOG = 5 # messages generated internally by syslogd LOG_LPR = 6 # line printer subsystem LOG_NEWS = 7 # network news subsystem LOG_UUCP = 8 # UUCP subsystem LOG_CRON = 9 # clock daemon LOG_AUTHPRIV = 10 # security/authorization messages (private) LOG_FTP = 11 # FTP daemon # other codes through 15 reserved for system use LOG_LOCAL0 = 16 # reserved for local use LOG_LOCAL1 = 17 # reserved for local use LOG_LOCAL2 = 18 # reserved for local use LOG_LOCAL3 = 19 # reserved for local use LOG_LOCAL4 = 20 # reserved for local use LOG_LOCAL5 = 21 # reserved for local use LOG_LOCAL6 = 22 # reserved for local use LOG_LOCAL7 = 23 # reserved for local use priority_names = { "alert": LOG_ALERT, "crit": LOG_CRIT, "critical": LOG_CRIT, "debug": LOG_DEBUG, "emerg": LOG_EMERG, "err": LOG_ERR, "error": LOG_ERR, # DEPRECATED "info": LOG_INFO, "notice": LOG_NOTICE, "panic": LOG_EMERG, # DEPRECATED "warn": LOG_WARNING, # DEPRECATED "warning": LOG_WARNING, } facility_names = { "auth": LOG_AUTH, "authpriv": LOG_AUTHPRIV, "cron": LOG_CRON, "daemon": LOG_DAEMON, "ftp": LOG_FTP, "kern": LOG_KERN, "lpr": LOG_LPR, "mail": LOG_MAIL, "news": LOG_NEWS, "security": LOG_AUTH, # DEPRECATED "syslog": LOG_SYSLOG, "user": LOG_USER, "uucp": LOG_UUCP, "local0": LOG_LOCAL0, "local1": LOG_LOCAL1, "local2": LOG_LOCAL2, "local3": LOG_LOCAL3, "local4": LOG_LOCAL4, "local5": LOG_LOCAL5, "local6": LOG_LOCAL6, "local7": LOG_LOCAL7, } #The map below appears to be trivially lowercasing the key. However, #there's more to it than meets the eye - in some locales, lowercasing #gives unexpected results. See SF #1524081: in the Turkish locale, #"INFO".lower() != "info" priority_map = { "DEBUG" : "debug", "INFO" : "info", "WARNING" : "warning", "ERROR" : "error", "CRITICAL" : "critical" } def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER, socktype=None): """ Initialize a handler. 
If address is specified as a string, a UNIX socket is used. To log to a local syslogd, "SysLogHandler(address="/dev/log")" can be used. If facility is not specified, LOG_USER is used. """ logging.Handler.__init__(self) self.address = address self.facility = facility self.socktype = socktype if isinstance(address, str): self.unixsocket = True self._connect_unixsocket(address) else: self.unixsocket = False if socktype is None: socktype = socket.SOCK_DGRAM self.socket = socket.socket(socket.AF_INET, socktype) if socktype == socket.SOCK_STREAM: self.socket.connect(address) self.socktype = socktype self.formatter = None def _connect_unixsocket(self, address): use_socktype = self.socktype if use_socktype is None: use_socktype = socket.SOCK_DGRAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except socket.error: self.socket.close() if self.socktype is not None: # user didn't specify falling back, so fail raise use_socktype = socket.SOCK_STREAM self.socket = socket.socket(socket.AF_UNIX, use_socktype) try: self.socket.connect(address) # it worked, so set self.socktype to the used type self.socktype = use_socktype except socket.error: self.socket.close() raise def encodePriority(self, facility, priority): """ Encode the facility and priority. You can pass in strings or integers - if strings are passed, the facility_names and priority_names mapping dictionaries are used to convert them to integers. """ if isinstance(facility, str): facility = self.facility_names[facility] if isinstance(priority, str): priority = self.priority_names[priority] return (facility << 3) | priority def close (self): """ Closes the socket. """ self.acquire() try: self.socket.close() logging.Handler.close(self) finally: self.release() def mapPriority(self, levelName): """ Map a logging level name to a key in the priority_names map. This is useful in two scenarios: when custom levels are being used, and in the case where you can't do a straightforward mapping by lowercasing the logging level name because of locale- specific issues (see SF #1524081). """ return self.priority_map.get(levelName, "warning") ident = '' # prepended to all messages append_nul = True # some old syslog daemons expect a NUL terminator def emit(self, record): """ Emit a record. The record is formatted, and then sent to the syslog server. If exception information is present, it is NOT sent to the server. """ msg = self.format(record) if self.ident: msg = self.ident + msg if self.append_nul: msg += '\000' """ We need to convert record level to lowercase, maybe this will change in the future. """ prio = '<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)) prio = prio.encode('utf-8') # Message is a string. Convert to bytes as required by RFC 5424 msg = msg.encode('utf-8') msg = prio + msg try: if self.unixsocket: try: self.socket.send(msg) except socket.error: self.socket.close() self._connect_unixsocket(self.address) self.socket.send(msg) elif self.socktype == socket.SOCK_DGRAM: self.socket.sendto(msg, self.address) else: self.socket.sendall(msg) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class SMTPHandler(logging.Handler): """ A handler class which sends an SMTP email for each logging event. """ def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None, secure=None, timeout=5.0): """ Initialize the handler. 
Initialize the instance with the from and to addresses and subject line of the email. To specify a non-standard SMTP port, use the (host, port) tuple format for the mailhost argument. To specify authentication credentials, supply a (username, password) tuple for the credentials argument. To specify the use of a secure protocol (TLS), pass in a tuple for the secure argument. This will only be used when authentication credentials are supplied. The tuple will be either an empty tuple, or a single-value tuple with the name of a keyfile, or a 2-value tuple with the names of the keyfile and certificate file. (This tuple is passed to the `starttls` method). A timeout in seconds can be specified for the SMTP connection (the default is one second). """ logging.Handler.__init__(self) if isinstance(mailhost, tuple): self.mailhost, self.mailport = mailhost else: self.mailhost, self.mailport = mailhost, None if isinstance(credentials, tuple): self.username, self.password = credentials else: self.username = None self.fromaddr = fromaddr if isinstance(toaddrs, str): toaddrs = [toaddrs] self.toaddrs = toaddrs self.subject = subject self.secure = secure self.timeout = timeout def getSubject(self, record): """ Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method. """ return self.subject def emit(self, record): """ Emit a record. Format the record and send it to the specified addressees. """ try: import smtplib from email.utils import formatdate port = self.mailport if not port: port = smtplib.SMTP_PORT smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) msg = self.format(record) msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( self.fromaddr, ",".join(self.toaddrs), self.getSubject(record), formatdate(), msg) if self.username: if self.secure is not None: smtp.ehlo() smtp.starttls(*self.secure) smtp.ehlo() smtp.login(self.username, self.password) smtp.sendmail(self.fromaddr, self.toaddrs, msg) smtp.quit() except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class NTEventLogHandler(logging.Handler): """ A handler class which sends events to the NT Event Log. Adds a registry entry for the specified application name. If no dllname is provided, win32service.pyd (which contains some basic message placeholders) is used. Note that use of these placeholders will make your event logs big, as the entire message source is held in the log. If you want slimmer logs, you have to pass in the name of your own DLL which contains the message definitions you want to use in the event log. 
""" def __init__(self, appname, dllname=None, logtype="Application"): logging.Handler.__init__(self) try: import win32evtlogutil, win32evtlog self.appname = appname self._welu = win32evtlogutil if not dllname: dllname = os.path.split(self._welu.__file__) dllname = os.path.split(dllname[0]) dllname = os.path.join(dllname[0], r'win32service.pyd') self.dllname = dllname self.logtype = logtype self._welu.AddSourceToRegistry(appname, dllname, logtype) self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE self.typemap = { logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, } except ImportError: print("The Python Win32 extensions for NT (service, event "\ "logging) appear not to be available.") self._welu = None def getMessageID(self, record): """ Return the message ID for the event record. If you are using your own messages, you could do this by having the msg passed to the logger being an ID rather than a formatting string. Then, in here, you could use a dictionary lookup to get the message ID. This version returns 1, which is the base message ID in win32service.pyd. """ return 1 def getEventCategory(self, record): """ Return the event category for the record. Override this if you want to specify your own categories. This version returns 0. """ return 0 def getEventType(self, record): """ Return the event type for the record. Override this if you want to specify your own types. This version does a mapping using the handler's typemap attribute, which is set up in __init__() to a dictionary which contains mappings for DEBUG, INFO, WARNING, ERROR and CRITICAL. If you are using your own levels you will either need to override this method or place a suitable dictionary in the handler's typemap attribute. """ return self.typemap.get(record.levelno, self.deftype) def emit(self, record): """ Emit a record. Determine the message ID, event category and event type. Then log the message in the NT event log. """ if self._welu: try: id = self.getMessageID(record) cat = self.getEventCategory(record) type = self.getEventType(record) msg = self.format(record) self._welu.ReportEvent(self.appname, id, cat, type, [msg]) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) def close(self): """ Clean up this handler. You can remove the application name from the registry as a source of event log entries. However, if you do this, you will not be able to see the events as you intended in the Event Log Viewer - it needs to be able to access the registry to get the DLL name. """ #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) logging.Handler.close(self) class HTTPHandler(logging.Handler): """ A class which sends records to a Web server, using either GET or POST semantics. """ def __init__(self, host, url, method="GET", secure=False, credentials=None): """ Initialize the instance with the host, the request URL, and the method ("GET" or "POST") """ logging.Handler.__init__(self) method = method.upper() if method not in ["GET", "POST"]: raise ValueError("method must be GET or POST") self.host = host self.url = url self.method = method self.secure = secure self.credentials = credentials def mapLogRecord(self, record): """ Default implementation of mapping the log record into a dict that is sent as the CGI data. Overwrite in your class. Contributed by Franz Glasner. 
""" return record.__dict__ def emit(self, record): """ Emit a record. Send the record to the Web server as a percent-encoded dictionary """ try: import http.client, urllib.parse host = self.host if self.secure: h = http.client.HTTPSConnection(host) else: h = http.client.HTTPConnection(host) url = self.url data = urllib.parse.urlencode(self.mapLogRecord(record)) if self.method == "GET": if (url.find('?') >= 0): sep = '&' else: sep = '?' url = url + "%c%s" % (sep, data) h.putrequest(self.method, url) # support multiple hosts on one IP address... # need to strip optional :port from host, if present i = host.find(":") if i >= 0: host = host[:i] h.putheader("Host", host) if self.method == "POST": h.putheader("Content-type", "application/x-www-form-urlencoded") h.putheader("Content-length", str(len(data))) if self.credentials: import base64 s = ('u%s:%s' % self.credentials).encode('utf-8') s = 'Basic ' + base64.b64encode(s).strip() h.putheader('Authorization', s) h.endheaders() if self.method == "POST": h.send(data.encode('utf-8')) h.getresponse() #can't do anything with the result except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) class BufferingHandler(logging.Handler): """ A handler class which buffers logging records in memory. Whenever each record is added to the buffer, a check is made to see if the buffer should be flushed. If it should, then flush() is expected to do what's needed. """ def __init__(self, capacity): """ Initialize the handler with the buffer size. """ logging.Handler.__init__(self) self.capacity = capacity self.buffer = [] def shouldFlush(self, record): """ Should the handler flush its buffer? Returns true if the buffer is up to capacity. This method can be overridden to implement custom flushing strategies. """ return (len(self.buffer) >= self.capacity) def emit(self, record): """ Emit a record. Append the record. If shouldFlush() tells us to, call flush() to process the buffer. """ self.buffer.append(record) if self.shouldFlush(record): self.flush() def flush(self): """ Override to implement custom flushing behaviour. This version just zaps the buffer to empty. """ self.acquire() try: self.buffer = [] finally: self.release() def close(self): """ Close the handler. This version just flushes and chains to the parent class' close(). """ self.flush() logging.Handler.close(self) class MemoryHandler(BufferingHandler): """ A handler class which buffers logging records in memory, periodically flushing them to a target handler. Flushing occurs whenever the buffer is full, or when an event of a certain severity or greater is seen. """ def __init__(self, capacity, flushLevel=logging.ERROR, target=None): """ Initialize the handler with the buffer size, the level at which flushing should occur and an optional target. Note that without a target being set either here or via setTarget(), a MemoryHandler is no use to anyone! """ BufferingHandler.__init__(self, capacity) self.flushLevel = flushLevel self.target = target def shouldFlush(self, record): """ Check for buffer full or a record at the flushLevel or higher. """ return (len(self.buffer) >= self.capacity) or \ (record.levelno >= self.flushLevel) def setTarget(self, target): """ Set the target handler for this handler. """ self.target = target def flush(self): """ For a MemoryHandler, flushing means just sending the buffered records to the target, if there is one. Override if you want different behaviour. The record buffer is also cleared by this operation. 
""" self.acquire() try: if self.target: for record in self.buffer: self.target.handle(record) self.buffer = [] finally: self.release() def close(self): """ Flush, set the target to None and lose the buffer. """ self.flush() self.acquire() try: self.target = None BufferingHandler.close(self) finally: self.release() class QueueHandler(logging.Handler): """ This handler sends events to a queue. Typically, it would be used together with a multiprocessing Queue to centralise logging to file in one process (in a multi-process application), so as to avoid file write contention between processes. This code is new in Python 3.2, but this class can be copy pasted into user code for use with earlier Python versions. """ def __init__(self, queue): """ Initialise an instance, using the passed queue. """ logging.Handler.__init__(self) self.queue = queue def enqueue(self, record): """ Enqueue a record. The base implementation uses put_nowait. You may want to override this method if you want to use blocking, timeouts or custom queue implementations. """ self.queue.put_nowait(record) def prepare(self, record): """ Prepares a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact. """ # The format operation gets traceback text into record.exc_text # (if there's exception data), and also puts the message into # record.message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info attribute, as it's no longer needed and, if not None, # will typically not be pickleable. self.format(record) record.msg = record.message record.args = None record.exc_info = None return record def emit(self, record): """ Emit a record. Writes the LogRecord to the queue, preparing it for pickling first. """ try: self.enqueue(self.prepare(record)) except (KeyboardInterrupt, SystemExit): #pragma: no cover raise except: self.handleError(record) if threading: class QueueListener(object): """ This class implements an internal threaded listener which watches for LogRecords being added to a queue, removes them and passes them to a list of handlers for processing. """ _sentinel = None def __init__(self, queue, *handlers): """ Initialise an instance with the specified queue and handlers. """ self.queue = queue self.handlers = handlers self._stop = threading.Event() self._thread = None def dequeue(self, block): """ Dequeue a record and return it, optionally blocking. The base implementation uses get. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ return self.queue.get(block) def start(self): """ Start the listener. This starts up a background thread to monitor the queue for LogRecords to process. """ self._thread = t = threading.Thread(target=self._monitor) t.setDaemon(True) t.start() def prepare(self , record): """ Prepare a record for handling. This method just returns the passed-in record. You may want to override this method if you need to do any custom marshalling or manipulation of the record before passing it to the handlers. """ return record def handle(self, record): """ Handle a record. This just loops through the handlers offering them the record to handle. 
""" record = self.prepare(record) for handler in self.handlers: handler.handle(record) def _monitor(self): """ Monitor the queue for records, and ask the handler to deal with them. This method runs on a separate, internal thread. The thread will terminate if it sees a sentinel object in the queue. """ q = self.queue has_task_done = hasattr(q, 'task_done') while not self._stop.isSet(): try: record = self.dequeue(True) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: pass # There might still be records in the queue. while True: try: record = self.dequeue(False) if record is self._sentinel: break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break def enqueue_sentinel(self): """ This is used to enqueue the sentinel record. The base implementation uses put_nowait. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ self.queue.put_nowait(self._sentinel) def stop(self): """ Stop the listener. This asks the thread to terminate, and then waits for it to do so. Note that if you don't call this before your application exits, there may be some records still left on the queue, which won't be processed. """ self._stop.set() self.enqueue_sentinel() self._thread.join() self._thread = None
gpl-2.0
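A short sketch of how the handlers defined above are typically wired together — a size-rotated file handler placed behind the `QueueHandler`/`QueueListener` pair from the end of the module, so log producers never block on file I/O. This mirrors the stdlib `logging.handlers` API; the filename and sizes are illustrative.

import logging
import logging.handlers
import queue

# Size-based rotation: roll app.log over near 1 MB, keeping app.log.1
# through app.log.3, as described in RotatingFileHandler above.
rotating = logging.handlers.RotatingFileHandler(
    'app.log', maxBytes=1000000, backupCount=3)

# Decouple log producers from the (comparatively slow) file handler
# using the QueueHandler/QueueListener pair defined above.
q = queue.Queue(-1)
listener = logging.handlers.QueueListener(q, rotating)

log = logging.getLogger('demo')
log.setLevel(logging.INFO)
log.addHandler(logging.handlers.QueueHandler(q))

listener.start()
log.info('message %d of %d', 1, 3)  # merged in prepare(), then enqueued
listener.stop()  # enqueues the sentinel and drains remaining records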
simonwydooghe/ansible
test/units/mock/vault_helper.py
206
1559
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

from ansible.module_utils._text import to_bytes

from ansible.parsing.vault import VaultSecret


class TextVaultSecret(VaultSecret):
    '''A secret piece of text. ie, a password. Tracks text encoding.

    The text encoding of the text may not be the default text encoding so
    we keep track of the encoding so we encode it to the same bytes.'''

    def __init__(self, text, encoding=None, errors=None, _bytes=None):
        super(TextVaultSecret, self).__init__()
        self.text = text
        self.encoding = encoding or 'utf-8'
        self._bytes = _bytes
        self.errors = errors or 'strict'

    @property
    def bytes(self):
        '''The text encoded with encoding, unless we specifically set _bytes.'''
        return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors)
gpl-3.0
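A minimal sketch of how the test helper above is used. The `units.mock.vault_helper` import path assumes Ansible's test-tree layout, and the password is made up; this is not part of Ansible's public API.

from units.mock.vault_helper import TextVaultSecret

secret = TextVaultSecret(u'hunter2')  # illustrative password
assert secret.bytes == b'hunter2'     # encoded with the utf-8 default

# A unit test would then hand (vault_id, secret) pairs to VaultLib:
# from ansible.parsing.vault import VaultLib
# vault = VaultLib([('default', secret)])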
marduk191/plugin.video.movie25
resources/libs/plugins/animetoon.py
2
16443
# -*- coding: utf-8 -*- import urllib,re,os,sys,json import xbmc, xbmcgui, xbmcaddon, xbmcplugin,time from resources.libs import main #Mash Up - by Mash2k3 2012. addon_id = 'plugin.video.movie25' selfAddon = xbmcaddon.Addon(id=addon_id) art = main.art smalllogo=art+'/smallicon.png' def AZAT(sec): import string main.addDir('0-9','http://www.animetoon.tv/'+sec+'/others',376,art+'/09.png') for i in string.ascii_uppercase: main.addDir(i,'http://www.animetoon.tv/'+sec+'/'+i.lower(),376,art+'/'+i.lower()+'.png') main.GA("Watchseries","A-Z") main.VIEWSB() def MAIN(): main.GA("Plugin","Animania") main.addDir('Search','anime',385,art+'/search.png') main.addDir('Daily Releases','dramania',384,art+'/animetoon.png') main.addDir('Popular Dubbed Anime & Cartoon List','http://www.animetoon.tv/popular-list',376,art+'/animetoon.png') main.addDir('Dubbed Anime','dramania',375,art+'/animetoon.png') main.addDir('Cartoons','dramania',382,art+'/animetoon.png') main.addDir('Movies','dramania',380,art+'/animetoon.png') def DUBBED(): main.addDir('A-Z Dubbed Anime','alpha-dubbed-anime',383,art+'/animetoon.png') main.addDir('Popular Dubbed Anime','http://www.animetoon.tv/popular-dubbed-anime',376,art+'/animetoon.png') main.addDir('Newly Released Dubbed Anime','http://www.animetoon.tv/new-dubbed-anime',376,art+'/animetoon.png') main.addDir('Recently Added Dubbed Anime','',376,art+'/animetoon.png') main.addDir('Ongoing Dubbed Anime','http://www.animetoon.tv/ongoing-dubbed-anime',376,art+'/animetoon.png') main.addDir('Completed Dubbed Anime','http://www.animetoon.tv/completed-dubbed-anime',376,art+'/animetoon.png') main.addDir('Genre Dubbed Anime','dubbed-anime-genres',386,art+'/genre.png') def MOVIES(): main.addDir('A-Z Movies','alpha-movies',383,art+'/animetoon.png') main.addDir('Popular Movies','http://www.animetoon.tv/popular-movies',376,art+'/animetoon.png') main.addDir('Newly Released Movies','http://www.animetoon.tv/new-movies',376,art+'/animetoon.png') main.addDir('Recently Added Movies','http://www.animetoon.tv/recent-movies',376,art+'/animetoon.png') main.addDir('Genre Movies','movies-genres',386,art+'/genre.png') def CARTOONS(): main.addDir('A-Z Cartoon','alpha-cartoon',383,art+'/animetoon.png') main.addDir('Popular Cartoon','http://www.animetoon.tv/popular-cartoon',376,art+'/animetoon.png') main.addDir('Newly Released Cartoon','http://www.animetoon.tv/new-cartoon',376,art+'/animetoon.png') main.addDir('Recently Added Cartoon','http://www.animetoon.tv/recent-cartoon',376,art+'/animetoon.png') main.addDir('Ongoing Cartoon','http://www.animetoon.tv/ongoing-cartoon',376,art+'/animetoon.png') main.addDir('Completed Cartoon','http://www.animetoon.tv/completed-cartoon',376,art+'/animetoon.png') main.addDir('Genre Cartoon','cartoon-genres',386,art+'/genre.png') def SEARCHAT(): keyb = xbmc.Keyboard('', 'Search Movies & Series') keyb.doModal() if (keyb.isConfirmed()): search = keyb.getText() encode=urllib.quote(search) surl='http://www.animetoon.tv/toon/search?key='+encode LIST(surl) def GENREAT(sec): link=main.OPENURL('http://www.animetoon.tv/'+sec, mobile=True) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','').replace('>Follow @GoGoAnime</a>','') match=re.compile('<tr><td><a href="(.+?)">(.+?)</a></td><td>(.+?)</td> </tr>',re.DOTALL).findall(link) for url,name,count in match: main.addDir(name+' [COLOR red]('+count+')[/COLOR]',url,376,art+'/genre.png') def LISTPOP(): link=main.OPENURL('http://www.animetoon.tv/updates', mobile=True) 
link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','').replace('>Follow @GoGoAnime</a>','') match=re.compile('<tr><td><div><a href="(.+?)">(.+?)</a></div><ul>.+?</ul></td><td><img src="(.+?)" alt="(.+?)" /></td><td>(.+?)</td></tr>',re.DOTALL).findall(link) for url,name,thumb,type,date in match: if 'Movie' in type: main.addDirMs(name+' [COLOR red]('+date+')[/COLOR] [COLOR blue]'+type+'[/COLOR]',url,381,thumb,'','','','','') else: main.addDirMs(name+' [COLOR red]('+date+')[/COLOR] [COLOR blue]'+type+'[/COLOR]',url,377,thumb,'NA','','',thumb,'') def LIST(murl): link=main.OPENURL(murl) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace('&raquo;','').replace('&rarr;','').replace(' ','') match=re.compile("""<a href="([^<]+)"><img src="(.+?)".+?<h3><a href=".+?">(.+?)</a></h3><div><span class="type_indic">(.+?)</span>.+?<div class="descr">(.+?).?<a.+?Released:</span><span class="bold">(.+?)</span>.+?Rating:</span><span class="bold">(.+?)</span>""",re.DOTALL).findall(link) dialogWait = xbmcgui.DialogProgress() ret = dialogWait.create('Please wait until Movie list is cached.') totalLinks = len(match) loadedLinks = 0 remaining_display = 'Movies/Shows Cached :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].' dialogWait.update(0,'[B]Will load instantly from now on[/B]',remaining_display) xbmc.executebuiltin("XBMC.Dialog.Close(busydialog,true)") for url,thumb,name,type,desc,year,rate in match: if 'Movie' in type: main.addDirMs(name+' [COLOR red]('+year+') [/COLOR][COLOR blue]'+rate+'[/COLOR] [COLOR tan]'+type+'[/COLOR]',url,381,thumb,desc,'','','','') else: main.addDirMs(name+' [COLOR red]('+year+') [/COLOR][COLOR blue]'+rate+'[/COLOR] [COLOR tan]'+type+'[/COLOR]',url,377,thumb,desc,'','',thumb,'') loadedLinks = loadedLinks + 1 percent = (loadedLinks * 100)/totalLinks remaining_display = 'Movies/Episodes Cached :: [B]'+str(loadedLinks)+' / '+str(totalLinks)+'[/B].' 
dialogWait.update(percent,'[B]Will load instantly from now on[/B]',remaining_display) if (dialogWait.iscanceled()): return False paginate = re.compile('''<a href="([^<]+)">Next</a>''').findall(link) if len(paginate)>0: main.addDir('Next',paginate[0],376,art+'/next2.png') main.GA("Animania","List") def GETMOVIE(mname,murl,thumb): link=main.OPENURL(murl, mobile=True) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','').replace('>Follow @GoGoAnime</a>','') match=re.compile('<h2>Videos:</h2><ul><li><a href="([^<]+)">.+?</a>',re.DOTALL).findall(link) LISTHOSTS(mname,match[0],thumb) def LISTEPI(mname,murl,pic,desc,thumb): link=main.OPENURL(murl, mobile=True) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' ','').replace('>Follow @GoGoAnime</a>','') match=re.compile('<li><a href="([^<]+)">(.+?)</a>.+?<span class="right_text">(.+?)</span>',re.DOTALL).findall(link) for url,name,date in match: main.addDirMs(name+' [COLOR red]('+date+')[/COLOR]',url,378,thumb,desc,'','','','') paginate = re.compile('''<a href="([^<]+)">Next</a>''').findall(link) if len(paginate)>0: main.addDirc('Next',paginate[0],377,art+'/next2.png',desc,'','',thumb,'') def LISTHOSTS(name,murl,thumb): name=main.removeColoredText(name) collect=[] videobug=[] yourupload=[] video44=[] play44=[] videoweed=[] cheesestream=[] videofun=[] byzoo=[] i=1 j=1 v=1 p=1 vw=1 c=1 vf=1 b=1 link=main.OPENURL(murl).replace('\/','/').replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace(' class="selected"','').replace('&rarr;','') """parts=re.compile('<li><a href="(http://www.animetoon.tv/.+?/\d-\d+).+?>(Part.+?)</a>').findall(link) print len(parts) if part_pages: for url,pname in part_pages: match=re.compile('</ul></div><div><.+?src="(.+?)"').findall(main.OPENURL(url)) for url in match: collect.append(url)""" try: count=1 match=re.findall('<div><iframe src="(.+?)"',link,re.I) for url in match: parts=re.compile('<li><a href="http://www.animetoon.tv/.+?/'+str(count)+'-([^<]+)">Part(.+?)</a>').findall(link) if len(parts)>1: for item,num in parts: furl=url.replace('part_1','part_'+str(item)) collect.append(furl) else: collect.append(url) count=count+1 except: pass for links in collect: if 'byzoo' in links: main.addDown2(name+' [COLOR tan]Byzoo Part '+str(b)+'[/COLOR]',links,379,thumb,'') byzoo.append(('Part '+str(b),links)) b=b+1 if byzoo and len(byzoo)>1: main.addDown2(name+' [COLOR tan]Byzoo Play All[/COLOR]',str(byzoo),379,thumb,'') for links in collect: if 'videobug' in links: main.addDown2(name+' [COLOR blue]VideoBug Part '+str(i)+'[/COLOR]',links,379,thumb,'') videobug.append(('Part '+str(i),links)) i=i+1 if videobug and len(videobug)>1: main.addDown2(name+' [COLOR blue]VideoBug Play All[/COLOR]',str(videobug),379,thumb,'') for links in collect: if 'yourupload' in links: main.addDown2(name+' [COLOR yellow]YourUpload Part '+str(j)+'[/COLOR]',links,379,thumb,'') yourupload.append(('Part '+str(j),links)) j=j+1 if yourupload and len(yourupload)>1: main.addDown2(name+' [COLOR yellow]YourUpload Play All[/COLOR]',str(yourupload),379,thumb,'') for links in collect: if 'video44' in links: main.addDown2(name+' [COLOR red]Video44 Part '+str(v)+'[/COLOR]',links,379,thumb,'') video44.append(('Part '+str(v),links)) v=v+1 if video44 and len(video44)>1: main.addDown2(name+' [COLOR red]Video44 Play All[/COLOR]',str(video44),379,thumb,'') for links in collect: if 'play44' in links: main.addDown2(name+' [COLOR green]Play44 Part 
'+str(p)+'[/COLOR]',links,379,thumb,'') play44.append(('Part '+str(p),links)) p=p+1 if play44 and len(play44)>1: main.addDown2(name+' [COLOR green]Play44 Play All[/COLOR]',str(play44),379,thumb,'') for links in collect: if 'videoweed' in links: main.addDown2(name+' [COLOR aqua]Videoweed Part '+str(vw)+'[/COLOR]',links,379,thumb,'') videoweed.append(('Part '+str(vw),links)) vw=vw+1 if videoweed and len(videoweed)>1: main.addDown2(name+' [COLOR aqua]Videoweed Play All[/COLOR]',str(videoweed),379,thumb,'') for links in collect: if 'cheesestream' in links: main.addDown2(name+' [COLOR purple]Cheesestream Part '+str(c)+'[/COLOR]',links,379,thumb,'') cheesestream.append(('Part '+str(c),links)) c=c+1 if cheesestream and len(cheesestream)>1: main.addDown2(name+' [COLOR purple]Cheesestream Play All[/COLOR]',str(cheesestream),379,thumb,'') for links in collect: if 'videofun' in links: main.addDown2(name+' [COLOR maroon]Videofun Part '+str(vf)+'[/COLOR]',links,379,thumb,'') videofun.append(('Part '+str(vf),links)) vf=vf+1 if videofun and len(videofun)>1: main.addDown2(name+' [COLOR maroon]Videofun Play All[/COLOR]',str(videofun),379,thumb,'') def getLink(links): if 'byzoo' in links: link=main.OPENURL(links) try:match=re.compile("playlist:.+?url: '(.+?)',",re.DOTALL).findall(link)[0] except:match=re.compile('file: "(.+?)",',re.DOTALL).findall(link)[0] match=urllib.unquote_plus(match) if 'videobug' in links: link=main.OPENURL(links) try:match=re.compile("playlist:.+?url: '(.+?)',",re.DOTALL).findall(link)[0] except:match=re.compile('file: "(.+?)",',re.DOTALL).findall(link)[0] match=urllib.unquote_plus(match) if 'yourupload' in links: link=main.OPENURL(links) try: match=re.compile('<meta property="og.+?video" content="(.+?)"/>',re.DOTALL).findall(link) if len(match)!=0: match=urllib.unquote_plus(match[0]) except:pass if 'video44' in links: link=main.OPENURL(links) try:match=re.compile("playlist:.+?url: '(.+?)',",re.DOTALL).findall(link)[0] except:match=re.compile('file: "(.+?)"',re.DOTALL).findall(link)[0] match=urllib.unquote_plus(match) if 'play44' in links: link=main.OPENURL(links) try:match=re.compile("playlist:.+?url: '(.+?)',",re.DOTALL).findall(link)[0] except:match=re.compile('file: "(.+?)"',re.DOTALL).findall(link)[0] match=urllib.unquote_plus(match) if 'videoweed' in links: try:stream_url = main.resolve_url(links) except:pass match=urllib.unquote_plus(stream_url) if 'cheesestream' in links: link=main.OPENURL(links) try:match=re.compile('<meta property="og:video" content="(.+?)"/>',re.DOTALL).findall(link)[0] except:pass match=urllib.unquote_plus(match) if 'videofun' in links: link=main.OPENURL(links) try:match=re.compile("""'fit'},.+?{url: "([^<]+)", autoPlay""",re.DOTALL).findall(link)[0] except:pass match=urllib.unquote_plus(match) return match def PLAY(mname,murl,thumb): main.GA("Animania","Watched") ok=True r = re.findall('(.+?)\ss(\d+)e(\d+)\s',mname,re.I) if r: infoLabels =main.GETMETAEpiT(mname,'','') video_type='episode' season=infoLabels['season'] episode=infoLabels['episode'] else: infoLabels =main.GETMETAT(mname,'','',thumb) video_type='movie' season='' episode='' img=infoLabels['cover_url'] fanart =infoLabels['backdrop_url'] imdb_id=infoLabels['imdb_id'] infolabels = { 'supports_meta' : 'true', 'video_type':video_type, 'name':str(infoLabels['title']), 'imdb_id':str(infoLabels['imdb_id']), 'season':str(season), 'episode':str(episode), 'year':str(infoLabels['year']) } try : xbmc.executebuiltin("XBMC.Notification(Please Wait!,Resolving Link,3000)") infoL={'Title': infoLabels['title'], 
'Plot': infoLabels['plot'], 'Genre': infoLabels['genre']} if video_type != 'episode': infoL['originalTitle']=main.removeColoredText(infoLabels['metaName']) from resources.universal import playbackengine if "'," in murl: mname=main.removeColoredText(mname) pl=xbmc.PlayList(1);pl.clear() playlist = sorted(list(set(eval(murl))), key=lambda playlist: playlist[0]) for xname,link in playlist: pl.add(getLink(link),xbmcgui.ListItem(mname+' '+xname,thumbnailImage=img)) xbmc.Player().play(pl) while xbmc.Player().isPlaying(): xbmc.sleep(2500) else: stream_url = getLink(murl) # play with bookmark player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type=video_type, title=infoLabels['title'],season=season, episode=episode, year=str(infoLabels['year']),img=img,infolabels=infoL, watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id=imdb_id) #WatchHistory if selfAddon.getSetting("whistory") == "true": from resources.universal import watchhistory wh = watchhistory.WatchHistory(addon_id) wh.add_item(mname+' '+'[COLOR green]Dramania[/COLOR]', sys.argv[0]+sys.argv[2], infolabels=infolabels, img=infoLabels['cover_url'], fanart=infoLabels['backdrop_url'], is_folder=False) player.KeepAlive() return ok except Exception, e: main.ErrorReport(e) return ok
gpl-3.0
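A minimal sketch of the multi-part playlist handling in PLAY() above, not part of the original file: the addon serializes its [('Part 1', url), ...] list with str() and recovers it with eval(); ast.literal_eval is shown here as a safer stand-in, and the sort mirrors the addon's key=lambda playlist: playlist[0] expression.

import ast

def ordered_parts(murl):
    # Recover the serialized [('Part 1', url), ...] list; literal_eval
    # accepts only Python literals, so a hostile murl cannot run code.
    parts = ast.literal_eval(murl)
    # De-duplicate, then order by the part label, as the addon does.
    return sorted(set(parts), key=lambda part: part[0])

print(ordered_parts("[('Part 2', 'http://b/2'), ('Part 1', 'http://a/1')]"))
# -> [('Part 1', 'http://a/1'), ('Part 2', 'http://b/2')]

Note that, like the original, this sorts the labels lexicographically, so 'Part 10' would order before 'Part 2'.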
tomschr/sdsc
src/sdsc/__init__.py
1
1375
#
# Copyright (c) 2017 SUSE Linux GmbH
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com

"""
sdsc Module
===================

.. default-domain:: py

Checks a given DocBook XML file for stylistic errors
"""

import logging

__projectname__ = "suse-doc-style-checker"
__programname__ = "SUSE Documentation Style Checker"
__license__ = "LGPL-2.1+"
__description__ = "checks a given DocBook XML file for stylistic errors"
__authors__ = "Stefan Knorr, Thomas Schraitle, Fabian Vogt"
__url__ = "https://github.com/tomschr/sdsc"
__version__ = "2016.7.0.0"

#: Set default logging handler to avoid "No handler found" warnings.
# See https://docs.python.org/3/howto/logging.html#library-config
logging.getLogger().addHandler(logging.NullHandler())
lgpl-2.1
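Because the package only installs a NullHandler, a consuming application decides what, if anything, gets logged. A minimal sketch, assuming sdsc is importable in the current environment:

import logging

# basicConfig must run before the import: it is a no-op once the
# package's NullHandler is already attached to the root logger.
logging.basicConfig(level=logging.INFO,
                    format="%(name)s %(levelname)s: %(message)s")

import sdsc
logging.getLogger("example").info("loaded %s %s",
                                  sdsc.__programname__, sdsc.__version__)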
OMAP4-AOSP/android_kernel_omap4_common
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python

"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2

This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but
hierarchical tree of calls. Only the functions' names and the call
times are provided.

Usage:
        Be sure that you have CONFIG_FUNCTION_TRACER
        # mount -t debugfs nodev /sys/kernel/debug
        # echo function > /sys/kernel/debug/tracing/current_tracer
        $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
        Wait for a while, but not too long: the script is a bit slow.
        Break the pipe (Ctrl + Z)
        $ scripts/draw_functrace.py < raw_trace_func > draw_functrace
        Then you have your drawn trace in draw_functrace
"""


import sys, re

class CallTree:
    """ This class provides a tree representation of the function call
    stack. If a function has no parent in the kernel (interrupt,
    syscall, kernel thread...) then it is attached to a virtual parent
    called ROOT.
    """
    ROOT = None

    def __init__(self, func, time = None, parent = None):
        self._func = func
        self._time = time
        if parent is None:
            self._parent = CallTree.ROOT
        else:
            self._parent = parent
        self._children = []

    def calls(self, func, calltime):
        """ If a function calls another one, call this method to insert it
        into the tree at the appropriate place.
        @return: A reference to the newly created child node.
        """
        child = CallTree(func, calltime, self)
        self._children.append(child)
        return child

    def getParent(self, func):
        """ Retrieve the last parent of the current node that has the name
        given by func. If this function is not found in any ancestor,
        create it as a new child of root.
        @return: A reference to the parent.
        """
        tree = self
        while tree != CallTree.ROOT and tree._func != func:
            tree = tree._parent
        if tree == CallTree.ROOT:
            child = CallTree.ROOT.calls(func, None)
            return child
        return tree

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        if self._time is not None:
            s = "%s----%s (%s)\n" % (branch, self._func, self._time)
        else:
            s = "%s----%s\n" % (branch, self._func)

        i = 0
        if lastChild:
            branch = branch[:-1] + " "
        while i < len(self._children):
            if i != len(self._children) - 1:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", False)
            else:
                s += "%s" % self._children[i].__toString(branch +\
                    " |", True)
            i += 1
        return s

class BrokenLineException(Exception):
    """If the last line is not complete because of the pipe breakage,
    we want to stop the processing and ignore this line.
    """
    pass

class CommentLineException(Exception):
    """ If the line is a comment (as in the beginning of the trace file),
    just ignore it.
    """
    pass


def parseLine(line):
    line = line.strip()
    if line.startswith("#"):
        raise CommentLineException
    m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
    if m is None:
        raise BrokenLineException
    return (m.group(1), m.group(2), m.group(3))


def main():
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT

    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            break
        except CommentLineException:
            continue
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)

    print CallTree.ROOT

if __name__ == "__main__":
    main()
gpl-2.0
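A small, self-contained exercise of the CallTree class above, not part of the kernel script: it builds a two-level stack by hand instead of parsing trace_pipe output, then prints the drawn tree. The function names and times are made up for illustration.

CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
node = CallTree.ROOT.calls("sys_open", "1.000001")    # syscall entry
node = node.calls("do_sys_open", "1.000002")          # nested call
node.calls("getname", "1.000003")                     # leaf
print(CallTree.ROOT)   # draws the indented ----func (time) tree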
goulu/networkx
networkx/algorithms/tests/test_graphical.py
22
4665
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx


def test_valid_degree_sequence1():
    n = 100
    p = .3
    for i in range(10):
        G = nx.erdos_renyi_graph(n, p)
        # Materialize the degrees as a list: a generator would be
        # exhausted by the first assertion, leaving the second one to
        # check an empty (trivially valid) sequence.
        deg = [d for n, d in G.degree()]
        assert_true(nx.is_valid_degree_sequence(deg, method='eg'))
        assert_true(nx.is_valid_degree_sequence(deg, method='hh'))


def test_valid_degree_sequence2():
    n = 100
    for i in range(10):
        G = nx.barabasi_albert_graph(n, 1)
        deg = [d for n, d in G.degree()]
        assert_true(nx.is_valid_degree_sequence(deg, method='eg'))
        assert_true(nx.is_valid_degree_sequence(deg, method='hh'))


@raises(nx.NetworkXException)
def test_string_input():
    a = nx.is_valid_degree_sequence([], 'foo')


def test_negative_input():
    assert_false(nx.is_valid_degree_sequence([-1], 'hh'))
    assert_false(nx.is_valid_degree_sequence([-1], 'eg'))
    assert_false(nx.is_valid_degree_sequence([72.5], 'eg'))


class TestAtlas(object):
    @classmethod
    def setupClass(cls):
        global atlas
        import platform
        if platform.python_implementation() == 'Jython':
            raise SkipTest('graph atlas not available under Jython.')
        import networkx.generators.atlas as atlas

    def setUp(self):
        self.GAG = atlas.graph_atlas_g()

    def test_atlas(self):
        for graph in self.GAG:
            deg = [d for n, d in graph.degree()]
            assert_true(nx.is_valid_degree_sequence(deg, method='eg'))
            assert_true(nx.is_valid_degree_sequence(deg, method='hh'))


def test_small_graph_true():
    z = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
    assert_true(nx.is_valid_degree_sequence(z, method='hh'))
    assert_true(nx.is_valid_degree_sequence(z, method='eg'))
    z = [10, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2]
    assert_true(nx.is_valid_degree_sequence(z, method='hh'))
    assert_true(nx.is_valid_degree_sequence(z, method='eg'))
    z = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_true(nx.is_valid_degree_sequence(z, method='hh'))
    assert_true(nx.is_valid_degree_sequence(z, method='eg'))


def test_small_graph_false():
    z = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
    assert_false(nx.is_valid_degree_sequence(z, method='hh'))
    assert_false(nx.is_valid_degree_sequence(z, method='eg'))
    z = [6, 5, 4, 4, 2, 1, 1, 1]
    assert_false(nx.is_valid_degree_sequence(z, method='hh'))
    assert_false(nx.is_valid_degree_sequence(z, method='eg'))
    z = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_false(nx.is_valid_degree_sequence(z, method='hh'))
    assert_false(nx.is_valid_degree_sequence(z, method='eg'))


def test_directed_degree_sequence():
    # Test a range of valid directed degree sequences
    n, r = 100, 10
    p = 1.0 / r
    for i in range(r):
        G = nx.erdos_renyi_graph(n, p * (i + 1), None, True)
        din = (d for n, d in G.in_degree())
        dout = (d for n, d in G.out_degree())
        assert_true(nx.is_digraphical(din, dout))


def test_small_directed_sequences():
    dout = [5, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
    din = [3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 1]
    assert_true(nx.is_digraphical(din, dout))
    # Test nongraphical directed sequence
    dout = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
    din = [103, 102, 102, 102, 102, 102, 102, 102, 102, 102]
    assert_false(nx.is_digraphical(din, dout))
    # Test digraphical small sequence
    dout = [1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1]
    assert_true(nx.is_digraphical(din, dout))
    # Test nonmatching sum
    din = [2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1]
    assert_false(nx.is_digraphical(din, dout))
    # Test for negative integer in sequence
    din = [2, 2, 2, -2, 2, 2, 2, 2, 1, 1, 4]
    assert_false(nx.is_digraphical(din, dout))


def test_multi_sequence():
    # Test nongraphical multi sequence
    seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
    assert_false(nx.is_multigraphical(seq))
    # Test small graphical multi sequence
    seq = [6, 5, 4, 4, 2, 1, 1, 1]
    assert_true(nx.is_multigraphical(seq))
    # Test for negative integer in sequence
    seq = [6, 5, 4, -4, 2, 1, 1, 1]
    assert_false(nx.is_multigraphical(seq))
    # Test for sequence with odd sum
    seq = [1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 4]
    assert_false(nx.is_multigraphical(seq))


def test_pseudo_sequence():
    # Test small valid pseudo sequence
    seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1]
    assert_true(nx.is_pseudographical(seq))
    # Test for sequence with odd sum
    seq = [1000, 3, 3, 3, 3, 2, 2, 2, 1, 1, 1]
    assert_false(nx.is_pseudographical(seq))
    # Test for negative integer in sequence
    seq = [1000, 3, 3, 3, 3, 2, 2, -2, 1, 1]
    assert_false(nx.is_pseudographical(seq))
bsd-3-clause
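For reference, the same checks the tests above drive, applied to a hand-picked sequence; a sketch assuming the networkx version under test, where is_valid_degree_sequence(seq, method=...) is still the public name (later releases renamed it is_graphical):

import networkx as nx

# [3, 3, 3, 3] is the degree sequence of the complete graph K4, so both
# the Erdos-Gallai ('eg') and Havel-Hakimi ('hh') tests accept it;
# raising one degree above n - 1 = 3 makes the sequence impossible.
print(nx.is_valid_degree_sequence([3, 3, 3, 3], method='eg'))  # True
print(nx.is_valid_degree_sequence([3, 3, 3, 3], method='hh'))  # True
print(nx.is_valid_degree_sequence([4, 3, 3, 3], method='eg'))  # False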
xq262144/hue
desktop/core/ext-py/Babel-0.9.6/babel/dates.py
67
38022
# -*- coding: utf-8 -*- # # Copyright (C) 2007 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://babel.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://babel.edgewall.org/log/. """Locale dependent formatting and parsing of dates and times. The default locale for the functions in this module is determined by the following environment variables, in that order: * ``LC_TIME``, * ``LC_ALL``, and * ``LANG`` """ from datetime import date, datetime, time, timedelta, tzinfo import re from babel.core import default_locale, get_global, Locale from babel.util import UTC __all__ = ['format_date', 'format_datetime', 'format_time', 'get_timezone_name', 'parse_date', 'parse_datetime', 'parse_time'] __docformat__ = 'restructuredtext en' LC_TIME = default_locale('LC_TIME') # Aliases for use in scopes where the modules are shadowed by local variables date_ = date datetime_ = datetime time_ = time def get_period_names(locale=LC_TIME): """Return the names for day periods (AM/PM) used by the locale. >>> get_period_names(locale='en_US')['am'] u'AM' :param locale: the `Locale` object, or a locale string :return: the dictionary of period names :rtype: `dict` """ return Locale.parse(locale).periods def get_day_names(width='wide', context='format', locale=LC_TIME): """Return the day names used by the locale for the specified format. >>> get_day_names('wide', locale='en_US')[1] u'Tuesday' >>> get_day_names('abbreviated', locale='es')[1] u'mar' >>> get_day_names('narrow', context='stand-alone', locale='de_DE')[1] u'D' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string :return: the dictionary of day names :rtype: `dict` """ return Locale.parse(locale).days[context][width] def get_month_names(width='wide', context='format', locale=LC_TIME): """Return the month names used by the locale for the specified format. >>> get_month_names('wide', locale='en_US')[1] u'January' >>> get_month_names('abbreviated', locale='es')[1] u'ene' >>> get_month_names('narrow', context='stand-alone', locale='de_DE')[1] u'J' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string :return: the dictionary of month names :rtype: `dict` """ return Locale.parse(locale).months[context][width] def get_quarter_names(width='wide', context='format', locale=LC_TIME): """Return the quarter names used by the locale for the specified format. >>> get_quarter_names('wide', locale='en_US')[1] u'1st quarter' >>> get_quarter_names('abbreviated', locale='de_DE')[1] u'Q1' :param width: the width to use, one of "wide", "abbreviated", or "narrow" :param context: the context, either "format" or "stand-alone" :param locale: the `Locale` object, or a locale string :return: the dictionary of quarter names :rtype: `dict` """ return Locale.parse(locale).quarters[context][width] def get_era_names(width='wide', locale=LC_TIME): """Return the era names used by the locale for the specified format. 
>>> get_era_names('wide', locale='en_US')[1] u'Anno Domini' >>> get_era_names('abbreviated', locale='de_DE')[1] u'n. Chr.' :param width: the width to use, either "wide", "abbreviated", or "narrow" :param locale: the `Locale` object, or a locale string :return: the dictionary of era names :rtype: `dict` """ return Locale.parse(locale).eras[width] def get_date_format(format='medium', locale=LC_TIME): """Return the date formatting patterns used by the locale for the specified format. >>> get_date_format(locale='en_US') <DateTimePattern u'MMM d, yyyy'> >>> get_date_format('full', locale='de_DE') <DateTimePattern u'EEEE, d. MMMM yyyy'> :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string :return: the date format pattern :rtype: `DateTimePattern` """ return Locale.parse(locale).date_formats[format] def get_datetime_format(format='medium', locale=LC_TIME): """Return the datetime formatting patterns used by the locale for the specified format. >>> get_datetime_format(locale='en_US') u'{1} {0}' :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string :return: the datetime format pattern :rtype: `unicode` """ patterns = Locale.parse(locale).datetime_formats if format not in patterns: format = None return patterns[format] def get_time_format(format='medium', locale=LC_TIME): """Return the time formatting patterns used by the locale for the specified format. >>> get_time_format(locale='en_US') <DateTimePattern u'h:mm:ss a'> >>> get_time_format('full', locale='de_DE') <DateTimePattern u'HH:mm:ss v'> :param format: the format to use, one of "full", "long", "medium", or "short" :param locale: the `Locale` object, or a locale string :return: the time format pattern :rtype: `DateTimePattern` """ return Locale.parse(locale).time_formats[format] def get_timezone_gmt(datetime=None, width='long', locale=LC_TIME): """Return the timezone associated with the given `datetime` object formatted as string indicating the offset from GMT. >>> dt = datetime(2007, 4, 1, 15, 30) >>> get_timezone_gmt(dt, locale='en') u'GMT+00:00' >>> from pytz import timezone >>> tz = timezone('America/Los_Angeles') >>> dt = datetime(2007, 4, 1, 15, 30, tzinfo=tz) >>> get_timezone_gmt(dt, locale='en') u'GMT-08:00' >>> get_timezone_gmt(dt, 'short', locale='en') u'-0800' The long format depends on the locale, for example in France the acronym UTC string is used instead of GMT: >>> get_timezone_gmt(dt, 'long', locale='fr_FR') u'UTC-08:00' :param datetime: the ``datetime`` object; if `None`, the current date and time in UTC is used :param width: either "long" or "short" :param locale: the `Locale` object, or a locale string :return: the GMT offset representation of the timezone :rtype: `unicode` :since: version 0.9 """ if datetime is None: datetime = datetime_.utcnow() elif isinstance(datetime, (int, long)): datetime = datetime_.utcfromtimestamp(datetime).time() if datetime.tzinfo is None: datetime = datetime.replace(tzinfo=UTC) locale = Locale.parse(locale) offset = datetime.tzinfo.utcoffset(datetime) seconds = offset.days * 24 * 60 * 60 + offset.seconds hours, seconds = divmod(seconds, 3600) if width == 'short': pattern = u'%+03d%02d' else: pattern = locale.zone_formats['gmt'] % '%+03d:%02d' return pattern % (hours, seconds // 60) def get_timezone_location(dt_or_tzinfo=None, locale=LC_TIME): """Return a representation of the given timezone using "location format". 
The result depends on both the local display name of the country and the city associated with the time zone: >>> from pytz import timezone >>> tz = timezone('America/St_Johns') >>> get_timezone_location(tz, locale='de_DE') u"Kanada (St. John's)" >>> tz = timezone('America/Mexico_City') >>> get_timezone_location(tz, locale='de_DE') u'Mexiko (Mexiko-Stadt)' If the timezone is associated with a country that uses only a single timezone, just the localized country name is returned: >>> tz = timezone('Europe/Berlin') >>> get_timezone_name(tz, locale='de_DE') u'Deutschland' :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines the timezone; if `None`, the current date and time in UTC is assumed :param locale: the `Locale` object, or a locale string :return: the localized timezone name using location format :rtype: `unicode` :since: version 0.9 """ if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)): dt = None tzinfo = UTC elif isinstance(dt_or_tzinfo, (datetime, time)): dt = dt_or_tzinfo if dt.tzinfo is not None: tzinfo = dt.tzinfo else: tzinfo = UTC else: dt = None tzinfo = dt_or_tzinfo locale = Locale.parse(locale) if hasattr(tzinfo, 'zone'): zone = tzinfo.zone else: zone = tzinfo.tzname(dt or datetime.utcnow()) # Get the canonical time-zone code zone = get_global('zone_aliases').get(zone, zone) info = locale.time_zones.get(zone, {}) # If there is only one timezone for the country, return the # localized country name region_format = locale.zone_formats['region'] territory = get_global('zone_territories').get(zone) if territory not in locale.territories: territory = 'ZZ' # invalid/unknown territory_name = locale.territories[territory] if territory and len(get_global('territory_zones').get(territory, [])) == 1: return region_format % (territory_name) # Otherwise, include the city in the output fallback_format = locale.zone_formats['fallback'] if 'city' in info: city_name = info['city'] else: metazone = get_global('meta_zones').get(zone) metazone_info = locale.meta_zones.get(metazone, {}) if 'city' in metazone_info: city_name = metazone_info['city'] elif '/' in zone: city_name = zone.split('/', 1)[1].replace('_', ' ') else: city_name = zone.replace('_', ' ') return region_format % (fallback_format % { '0': city_name, '1': territory_name }) def get_timezone_name(dt_or_tzinfo=None, width='long', uncommon=False, locale=LC_TIME): r"""Return the localized display name for the given timezone. The timezone may be specified using a ``datetime`` or `tzinfo` object. >>> from pytz import timezone >>> dt = time(15, 30, tzinfo=timezone('America/Los_Angeles')) >>> get_timezone_name(dt, locale='en_US') u'Pacific Standard Time' >>> get_timezone_name(dt, width='short', locale='en_US') u'PST' If this function gets passed only a `tzinfo` object and no concrete `datetime`, the returned display name is independent of daylight savings time.
This can be used for example for selecting timezones, or to set the time of events that recur across DST changes: >>> tz = timezone('America/Los_Angeles') >>> get_timezone_name(tz, locale='en_US') u'Pacific Time' >>> get_timezone_name(tz, 'short', locale='en_US') u'PT' If no localized display name for the timezone is available, and the timezone is associated with a country that uses only a single timezone, the name of that country is returned, formatted according to the locale: >>> tz = timezone('Europe/Berlin') >>> get_timezone_name(tz, locale='de_DE') u'Deutschland' >>> get_timezone_name(tz, locale='pt_BR') u'Hor\xe1rio Alemanha' On the other hand, if the country uses multiple timezones, the city is also included in the representation: >>> tz = timezone('America/St_Johns') >>> get_timezone_name(tz, locale='de_DE') u"Kanada (St. John's)" The `uncommon` parameter can be set to `True` to enable the use of timezone representations that are not commonly used by the requested locale. For example, while in French the central European timezone is usually abbreviated as "HEC", in Canadian French, this abbreviation is not in common use, so a generic name would be chosen by default: >>> tz = timezone('Europe/Paris') >>> get_timezone_name(tz, 'short', locale='fr_CA') u'France' >>> get_timezone_name(tz, 'short', uncommon=True, locale='fr_CA') u'HEC' :param dt_or_tzinfo: the ``datetime`` or ``tzinfo`` object that determines the timezone; if a ``tzinfo`` object is used, the resulting display name will be generic, i.e. independent of daylight savings time; if `None`, the current date in UTC is assumed :param width: either "long" or "short" :param uncommon: whether even uncommon timezone abbreviations should be used :param locale: the `Locale` object, or a locale string :return: the timezone display name :rtype: `unicode` :since: version 0.9 :see: `LDML Appendix J: Time Zone Display Names <http://www.unicode.org/reports/tr35/#Time_Zone_Fallback>`_ """ if dt_or_tzinfo is None or isinstance(dt_or_tzinfo, (int, long)): dt = None tzinfo = UTC elif isinstance(dt_or_tzinfo, (datetime, time)): dt = dt_or_tzinfo if dt.tzinfo is not None: tzinfo = dt.tzinfo else: tzinfo = UTC else: dt = None tzinfo = dt_or_tzinfo locale = Locale.parse(locale) if hasattr(tzinfo, 'zone'): zone = tzinfo.zone else: zone = tzinfo.tzname(dt) # Get the canonical time-zone code zone = get_global('zone_aliases').get(zone, zone) info = locale.time_zones.get(zone, {}) # Try explicitly translated zone names first if width in info: if dt is None: field = 'generic' else: dst = tzinfo.dst(dt) if dst is None: field = 'generic' elif dst == 0: field = 'standard' else: field = 'daylight' if field in info[width]: return info[width][field] metazone = get_global('meta_zones').get(zone) if metazone: metazone_info = locale.meta_zones.get(metazone, {}) if width in metazone_info and (uncommon or metazone_info.get('common')): if dt is None: field = 'generic' else: field = tzinfo.dst(dt) and 'daylight' or 'standard' if field in metazone_info[width]: return metazone_info[width][field] # If we have a concrete datetime, we assume that the result can't be # independent of daylight savings time, so we return the GMT offset if dt is not None: return get_timezone_gmt(dt, width=width, locale=locale) return get_timezone_location(dt_or_tzinfo, locale=locale) def format_date(date=None, format='medium', locale=LC_TIME): """Return a date formatted according to the given pattern. 
>>> d = date(2007, 04, 01) >>> format_date(d, locale='en_US') u'Apr 1, 2007' >>> format_date(d, format='full', locale='de_DE') u'Sonntag, 1. April 2007' If you don't want to use the locale default formats, you can specify a custom date pattern: >>> format_date(d, "EEE, MMM d, ''yy", locale='en') u"Sun, Apr 1, '07" :param date: the ``date`` or ``datetime`` object; if `None`, the current date is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param locale: a `Locale` object or a locale identifier :rtype: `unicode` :note: If the pattern contains time fields, an `AttributeError` will be raised when trying to apply the formatting. This is also true if the value of ``date`` parameter is actually a ``datetime`` object, as this function automatically converts that to a ``date``. """ if date is None: date = date_.today() elif isinstance(date, datetime): date = date.date() locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): format = get_date_format(format, locale=locale) pattern = parse_pattern(format) return pattern.apply(date, locale) def format_datetime(datetime=None, format='medium', tzinfo=None, locale=LC_TIME): """Return a date formatted according to the given pattern. >>> dt = datetime(2007, 04, 01, 15, 30) >>> format_datetime(dt, locale='en_US') u'Apr 1, 2007 3:30:00 PM' For any pattern requiring the display of the time-zone, the third-party ``pytz`` package is needed to explicitly specify the time-zone: >>> from pytz import timezone >>> format_datetime(dt, 'full', tzinfo=timezone('Europe/Paris'), ... locale='fr_FR') u'dimanche 1 avril 2007 17:30:00 HEC' >>> format_datetime(dt, "yyyy.MM.dd G 'at' HH:mm:ss zzz", ... tzinfo=timezone('US/Eastern'), locale='en') u'2007.04.01 AD at 11:30:00 EDT' :param datetime: the `datetime` object; if `None`, the current date and time is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param tzinfo: the timezone to apply to the time for display :param locale: a `Locale` object or a locale identifier :rtype: `unicode` """ if datetime is None: datetime = datetime_.utcnow() elif isinstance(datetime, (int, long)): datetime = datetime_.utcfromtimestamp(datetime) elif isinstance(datetime, time): datetime = datetime_.combine(date.today(), datetime) if datetime.tzinfo is None: datetime = datetime.replace(tzinfo=UTC) if tzinfo is not None: datetime = datetime.astimezone(tzinfo) if hasattr(tzinfo, 'normalize'): # pytz datetime = tzinfo.normalize(datetime) locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): return get_datetime_format(format, locale=locale) \ .replace('{0}', format_time(datetime, format, tzinfo=None, locale=locale)) \ .replace('{1}', format_date(datetime, format, locale=locale)) else: return parse_pattern(format).apply(datetime, locale) def format_time(time=None, format='medium', tzinfo=None, locale=LC_TIME): """Return a time formatted according to the given pattern. 
>>> t = time(15, 30) >>> format_time(t, locale='en_US') u'3:30:00 PM' >>> format_time(t, format='short', locale='de_DE') u'15:30' If you don't want to use the locale default formats, you can specify a custom time pattern: >>> format_time(t, "hh 'o''clock' a", locale='en') u"03 o'clock PM" For any pattern requiring the display of the time-zone, the third-party ``pytz`` package is needed to explicitly specify the time-zone: >>> from pytz import timezone >>> t = datetime(2007, 4, 1, 15, 30) >>> tzinfo = timezone('Europe/Paris') >>> t = tzinfo.localize(t) >>> format_time(t, format='full', tzinfo=tzinfo, locale='fr_FR') u'15:30:00 HEC' >>> format_time(t, "hh 'o''clock' a, zzzz", tzinfo=timezone('US/Eastern'), ... locale='en') u"09 o'clock AM, Eastern Daylight Time" As that example shows, when this function gets passed a ``datetime.datetime`` value, the actual time in the formatted string is adjusted to the timezone specified by the `tzinfo` parameter. If the ``datetime`` is "naive" (i.e. it has no associated timezone information), it is assumed to be in UTC. These timezone calculations are **not** performed if the value is of type ``datetime.time``, as without date information there's no way to determine what a given time would translate to in a different timezone without information about whether daylight savings time is in effect or not. This means that time values are left as-is, and the value of the `tzinfo` parameter is only used to display the timezone name if needed: >>> t = time(15, 30) >>> format_time(t, format='full', tzinfo=timezone('Europe/Paris'), ... locale='fr_FR') u'15:30:00 HEC' >>> format_time(t, format='full', tzinfo=timezone('US/Eastern'), ... locale='en_US') u'3:30:00 PM ET' :param time: the ``time`` or ``datetime`` object; if `None`, the current time in UTC is used :param format: one of "full", "long", "medium", or "short", or a custom date/time pattern :param tzinfo: the time-zone to apply to the time for display :param locale: a `Locale` object or a locale identifier :rtype: `unicode` :note: If the pattern contains date fields, an `AttributeError` will be raised when trying to apply the formatting. This is also true if the value of ``time`` parameter is actually a ``datetime`` object, as this function automatically converts that to a ``time``. """ if time is None: time = datetime.utcnow() elif isinstance(time, (int, long)): time = datetime.utcfromtimestamp(time) if time.tzinfo is None: time = time.replace(tzinfo=UTC) if isinstance(time, datetime): if tzinfo is not None: time = time.astimezone(tzinfo) if hasattr(tzinfo, 'normalize'): # pytz time = tzinfo.normalize(time) time = time.timetz() elif tzinfo is not None: time = time.replace(tzinfo=tzinfo) locale = Locale.parse(locale) if format in ('full', 'long', 'medium', 'short'): format = get_time_format(format, locale=locale) return parse_pattern(format).apply(time, locale) def parse_date(string, locale=LC_TIME): """Parse a date from a string. This function uses the date format for the locale as a hint to determine the order in which the date fields appear in the string. >>> parse_date('4/1/04', locale='en_US') datetime.date(2004, 4, 1) >>> parse_date('01.04.2004', locale='de_DE') datetime.date(2004, 4, 1) :param string: the string containing the date :param locale: a `Locale` object or a locale identifier :return: the parsed date :rtype: `date` """ # TODO: try ISO format first? 
format = get_date_format(locale=locale).pattern.lower() year_idx = format.index('y') month_idx = format.index('m') if month_idx < 0: month_idx = format.index('l') day_idx = format.index('d') indexes = [(year_idx, 'Y'), (month_idx, 'M'), (day_idx, 'D')] indexes.sort() indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) # FIXME: this currently only supports numbers, but should also support month # names, both in the requested locale, and english numbers = re.findall('(\d+)', string) year = numbers[indexes['Y']] if len(year) == 2: year = 2000 + int(year) else: year = int(year) month = int(numbers[indexes['M']]) day = int(numbers[indexes['D']]) if month > 12: month, day = day, month return date(year, month, day) def parse_datetime(string, locale=LC_TIME): """Parse a date and time from a string. This function uses the date and time formats for the locale as a hint to determine the order in which the time fields appear in the string. :param string: the string containing the date and time :param locale: a `Locale` object or a locale identifier :return: the parsed date/time :rtype: `datetime` """ raise NotImplementedError def parse_time(string, locale=LC_TIME): """Parse a time from a string. This function uses the time format for the locale as a hint to determine the order in which the time fields appear in the string. >>> parse_time('15:30:00', locale='en_US') datetime.time(15, 30) :param string: the string containing the time :param locale: a `Locale` object or a locale identifier :return: the parsed time :rtype: `time` """ # TODO: try ISO format first? format = get_time_format(locale=locale).pattern.lower() hour_idx = format.index('h') if hour_idx < 0: hour_idx = format.index('k') min_idx = format.index('m') sec_idx = format.index('s') indexes = [(hour_idx, 'H'), (min_idx, 'M'), (sec_idx, 'S')] indexes.sort() indexes = dict([(item[1], idx) for idx, item in enumerate(indexes)]) # FIXME: support 12 hour clock, and 0-based hour specification # and seconds should be optional, maybe minutes too # oh, and time-zones, of course numbers = re.findall('(\d+)', string) hour = int(numbers[indexes['H']]) minute = int(numbers[indexes['M']]) second = int(numbers[indexes['S']]) return time(hour, minute, second) class DateTimePattern(object): def __init__(self, pattern, format): self.pattern = pattern self.format = format def __repr__(self): return '<%s %r>' % (type(self).__name__, self.pattern) def __unicode__(self): return self.pattern def __mod__(self, other): assert type(other) is DateTimeFormat return self.format % other def apply(self, datetime, locale): return self % DateTimeFormat(datetime, locale) class DateTimeFormat(object): def __init__(self, value, locale): assert isinstance(value, (date, datetime, time)) if isinstance(value, (datetime, time)) and value.tzinfo is None: value = value.replace(tzinfo=UTC) self.value = value self.locale = Locale.parse(locale) def __getitem__(self, name): char = name[0] num = len(name) if char == 'G': return self.format_era(char, num) elif char in ('y', 'Y', 'u'): return self.format_year(char, num) elif char in ('Q', 'q'): return self.format_quarter(char, num) elif char in ('M', 'L'): return self.format_month(char, num) elif char in ('w', 'W'): return self.format_week(char, num) elif char == 'd': return self.format(self.value.day, num) elif char == 'D': return self.format_day_of_year(num) elif char == 'F': return self.format_day_of_week_in_month() elif char in ('E', 'e', 'c'): return self.format_weekday(char, num) elif char == 'a': return 
self.format_period(char) elif char == 'h': if self.value.hour % 12 == 0: return self.format(12, num) else: return self.format(self.value.hour % 12, num) elif char == 'H': return self.format(self.value.hour, num) elif char == 'K': return self.format(self.value.hour % 12, num) elif char == 'k': if self.value.hour == 0: return self.format(24, num) else: return self.format(self.value.hour, num) elif char == 'm': return self.format(self.value.minute, num) elif char == 's': return self.format(self.value.second, num) elif char == 'S': return self.format_frac_seconds(num) elif char == 'A': return self.format_milliseconds_in_day(num) elif char in ('z', 'Z', 'v', 'V'): return self.format_timezone(char, num) else: raise KeyError('Unsupported date/time field %r' % char) def format_era(self, char, num): width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[max(3, num)] era = int(self.value.year >= 0) return get_era_names(width, self.locale)[era] def format_year(self, char, num): value = self.value.year if char.isupper(): week = self.get_week_number(self.get_day_of_year()) if week == 0: value -= 1 year = self.format(value, num) if num == 2: year = year[-2:] return year def format_quarter(self, char, num): quarter = (self.value.month - 1) // 3 + 1 if num <= 2: return ('%%0%dd' % num) % quarter width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {'Q': 'format', 'q': 'stand-alone'}[char] return get_quarter_names(width, context, self.locale)[quarter] def format_month(self, char, num): if num <= 2: return ('%%0%dd' % num) % self.value.month width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {'M': 'format', 'L': 'stand-alone'}[char] return get_month_names(width, context, self.locale)[self.value.month] def format_week(self, char, num): if char.islower(): # week of year day_of_year = self.get_day_of_year() week = self.get_week_number(day_of_year) if week == 0: date = self.value - timedelta(days=day_of_year) week = self.get_week_number(self.get_day_of_year(date), date.weekday()) return self.format(week, num) else: # week of month week = self.get_week_number(self.value.day) if week == 0: date = self.value - timedelta(days=self.value.day) week = self.get_week_number(date.day, date.weekday()) pass return '%d' % week def format_weekday(self, char, num): if num < 3: if char.islower(): value = 7 - self.locale.first_week_day + self.value.weekday() return self.format(value % 7 + 1, num) num = 3 weekday = self.value.weekday() width = {3: 'abbreviated', 4: 'wide', 5: 'narrow'}[num] context = {3: 'format', 4: 'format', 5: 'stand-alone'}[num] return get_day_names(width, context, self.locale)[weekday] def format_day_of_year(self, num): return self.format(self.get_day_of_year(), num) def format_day_of_week_in_month(self): return '%d' % ((self.value.day - 1) / 7 + 1) def format_period(self, char): period = {0: 'am', 1: 'pm'}[int(self.value.hour >= 12)] return get_period_names(locale=self.locale)[period] def format_frac_seconds(self, num): value = str(self.value.microsecond) return self.format(round(float('.%s' % value), num) * 10**num, num) def format_milliseconds_in_day(self, num): msecs = self.value.microsecond // 1000 + self.value.second * 1000 + \ self.value.minute * 60000 + self.value.hour * 3600000 return self.format(msecs, num) def format_timezone(self, char, num): width = {3: 'short', 4: 'long'}[max(3, num)] if char == 'z': return get_timezone_name(self.value, width, locale=self.locale) elif char == 'Z': return get_timezone_gmt(self.value, width, locale=self.locale) elif char == 'v': return 
get_timezone_name(self.value.tzinfo, width, locale=self.locale) elif char == 'V': if num == 1: return get_timezone_name(self.value.tzinfo, width, uncommon=True, locale=self.locale) return get_timezone_location(self.value.tzinfo, locale=self.locale) def format(self, value, length): return ('%%0%dd' % length) % value def get_day_of_year(self, date=None): if date is None: date = self.value return (date - date_(date.year, 1, 1)).days + 1 def get_week_number(self, day_of_period, day_of_week=None): """Return the number of the week of a day within a period. This may be the week number in a year or the week number in a month. Usually this will return a value equal to or greater than 1, but if the first week of the period is so short that it actually counts as the last week of the previous period, this function will return 0. >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('de_DE')) >>> format.get_week_number(6) 1 >>> format = DateTimeFormat(date(2006, 1, 8), Locale.parse('en_US')) >>> format.get_week_number(6) 2 :param day_of_period: the number of the day in the period (usually either the day of month or the day of year) :param day_of_week: the week day; if ommitted, the week day of the current date is assumed """ if day_of_week is None: day_of_week = self.value.weekday() first_day = (day_of_week - self.locale.first_week_day - day_of_period + 1) % 7 if first_day < 0: first_day += 7 week_number = (day_of_period + first_day - 1) / 7 if 7 - first_day >= self.locale.min_week_days: week_number += 1 return week_number PATTERN_CHARS = { 'G': [1, 2, 3, 4, 5], # era 'y': None, 'Y': None, 'u': None, # year 'Q': [1, 2, 3, 4], 'q': [1, 2, 3, 4], # quarter 'M': [1, 2, 3, 4, 5], 'L': [1, 2, 3, 4, 5], # month 'w': [1, 2], 'W': [1], # week 'd': [1, 2], 'D': [1, 2, 3], 'F': [1], 'g': None, # day 'E': [1, 2, 3, 4, 5], 'e': [1, 2, 3, 4, 5], 'c': [1, 3, 4, 5], # week day 'a': [1], # period 'h': [1, 2], 'H': [1, 2], 'K': [1, 2], 'k': [1, 2], # hour 'm': [1, 2], # minute 's': [1, 2], 'S': None, 'A': None, # second 'z': [1, 2, 3, 4], 'Z': [1, 2, 3, 4], 'v': [1, 4], 'V': [1, 4] # zone } def parse_pattern(pattern): """Parse date, time, and datetime format patterns. 
>>> parse_pattern("MMMMd").format u'%(MMMM)s%(d)s' >>> parse_pattern("MMM d, yyyy").format u'%(MMM)s %(d)s, %(yyyy)s' Pattern can contain literal strings in single quotes: >>> parse_pattern("H:mm' Uhr 'z").format u'%(H)s:%(mm)s Uhr %(z)s' An actual single quote can be used by using two adjacent single quote characters: >>> parse_pattern("hh' o''clock'").format u"%(hh)s o'clock" :param pattern: the formatting pattern to parse """ if type(pattern) is DateTimePattern: return pattern result = [] quotebuf = None charbuf = [] fieldchar = [''] fieldnum = [0] def append_chars(): result.append(''.join(charbuf).replace('%', '%%')) del charbuf[:] def append_field(): limit = PATTERN_CHARS[fieldchar[0]] if limit and fieldnum[0] not in limit: raise ValueError('Invalid length for field: %r' % (fieldchar[0] * fieldnum[0])) result.append('%%(%s)s' % (fieldchar[0] * fieldnum[0])) fieldchar[0] = '' fieldnum[0] = 0 for idx, char in enumerate(pattern.replace("''", '\0')): if quotebuf is None: if char == "'": # quote started if fieldchar[0]: append_field() elif charbuf: append_chars() quotebuf = [] elif char in PATTERN_CHARS: if charbuf: append_chars() if char == fieldchar[0]: fieldnum[0] += 1 else: if fieldchar[0]: append_field() fieldchar[0] = char fieldnum[0] = 1 else: if fieldchar[0]: append_field() charbuf.append(char) elif quotebuf is not None: if char == "'": # end of quote charbuf.extend(quotebuf) quotebuf = None else: # inside quote quotebuf.append(char) if fieldchar[0]: append_field() elif charbuf: append_chars() return DateTimePattern(pattern, u''.join(result).replace('\0', "'"))
apache-2.0
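A short usage sketch for the pattern machinery above, assuming Babel 0.9.x with its bundled CLDR data (exact strings can differ between CLDR releases):

from datetime import datetime
from babel.dates import format_datetime, parse_pattern

dt = datetime(2007, 4, 1, 15, 30)

# Named lengths resolve through get_datetime_format()/get_date_format():
print(format_datetime(dt, 'medium', locale='en_US'))
# u'Apr 1, 2007 3:30:00 PM'

# A custom skeleton is compiled once by parse_pattern() and can then be
# applied repeatedly via DateTimePattern.apply():
pattern = parse_pattern("EEEE, MMM d 'at' HH:mm")
print(pattern.apply(dt, 'en_US'))
# u'Sunday, Apr 1 at 15:30'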
zbyte64/django-hyperadmin
hyperadmin/models.py
2
1912
""" Helper function for logging all events that come through the API. """ # TODO Write logging model from contextlib import contextmanager import logging from django.utils.encoding import smart_unicode from django.utils.translation import ugettext_lazy as _ class RelList(list): """ A list subclass that allows us to use dot notation to search for elements that match the reltype. >>> links = RelList([{"rel":"self", "href": "self"}, {"rel":"other", "href":"other"}]) >>> links.self["href"] 'self' >>> links.other["href"] 'other' >>> links.foo """ def __getattr__(self, name): for item in self: if item['rel'] == name: return item LOGGER = logging.getLogger('hypermedia') UNSPECIFIED = 0 ADDITION = 1 CHANGE = 2 DELETION = 3 ACTION_NAME = { UNSPECIFIED: "???", ADDITION: "added", CHANGE: "changed", DELETION: "deleted" } @contextmanager def log_action(user, obj, action_flag, change_message="", request=None): """ A context manager that logs the action. If the action fails, it logs that, too. """ if user.is_anonymous: user = None action = ACTION_NAME[action_flag] object_repr = smart_unicode(obj) if not change_message: change_message = _(u"Object %s was %s." % ( object_repr, action )) # Allow the body of the context to run try: yield except: LOGGER.error(_(u"Unable to %s object %s." % (action, object_repr)), exc_info=True, extra={ 'request': request, 'data': { 'user': user } } ) raise LOGGER.info(change_message, exc_info=True, extra={ 'stack': True, 'request': request, 'data': { 'user': user } })
bsd-3-clause
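A minimal sketch of driving the log_action context manager above from view code; request, obj, and the surrounding Django setup are hypothetical, and the 'hypermedia' logger must have a handler configured for the records to surface:

from hyperadmin.models import log_action, CHANGE

def update_slug(request, obj, new_slug):
    # Any exception inside the block is logged via LOGGER.error() and
    # re-raised; on success a LOGGER.info() record is emitted.
    with log_action(request.user, obj, CHANGE, request=request):
        obj.slug = new_slug
        obj.save()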
MrLoick/python-for-android
python-modules/twisted/twisted/protocols/gps/rockwell.py
61
11638
# Copyright (c) 2001-2004 Twisted Matrix Laboratories. # See LICENSE for details. """Rockwell Semiconductor Zodiac Serial Protocol Coded from official protocol specs (Order No. GPS-25, 09/24/1996, Revision 11) Maintainer: Bob Ippolito The following Rockwell Zodiac messages are currently understood:: EARTHA\\r\\n (a hack to "turn on" a DeLorme Earthmate) 1000 (Geodesic Position Status Output) 1002 (Channel Summary) 1003 (Visible Satellites) 1011 (Receiver ID) The following Rockwell Zodiac messages require implementation:: None really, the others aren't quite so useful and require bidirectional communication w/ the device Other desired features:: - Compatability with the DeLorme Tripmate and other devices with this chipset (?) """ import struct, operator, math from twisted.internet import protocol from twisted.python import log DEBUG = 1 class ZodiacParseError(ValueError): pass class Zodiac(protocol.Protocol): dispatch = { # Output Messages (* means they get sent by the receiver by default periodically) 1000: 'fix', # *Geodesic Position Status Output 1001: 'ecef', # ECEF Position Status Output 1002: 'channels', # *Channel Summary 1003: 'satellites', # *Visible Satellites 1005: 'dgps', # Differential GPS Status 1007: 'channelmeas', # Channel Measurement 1011: 'id', # *Receiver ID 1012: 'usersettings', # User-Settings Output 1100: 'testresults', # Built-In Test Results 1102: 'meastimemark', # Measurement Time Mark 1108: 'utctimemark', # UTC Time Mark Pulse Output 1130: 'serial', # Serial Port Communication Parameters In Use 1135: 'eepromupdate', # EEPROM Update 1136: 'eepromstatus', # EEPROM Status } # these aren't used for anything yet, just sitting here for reference messages = { # Input Messages 'fix': 1200, # Geodesic Position and Velocity Initialization 'udatum': 1210, # User-Defined Datum Definition 'mdatum': 1211, # Map Datum Select 'smask': 1212, # Satellite Elevation Mask Control 'sselect': 1213, # Satellite Candidate Select 'dgpsc': 1214, # Differential GPS Control 'startc': 1216, # Cold Start Control 'svalid': 1217, # Solution Validity Control 'antenna': 1218, # Antenna Type Select 'altinput': 1219, # User-Entered Altitude Input 'appctl': 1220, # Application Platform Control 'navcfg': 1221, # Nav Configuration 'test': 1300, # Perform Built-In Test Command 'restart': 1303, # Restart Command 'serial': 1330, # Serial Port Communications Parameters 'msgctl': 1331, # Message Protocol Control 'dgpsd': 1351, # Raw DGPS RTCM SC-104 Data } MAX_LENGTH = 296 allow_earthmate_hack = 1 recvd = "" def dataReceived(self, recd): self.recvd = self.recvd + recd while len(self.recvd) >= 10: # hack for DeLorme EarthMate if self.recvd[:8] == 'EARTHA\r\n': if self.allow_earthmate_hack: self.allow_earthmate_hack = 0 self.transport.write('EARTHA\r\n') self.recvd = self.recvd[8:] continue if self.recvd[0:2] != '\xFF\x81': if DEBUG: raise ZodiacParseError('Invalid Sync %r' % self.recvd) else: raise ZodiacParseError sync, msg_id, length, acknak, checksum = struct.unpack('<HHHHh', self.recvd[:10]) # verify checksum cksum = -(reduce(operator.add, (sync, msg_id, length, acknak)) & 0xFFFF) cksum, = struct.unpack('<h', struct.pack('<h', cksum)) if cksum != checksum: if DEBUG: raise ZodiacParseError('Invalid Header Checksum %r != %r %r' % (checksum, cksum, self.recvd[:8])) else: raise ZodiacParseError # length was in words, now it's bytes length = length * 2 # do we need more data ? 
neededBytes = 10 if length: neededBytes += length + 2 if len(self.recvd) < neededBytes: break if neededBytes > self.MAX_LENGTH: raise ZodiacParseError("Invalid Header??") # empty messages pass empty strings message = '' # does this message have data ? if length: message, checksum = self.recvd[10:10+length], struct.unpack('<h', self.recvd[10+length:neededBytes])[0] cksum = 0x10000 - (reduce(operator.add, struct.unpack('<%dH' % (length/2), message)) & 0xFFFF) cksum, = struct.unpack('<h', struct.pack('<h', cksum)) if cksum != checksum: if DEBUG: log.dmsg('msg_id = %r length = %r' % (msg_id, length), debug=True) raise ZodiacParseError('Invalid Data Checksum %r != %r %r' % (checksum, cksum, message)) else: raise ZodiacParseError # discard used buffer, dispatch message self.recvd = self.recvd[neededBytes:] self.receivedMessage(msg_id, message, acknak) def receivedMessage(self, msg_id, message, acknak): dispatch = self.dispatch.get(msg_id, None) if not dispatch: raise ZodiacParseError('Unknown msg_id = %r' % msg_id) handler = getattr(self, 'handle_%s' % dispatch, None) decoder = getattr(self, 'decode_%s' % dispatch, None) if not (handler and decoder): # missing handler or decoder #if DEBUG: # log.msg('MISSING HANDLER/DECODER PAIR FOR: %r' % (dispatch,), debug=True) return decoded = decoder(message) return handler(*decoded) def decode_fix(self, message): assert len(message) == 98, "Geodesic Position Status Output should be 55 words total (98 byte message)" (ticks, msgseq, satseq, navstatus, navtype, nmeasure, polar, gpswk, gpses, gpsns, utcdy, utcmo, utcyr, utchr, utcmn, utcsc, utcns, latitude, longitude, height, geoidalsep, speed, course, magvar, climb, mapdatum, exhposerr, exvposerr, extimeerr, exphvelerr, clkbias, clkbiasdev, clkdrift, clkdriftdev) = struct.unpack('<LhhHHHHHLLHHHHHHLlllhLHhhHLLLHllll', message) # there's a lot of shit in here.. # I'll just snag the important stuff and spit it out like my NMEA decoder utc = (utchr * 3600.0) + (utcmn * 60.0) + utcsc + (float(utcns) * 0.000000001) log.msg('utchr, utcmn, utcsc, utcns = ' + repr((utchr, utcmn, utcsc, utcns)), debug=True) latitude = float(latitude) * 0.00000180 / math.pi longitude = float(longitude) * 0.00000180 / math.pi posfix = not (navstatus & 0x001c) satellites = nmeasure hdop = float(exhposerr) * 0.01 altitude = float(height) * 0.01, 'M' geoid = float(geoidalsep) * 0.01, 'M' dgps = None return ( # seconds since 00:00 UTC utc, # latitude (degrees) latitude, # longitude (degrees) longitude, # position fix status (invalid = False, valid = True) posfix, # number of satellites [measurements] used for fix 0 <= satellites <= 12 satellites, # horizontal dilution of precision hdop, # (altitude according to WGS-84 ellipsoid, units (always 'M' for meters)) altitude, # (geoid separation according to WGS-84 ellipsoid, units (always 'M' for meters)) geoid, # None, for compatability w/ NMEA code dgps, ) def decode_id(self, message): assert len(message) == 106, "Receiver ID Message should be 59 words total (106 byte message)" ticks, msgseq, channels, software_version, software_date, options_list, reserved = struct.unpack('<Lh20s20s20s20s20s', message) channels, software_version, software_date, options_list = map(lambda s: s.split('\0')[0], (channels, software_version, software_date, options_list)) software_version = float(software_version) channels = int(channels) # 0-12 .. but ALWAYS 12, so we ignore. 
options_list = int(options_list[:4], 16) # only two bitflags, others are reserved minimize_rom = (options_list & 0x01) > 0 minimize_ram = (options_list & 0x02) > 0 # (version info), (options info) return ((software_version, software_date), (minimize_rom, minimize_ram)) def decode_channels(self, message): assert len(message) == 90, "Channel Summary Message should be 51 words total (90 byte message)" ticks, msgseq, satseq, gpswk, gpsws, gpsns = struct.unpack('<LhhHLL', message[:18]) channels = [] message = message[18:] for i in range(12): flags, prn, cno = struct.unpack('<HHH', message[6 * i:6 * (i + 1)]) # measurement used, ephemeris available, measurement valid, dgps corrections available flags = (flags & 0x01, flags & 0x02, flags & 0x04, flags & 0x08) channels.append((flags, prn, cno)) # ((flags, satellite PRN, C/No in dbHz)) for 12 channels # satellite message sequence number # gps week number, gps seconds in week (??), gps nanoseconds from Epoch return (tuple(channels),) #, satseq, (gpswk, gpsws, gpsns)) def decode_satellites(self, message): assert len(message) == 90, "Visible Satellites Message should be 51 words total (90 byte message)" ticks, msgseq, gdop, pdop, hdop, vdop, tdop, numsatellites = struct.unpack('<LhhhhhhH', message[:18]) gdop, pdop, hdop, vdop, tdop = map(lambda n: float(n) * 0.01, (gdop, pdop, hdop, vdop, tdop)) satellites = [] message = message[18:] for i in range(numsatellites): prn, azi, elev = struct.unpack('<Hhh', message[6 * i:6 * (i + 1)]) azi, elev = map(lambda n: (float(n) * 0.0180 / math.pi), (azi, elev)) satellites.append((prn, azi, elev)) # ((PRN [0, 32], azimuth +-[0.0, 180.0] deg, elevation +-[0.0, 90.0] deg)) satellite info (0-12) # (geometric, position, horizontal, vertical, time) dilution of precision return (tuple(satellites), (gdop, pdop, hdop, vdop, tdop)) def decode_dgps(self, message): assert len(message) == 38, "Differential GPS Status Message should be 25 words total (38 byte message)" raise NotImplementedError def decode_ecef(self, message): assert len(message) == 96, "ECEF Position Status Output Message should be 54 words total (96 byte message)" raise NotImplementedError def decode_channelmeas(self, message): assert len(message) == 296, "Channel Measurement Message should be 154 words total (296 byte message)" raise NotImplementedError def decode_usersettings(self, message): assert len(message) == 32, "User-Settings Output Message should be 22 words total (32 byte message)" raise NotImplementedError def decode_testresults(self, message): assert len(message) == 28, "Built-In Test Results Message should be 20 words total (28 byte message)" raise NotImplementedError def decode_meastimemark(self, message): assert len(message) == 494, "Measurement Time Mark Message should be 253 words total (494 byte message)" raise NotImplementedError def decode_utctimemark(self, message): assert len(message) == 28, "UTC Time Mark Pulse Output Message should be 20 words total (28 byte message)" raise NotImplementedError def decode_serial(self, message): assert len(message) == 30, "Serial Port Communication Parameters In Use Message should be 21 words total (30 byte message)" raise NotImplementedError def decode_eepromupdate(self, message): assert len(message) == 8, "EEPROM Update Message should be 10 words total (8 byte message)" raise NotImplementedError def decode_eepromstatus(self, message): assert len(message) == 24, "EEPROM Status Message should be 18 words total (24 byte message)" raise NotImplementedError
apache-2.0
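For reference, a standalone sketch of the Zodiac header checksum that dataReceived() above verifies; it reproduces the arithmetic only, with the value normalized so struct.pack stays in range on any Python version:

import struct

def header_checksum(sync, msg_id, length, acknak):
    # The protocol stores the negated 16-bit sum of the four header
    # words as a signed little-endian short.
    total = (sync + msg_id + length + acknak) & 0xFFFF
    return struct.unpack('<h', struct.pack('<H', (-total) & 0xFFFF))[0]

# The sync word is always 0xFF81 (the '\xFF\x81' check above), e.g. a
# Receiver ID message (1011) with a 53-word payload and no ack/nak:
print(header_checksum(0xFF81, 1011, 53, 0))
# -> -937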
kkk669/mxnet
example/speech_recognition/stt_io_iter.py
44
5098
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import sys sys.path.insert(0, "../../python") import mxnet as mx import random class SimpleBatch(object): def __init__(self, data_names, data, label_names, label): self.data = data self.label = label self.data_names = data_names self.label_names = label_names self.pad = 0 self.index = None self.effective_sample_count = None @property def provide_data(self): return [(n, x.shape) for n, x in zip(self.data_names, self.data)] @property def provide_label(self): return [(n, x.shape) for n, x in zip(self.label_names, self.label)] class STTIter(mx.io.DataIter): def __init__(self, count, datagen, batch_size, num_label, init_states, seq_length, width, height, sort_by_duration=True, is_bi_graphemes=False, partition="train", save_feature_as_csvfile=False): super(STTIter, self).__init__() self.batch_size = batch_size self.num_label = num_label self.init_states = init_states self.init_state_arrays = [mx.nd.zeros(x[1]) for x in init_states] self.datagen = datagen self.provide_data = [('data', (batch_size, seq_length, width * height))] + init_states self.provide_label = [('label', (self.batch_size, num_label))] self.count = count self.is_bi_graphemes = is_bi_graphemes # self.partition = datagen.partition if partition == 'train': durations = datagen.train_durations audio_paths = datagen.train_audio_paths texts = datagen.train_texts elif partition == 'validation': durations = datagen.val_durations audio_paths = datagen.val_audio_paths texts = datagen.val_texts elif partition == 'test': durations = datagen.test_durations audio_paths = datagen.test_audio_paths texts = datagen.test_texts else: raise Exception("Invalid partition to load metadata. 
" "Must be train/validation/test") # if sortagrad if sort_by_duration: durations, audio_paths, texts = datagen.sort_by_duration(durations, audio_paths, texts) else: durations = durations audio_paths = audio_paths texts = texts self.trainDataList = zip(durations, audio_paths, texts) # to shuffle data if not sort_by_duration: random.shuffle(self.trainDataList) self.trainDataIter = iter(self.trainDataList) self.is_first_epoch = True self.save_feature_as_csvfile = save_feature_as_csvfile def __iter__(self): init_state_names = [x[0] for x in self.init_states] for k in range(int(self.count / self.batch_size)): audio_paths = [] texts = [] for i in range(self.batch_size): try: duration, audio_path, text = self.trainDataIter.next() except: random.shuffle(self.trainDataList) self.trainDataIter = iter(self.trainDataList) duration, audio_path, text = self.trainDataIter.next() audio_paths.append(audio_path) texts.append(text) if self.is_first_epoch: data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=True, is_bi_graphemes=self.is_bi_graphemes, save_feature_as_csvfile=self.save_feature_as_csvfile) else: data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=False, is_bi_graphemes=self.is_bi_graphemes, save_feature_as_csvfile=self.save_feature_as_csvfile) data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays label_all = [mx.nd.array(data_set['y'])] data_names = ['data'] + init_state_names label_names = ['label'] data_batch = SimpleBatch(data_names, data_all, label_names, label_all) yield data_batch def reset(self): pass
apache-2.0
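The iterator above is consumed like any other mx.io.DataIter: pull SimpleBatch objects from it, and let provide_data/provide_label describe the shapes a module would bind against. A minimal sketch of that usage, with a hypothetical StubDataGen standing in for the real feature generator (only the attributes STTIter actually touches are stubbed; names and shapes here are illustrative):

import numpy as np

class StubDataGen(object):
    # Hypothetical stand-in for the real data generator: it supplies only
    # the attributes and methods STTIter reads.
    def __init__(self, n):
        self.train_durations = [1.0] * n
        self.train_audio_paths = ['utt_%d.wav' % i for i in range(n)]
        self.train_texts = ['hello'] * n

    def sort_by_duration(self, durations, audio_paths, texts):
        return durations, audio_paths, texts

    def prepare_minibatch(self, audio_paths, texts, **kwargs):
        # x: (batch, seq_length, width * height), y: (batch, num_label)
        return {'x': np.zeros((len(audio_paths), 100, 161)),
                'y': np.zeros((len(texts), 10))}

init_states = [('l0_init_h', (4, 256))]
data_iter = STTIter(count=8, datagen=StubDataGen(8), batch_size=4,
                    num_label=10, init_states=init_states,
                    seq_length=100, width=161, height=1,
                    partition='train')
for batch in data_iter:
    # each item is a SimpleBatch whose shape metadata a module can bind to
    print(batch.provide_data, batch.provide_label)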
rizumu/django
tests/forms_tests/field_tests/test_multivaluefield.py
156
5661
from datetime import datetime from django.forms import ( CharField, Form, MultipleChoiceField, MultiValueField, MultiWidget, SelectMultiple, SplitDateTimeField, SplitDateTimeWidget, TextInput, ValidationError, ) from django.test import SimpleTestCase beatles = (('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')) class ComplexMultiWidget(MultiWidget): def __init__(self, attrs=None): widgets = ( TextInput(), SelectMultiple(choices=beatles), SplitDateTimeWidget(), ) super(ComplexMultiWidget, self).__init__(widgets, attrs) def decompress(self, value): if value: data = value.split(',') return [ data[0], list(data[1]), datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S"), ] return [None, None, None] def format_output(self, rendered_widgets): return '\n'.join(rendered_widgets) class ComplexField(MultiValueField): def __init__(self, required=True, widget=None, label=None, initial=None): fields = ( CharField(), MultipleChoiceField(choices=beatles), SplitDateTimeField(), ) super(ComplexField, self).__init__(fields, required, widget, label, initial) def compress(self, data_list): if data_list: return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2]) return None class ComplexFieldForm(Form): field1 = ComplexField(widget=ComplexMultiWidget()) class MultiValueFieldTest(SimpleTestCase): @classmethod def setUpClass(cls): cls.field = ComplexField(widget=ComplexMultiWidget()) super(MultiValueFieldTest, cls).setUpClass() def test_clean(self): self.assertEqual( self.field.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]), 'some text,JP,2007-04-25 06:24:00', ) def test_bad_choice(self): msg = "'Select a valid choice. X is not one of the available choices.'" with self.assertRaisesMessage(ValidationError, msg): self.field.clean(['some text', ['X'], ['2007-04-25', '6:24:00']]) def test_no_value(self): """ If insufficient data is provided, None is substituted. """ msg = "'This field is required.'" with self.assertRaisesMessage(ValidationError, msg): self.field.clean(['some text', ['JP']]) def test_has_changed_no_initial(self): self.assertTrue(self.field.has_changed( None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']], )) def test_has_changed_same(self): self.assertFalse(self.field.has_changed( 'some text,JP,2007-04-25 06:24:00', ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']], )) def test_has_changed_first_widget(self): """ Test when the first widget's data has changed. """ self.assertTrue(self.field.has_changed( 'some text,JP,2007-04-25 06:24:00', ['other text', ['J', 'P'], ['2007-04-25', '6:24:00']], )) def test_has_changed_last_widget(self): """ Test when the last widget's data has changed. This ensures that it is not short circuiting while testing the widgets. 
""" self.assertTrue(self.field.has_changed( 'some text,JP,2007-04-25 06:24:00', ['some text', ['J', 'P'], ['2009-04-25', '11:44:00']], )) def test_form_as_table(self): form = ComplexFieldForm() self.assertHTMLEqual( form.as_table(), """ <tr><th><label for="id_field1_0">Field1:</label></th> <td><input type="text" name="field1_0" id="id_field1_0" /> <select multiple="multiple" name="field1_1" id="id_field1_1"> <option value="J">John</option> <option value="P">Paul</option> <option value="G">George</option> <option value="R">Ringo</option> </select> <input type="text" name="field1_2_0" id="id_field1_2_0" /> <input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr> """, ) def test_form_as_table_data(self): form = ComplexFieldForm({ 'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00', }) self.assertHTMLEqual( form.as_table(), """ <tr><th><label for="id_field1_0">Field1:</label></th> <td><input type="text" name="field1_0" value="some text" id="id_field1_0" /> <select multiple="multiple" name="field1_1" id="id_field1_1"> <option value="J" selected="selected">John</option> <option value="P" selected="selected">Paul</option> <option value="G">George</option> <option value="R">Ringo</option> </select> <input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" /> <input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr> """, ) def test_form_cleaned_data(self): form = ComplexFieldForm({ 'field1_0': 'some text', 'field1_1': ['J', 'P'], 'field1_2_0': '2007-04-25', 'field1_2_1': '06:24:00', }) form.is_valid() self.assertEqual( form.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00', )
bsd-3-clause
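The test module above exercises the general MultiValueField/MultiWidget contract: decompress() splits one stored value into per-widget values, and compress() joins the cleaned per-widget values back into one. A minimal sketch of the same pattern with two plain CharFields — the PhoneField/PhoneWidget names are illustrative, not part of Django, and the snippet assumes a configured Django settings module:

from django.forms import CharField, MultiValueField, MultiWidget, TextInput

class PhoneWidget(MultiWidget):
    def __init__(self, attrs=None):
        super(PhoneWidget, self).__init__((TextInput(), TextInput()), attrs)

    def decompress(self, value):
        # split the stored 'area-number' string back into widget values
        return value.split('-') if value else [None, None]

class PhoneField(MultiValueField):
    def __init__(self, **kwargs):
        super(PhoneField, self).__init__(fields=(CharField(), CharField()), **kwargs)

    def compress(self, data_list):
        # join the per-widget cleaned values into the stored form
        return '-'.join(data_list) if data_list else None

field = PhoneField(widget=PhoneWidget())
assert field.clean(['555', '0100']) == '555-0100'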
3upperm2n/HMM_cuda
unit_test/gmock/gmock-1.7.0/gtest/scripts/pump.py
2471
23673
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """pump v0.2.0 - Pretty Useful for Meta Programming. A tool for preprocessor meta programming. Useful for generating repetitive boilerplate code. Especially useful for writing C++ classes, functions, macros, and templates that need to work with various number of arguments. USAGE: pump.py SOURCE_FILE EXAMPLES: pump.py foo.cc.pump Converts foo.cc.pump to foo.cc. GRAMMAR: CODE ::= ATOMIC_CODE* ATOMIC_CODE ::= $var ID = EXPRESSION | $var ID = [[ CODE ]] | $range ID EXPRESSION..EXPRESSION | $for ID SEPARATOR [[ CODE ]] | $($) | $ID | $(EXPRESSION) | $if EXPRESSION [[ CODE ]] ELSE_BRANCH | [[ CODE ]] | RAW_CODE SEPARATOR ::= RAW_CODE | EMPTY ELSE_BRANCH ::= $else [[ CODE ]] | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH | EMPTY EXPRESSION has Python syntax. 
""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import re import sys TOKEN_TABLE = [ (re.compile(r'\$var\s+'), '$var'), (re.compile(r'\$elif\s+'), '$elif'), (re.compile(r'\$else\s+'), '$else'), (re.compile(r'\$for\s+'), '$for'), (re.compile(r'\$if\s+'), '$if'), (re.compile(r'\$range\s+'), '$range'), (re.compile(r'\$[_A-Za-z]\w*'), '$id'), (re.compile(r'\$\(\$\)'), '$($)'), (re.compile(r'\$'), '$'), (re.compile(r'\[\[\n?'), '[['), (re.compile(r'\]\]\n?'), ']]'), ] class Cursor: """Represents a position (line and column) in a text file.""" def __init__(self, line=-1, column=-1): self.line = line self.column = column def __eq__(self, rhs): return self.line == rhs.line and self.column == rhs.column def __ne__(self, rhs): return not self == rhs def __lt__(self, rhs): return self.line < rhs.line or ( self.line == rhs.line and self.column < rhs.column) def __le__(self, rhs): return self < rhs or self == rhs def __gt__(self, rhs): return rhs < self def __ge__(self, rhs): return rhs <= self def __str__(self): if self == Eof(): return 'EOF' else: return '%s(%s)' % (self.line + 1, self.column) def __add__(self, offset): return Cursor(self.line, self.column + offset) def __sub__(self, offset): return Cursor(self.line, self.column - offset) def Clone(self): """Returns a copy of self.""" return Cursor(self.line, self.column) # Special cursor to indicate the end-of-file. def Eof(): """Returns the special cursor to denote the end-of-file.""" return Cursor(-1, -1) class Token: """Represents a token in a Pump source file.""" def __init__(self, start=None, end=None, value=None, token_type=None): if start is None: self.start = Eof() else: self.start = start if end is None: self.end = Eof() else: self.end = end self.value = value self.token_type = token_type def __str__(self): return 'Token @%s: \'%s\' type=%s' % ( self.start, self.value, self.token_type) def Clone(self): """Returns a copy of self.""" return Token(self.start.Clone(), self.end.Clone(), self.value, self.token_type) def StartsWith(lines, pos, string): """Returns True iff the given position in lines starts with 'string'.""" return lines[pos.line][pos.column:].startswith(string) def FindFirstInLine(line, token_table): best_match_start = -1 for (regex, token_type) in token_table: m = regex.search(line) if m: # We found regex in lines if best_match_start < 0 or m.start() < best_match_start: best_match_start = m.start() best_match_length = m.end() - m.start() best_match_token_type = token_type if best_match_start < 0: return None return (best_match_start, best_match_length, best_match_token_type) def FindFirst(lines, token_table, cursor): """Finds the first occurrence of any string in strings in lines.""" start = cursor.Clone() cur_line_number = cursor.line for line in lines[start.line:]: if cur_line_number == start.line: line = line[start.column:] m = FindFirstInLine(line, token_table) if m: # We found a regex in line. 
(start_column, length, token_type) = m if cur_line_number == start.line: start_column += start.column found_start = Cursor(cur_line_number, start_column) found_end = found_start + length return MakeToken(lines, found_start, found_end, token_type) cur_line_number += 1 # We failed to find str in lines return None def SubString(lines, start, end): """Returns a substring in lines.""" if end == Eof(): end = Cursor(len(lines) - 1, len(lines[-1])) if start >= end: return '' if start.line == end.line: return lines[start.line][start.column:end.column] result_lines = ([lines[start.line][start.column:]] + lines[start.line + 1:end.line] + [lines[end.line][:end.column]]) return ''.join(result_lines) def StripMetaComments(str): """Strip meta comments from each line in the given string.""" # First, completely remove lines containing nothing but a meta # comment, including the trailing \n. str = re.sub(r'^\s*\$\$.*\n', '', str) # Then, remove meta comments from contentful lines. return re.sub(r'\s*\$\$.*', '', str) def MakeToken(lines, start, end, token_type): """Creates a new instance of Token.""" return Token(start, end, SubString(lines, start, end), token_type) def ParseToken(lines, pos, regex, token_type): line = lines[pos.line][pos.column:] m = regex.search(line) if m and not m.start(): return MakeToken(lines, pos, pos + m.end(), token_type) else: print 'ERROR: %s expected at %s.' % (token_type, pos) sys.exit(1) ID_REGEX = re.compile(r'[_A-Za-z]\w*') EQ_REGEX = re.compile(r'=') REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)') OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*') WHITE_SPACE_REGEX = re.compile(r'\s') DOT_DOT_REGEX = re.compile(r'\.\.') def Skip(lines, pos, regex): line = lines[pos.line][pos.column:] m = re.search(regex, line) if m and not m.start(): return pos + m.end() else: return pos def SkipUntil(lines, pos, regex, token_type): line = lines[pos.line][pos.column:] m = re.search(regex, line) if m: return pos + m.start() else: print ('ERROR: %s expected on line %s after column %s.' 
% (token_type, pos.line + 1, pos.column)) sys.exit(1) def ParseExpTokenInParens(lines, pos): def ParseInParens(pos): pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX) pos = Skip(lines, pos, r'\(') pos = Parse(pos) pos = Skip(lines, pos, r'\)') return pos def Parse(pos): pos = SkipUntil(lines, pos, r'\(|\)', ')') if SubString(lines, pos, pos + 1) == '(': pos = Parse(pos + 1) pos = Skip(lines, pos, r'\)') return Parse(pos) else: return pos start = pos.Clone() pos = ParseInParens(pos) return MakeToken(lines, start, pos, 'exp') def RStripNewLineFromToken(token): if token.value.endswith('\n'): return Token(token.start, token.end, token.value[:-1], token.token_type) else: return token def TokenizeLines(lines, pos): while True: found = FindFirst(lines, TOKEN_TABLE, pos) if not found: yield MakeToken(lines, pos, Eof(), 'code') return if found.start == pos: prev_token = None prev_token_rstripped = None else: prev_token = MakeToken(lines, pos, found.start, 'code') prev_token_rstripped = RStripNewLineFromToken(prev_token) if found.token_type == '$var': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) eq_token = ParseToken(lines, pos, EQ_REGEX, '=') yield eq_token pos = Skip(lines, eq_token.end, r'\s*') if SubString(lines, pos, pos + 2) != '[[': exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp') yield exp_token pos = Cursor(exp_token.end.line + 1, 0) elif found.token_type == '$for': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX) elif found.token_type == '$range': if prev_token_rstripped: yield prev_token_rstripped yield found id_token = ParseToken(lines, found.end, ID_REGEX, 'id') yield id_token pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..') yield MakeToken(lines, pos, dots_pos, 'exp') yield MakeToken(lines, dots_pos, dots_pos + 2, '..') pos = dots_pos + 2 new_pos = Cursor(pos.line + 1, 0) yield MakeToken(lines, pos, new_pos, 'exp') pos = new_pos elif found.token_type == '$': if prev_token: yield prev_token yield found exp_token = ParseExpTokenInParens(lines, found.end) yield exp_token pos = exp_token.end elif (found.token_type == ']]' or found.token_type == '$if' or found.token_type == '$elif' or found.token_type == '$else'): if prev_token_rstripped: yield prev_token_rstripped yield found pos = found.end else: if prev_token: yield prev_token yield found pos = found.end def Tokenize(s): """A generator that yields the tokens in the given string.""" if s != '': lines = s.splitlines(True) for token in TokenizeLines(lines, Cursor(0, 0)): yield token class CodeNode: def __init__(self, atomic_code_list=None): self.atomic_code = atomic_code_list class VarNode: def __init__(self, identifier=None, atomic_code=None): self.identifier = identifier self.atomic_code = atomic_code class RangeNode: def __init__(self, identifier=None, exp1=None, exp2=None): self.identifier = identifier self.exp1 = exp1 self.exp2 = exp2 class ForNode: def __init__(self, identifier=None, sep=None, code=None): self.identifier = identifier self.sep = sep self.code = code class ElseNode: def __init__(self, else_branch=None): self.else_branch = else_branch class IfNode: def __init__(self, exp=None, then_branch=None, else_branch=None): self.exp = exp self.then_branch = then_branch 
self.else_branch = else_branch class RawCodeNode: def __init__(self, token=None): self.raw_code = token class LiteralDollarNode: def __init__(self, token): self.token = token class ExpNode: def __init__(self, token, python_exp): self.token = token self.python_exp = python_exp def PopFront(a_list): head = a_list[0] a_list[:1] = [] return head def PushFront(a_list, elem): a_list[:0] = [elem] def PopToken(a_list, token_type=None): token = PopFront(a_list) if token_type is not None and token.token_type != token_type: print 'ERROR: %s expected at %s' % (token_type, token.start) print 'ERROR: %s found instead' % (token,) sys.exit(1) return token def PeekToken(a_list): if not a_list: return None return a_list[0] def ParseExpNode(token): python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value) return ExpNode(token, python_exp) def ParseElseNode(tokens): def Pop(token_type=None): return PopToken(tokens, token_type) next = PeekToken(tokens) if not next: return None if next.token_type == '$else': Pop('$else') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return code_node elif next.token_type == '$elif': Pop('$elif') exp = Pop('code') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') inner_else_node = ParseElseNode(tokens) return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)]) elif not next.value.strip(): Pop('code') return ParseElseNode(tokens) else: return None def ParseAtomicCodeNode(tokens): def Pop(token_type=None): return PopToken(tokens, token_type) head = PopFront(tokens) t = head.token_type if t == 'code': return RawCodeNode(head) elif t == '$var': id_token = Pop('id') Pop('=') next = PeekToken(tokens) if next.token_type == 'exp': exp_token = Pop() return VarNode(id_token, ParseExpNode(exp_token)) Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return VarNode(id_token, code_node) elif t == '$for': id_token = Pop('id') next_token = PeekToken(tokens) if next_token.token_type == 'code': sep_token = next_token Pop('code') else: sep_token = None Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') return ForNode(id_token, sep_token, code_node) elif t == '$if': exp_token = Pop('code') Pop('[[') code_node = ParseCodeNode(tokens) Pop(']]') else_node = ParseElseNode(tokens) return IfNode(ParseExpNode(exp_token), code_node, else_node) elif t == '$range': id_token = Pop('id') exp1_token = Pop('exp') Pop('..') exp2_token = Pop('exp') return RangeNode(id_token, ParseExpNode(exp1_token), ParseExpNode(exp2_token)) elif t == '$id': return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id')) elif t == '$($)': return LiteralDollarNode(head) elif t == '$': exp_token = Pop('exp') return ParseExpNode(exp_token) elif t == '[[': code_node = ParseCodeNode(tokens) Pop(']]') return code_node else: PushFront(tokens, head) return None def ParseCodeNode(tokens): atomic_code_list = [] while True: if not tokens: break atomic_code_node = ParseAtomicCodeNode(tokens) if atomic_code_node: atomic_code_list.append(atomic_code_node) else: break return CodeNode(atomic_code_list) def ParseToAST(pump_src_text): """Convert the given Pump source text into an AST.""" tokens = list(Tokenize(pump_src_text)) code_node = ParseCodeNode(tokens) return code_node class Env: def __init__(self): self.variables = [] self.ranges = [] def Clone(self): clone = Env() clone.variables = self.variables[:] clone.ranges = self.ranges[:] return clone def PushVariable(self, var, value): # If value looks like an int, store it as an int. 
try: int_value = int(value) if ('%s' % int_value) == value: value = int_value except Exception: pass self.variables[:0] = [(var, value)] def PopVariable(self): self.variables[:1] = [] def PushRange(self, var, lower, upper): self.ranges[:0] = [(var, lower, upper)] def PopRange(self): self.ranges[:1] = [] def GetValue(self, identifier): for (var, value) in self.variables: if identifier == var: return value print 'ERROR: meta variable %s is undefined.' % (identifier,) sys.exit(1) def EvalExp(self, exp): try: result = eval(exp.python_exp) except Exception, e: print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e) print ('ERROR: failed to evaluate meta expression %s at %s' % (exp.python_exp, exp.token.start)) sys.exit(1) return result def GetRange(self, identifier): for (var, lower, upper) in self.ranges: if identifier == var: return (lower, upper) print 'ERROR: range %s is undefined.' % (identifier,) sys.exit(1) class Output: def __init__(self): self.string = '' def GetLastLine(self): index = self.string.rfind('\n') if index < 0: return '' return self.string[index + 1:] def Append(self, s): self.string += s def RunAtomicCode(env, node, output): if isinstance(node, VarNode): identifier = node.identifier.value.strip() result = Output() RunAtomicCode(env.Clone(), node.atomic_code, result) value = result.string env.PushVariable(identifier, value) elif isinstance(node, RangeNode): identifier = node.identifier.value.strip() lower = int(env.EvalExp(node.exp1)) upper = int(env.EvalExp(node.exp2)) env.PushRange(identifier, lower, upper) elif isinstance(node, ForNode): identifier = node.identifier.value.strip() if node.sep is None: sep = '' else: sep = node.sep.value (lower, upper) = env.GetRange(identifier) for i in range(lower, upper + 1): new_env = env.Clone() new_env.PushVariable(identifier, i) RunCode(new_env, node.code, output) if i != upper: output.Append(sep) elif isinstance(node, RawCodeNode): output.Append(node.raw_code.value) elif isinstance(node, IfNode): cond = env.EvalExp(node.exp) if cond: RunCode(env.Clone(), node.then_branch, output) elif node.else_branch is not None: RunCode(env.Clone(), node.else_branch, output) elif isinstance(node, ExpNode): value = env.EvalExp(node) output.Append('%s' % (value,)) elif isinstance(node, LiteralDollarNode): output.Append('$') elif isinstance(node, CodeNode): RunCode(env.Clone(), node, output) else: print 'BAD' print node sys.exit(1) def RunCode(env, code_node, output): for atomic_code in code_node.atomic_code: RunAtomicCode(env, atomic_code, output) def IsSingleLineComment(cur_line): return '//' in cur_line def IsInPreprocessorDirective(prev_lines, cur_line): if cur_line.lstrip().startswith('#'): return True return prev_lines and prev_lines[-1].endswith('\\') def WrapComment(line, output): loc = line.find('//') before_comment = line[:loc].rstrip() if before_comment == '': indent = loc else: output.append(before_comment) indent = len(before_comment) - len(before_comment.lstrip()) prefix = indent*' ' + '// ' max_len = 80 - len(prefix) comment = line[loc + 2:].strip() segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != ''] cur_line = '' for seg in segs: if len((cur_line + seg).rstrip()) < max_len: cur_line += seg else: if cur_line.strip() != '': output.append(prefix + cur_line.rstrip()) cur_line = seg.lstrip() if cur_line.strip() != '': output.append(prefix + cur_line.strip()) def WrapCode(line, line_concat, output): indent = len(line) - len(line.lstrip()) prefix = indent*' ' # Prefix of the current line max_len = 80 - indent - 
len(line_concat) # Maximum length of the current line new_prefix = prefix + 4*' ' # Prefix of a continuation line new_max_len = max_len - 4 # Maximum length of a continuation line # Prefers to wrap a line after a ',' or ';'. segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != ''] cur_line = '' # The current line without leading spaces. for seg in segs: # If the line is still too long, wrap at a space. while cur_line == '' and len(seg.strip()) > max_len: seg = seg.lstrip() split_at = seg.rfind(' ', 0, max_len) output.append(prefix + seg[:split_at].strip() + line_concat) seg = seg[split_at + 1:] prefix = new_prefix max_len = new_max_len if len((cur_line + seg).rstrip()) < max_len: cur_line = (cur_line + seg).lstrip() else: output.append(prefix + cur_line.rstrip() + line_concat) prefix = new_prefix max_len = new_max_len cur_line = seg.lstrip() if cur_line.strip() != '': output.append(prefix + cur_line.strip()) def WrapPreprocessorDirective(line, output): WrapCode(line, ' \\', output) def WrapPlainCode(line, output): WrapCode(line, '', output) def IsMultiLineIWYUPragma(line): return re.search(r'/\* IWYU pragma: ', line) def IsHeaderGuardIncludeOrOneLineIWYUPragma(line): return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or re.match(r'^#include\s', line) or # Don't break IWYU pragmas, either; that causes iwyu.py problems. re.search(r'// IWYU pragma: ', line)) def WrapLongLine(line, output): line = line.rstrip() if len(line) <= 80: output.append(line) elif IsSingleLineComment(line): if IsHeaderGuardIncludeOrOneLineIWYUPragma(line): # The style guide made an exception to allow long header guard lines, # includes and IWYU pragmas. output.append(line) else: WrapComment(line, output) elif IsInPreprocessorDirective(output, line): if IsHeaderGuardIncludeOrOneLineIWYUPragma(line): # The style guide made an exception to allow long header guard lines, # includes and IWYU pragmas. output.append(line) else: WrapPreprocessorDirective(line, output) elif IsMultiLineIWYUPragma(line): output.append(line) else: WrapPlainCode(line, output) def BeautifyCode(string): lines = string.splitlines() output = [] for line in lines: WrapLongLine(line, output) output2 = [line.rstrip() for line in output] return '\n'.join(output2) + '\n' def ConvertFromPumpSource(src_text): """Return the text generated from the given Pump source text.""" ast = ParseToAST(StripMetaComments(src_text)) output = Output() RunCode(Env(), ast, output) return BeautifyCode(output.string) def main(argv): if len(argv) == 1: print __doc__ sys.exit(1) file_path = argv[-1] output_str = ConvertFromPumpSource(file(file_path, 'r').read()) if file_path.endswith('.pump'): output_file_path = file_path[:-5] else: output_file_path = '-' if output_file_path == '-': print output_str, else: output_file = file(output_file_path, 'w') output_file.write('// This file was GENERATED by command:\n') output_file.write('// %s %s\n' % (os.path.basename(__file__), os.path.basename(file_path))) output_file.write('// DO NOT EDIT BY HAND!!!\n\n') output_file.write(output_str) output_file.close() if __name__ == '__main__': main(sys.argv)
mit
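To make the grammar in the docstring concrete: a $range declares the loop bounds, and $for expands its [[ ]] body once per value, inserting the separator text between iterations. A small sketch of driving ConvertFromPumpSource (defined above) directly — the template text is invented for illustration, and the snippet uses Python 2 print syntax to match the script itself:

pump_src = ('$range i 1..3\n'
            'The types are $for i, [[T$i]].\n')
print ConvertFromPumpSource(pump_src)
# Expected expansion, per the $for/$range rules above:
#   The types are T1, T2, T3.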
javierlgroba/Eventer-gapp
django/core/management/commands/inspectdb.py
110
9570
from __future__ import unicode_literals import keyword import re from optparse import make_option from django.core.management.base import NoArgsCommand, CommandError from django.db import connections, DEFAULT_DB_ALIAS from django.utils import six class Command(NoArgsCommand): help = "Introspects the database tables in the given database and outputs a Django model module." option_list = NoArgsCommand.option_list + ( make_option('--database', action='store', dest='database', default=DEFAULT_DB_ALIAS, help='Nominates a database to ' 'introspect. Defaults to using the "default" database.'), ) requires_model_validation = False db_module = 'django.db' def handle_noargs(self, **options): try: for line in self.handle_inspection(options): self.stdout.write("%s\n" % line) except NotImplementedError: raise CommandError("Database inspection isn't supported for the currently selected database backend.") def handle_inspection(self, options): connection = connections[options.get('database')] # 'table_name_filter' is a stealth option table_name_filter = options.get('table_name_filter') table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '') strip_prefix = lambda s: s.startswith("u'") and s[1:] or s cursor = connection.cursor() yield "# This is an auto-generated Django model module." yield "# You'll have to do the following manually to clean this up:" yield "# * Rearrange models' order" yield "# * Make sure each model has one field with primary_key=True" yield "# Feel free to rename the models, but don't rename db_table values or field names." yield "#" yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [appname]'" yield "# into your database." yield "from __future__ import unicode_literals" yield '' yield 'from %s import models' % self.db_module yield '' known_models = [] for table_name in connection.introspection.table_names(cursor): if table_name_filter is not None and callable(table_name_filter): if not table_name_filter(table_name): continue yield 'class %s(models.Model):' % table2model(table_name) known_models.append(table2model(table_name)) try: relations = connection.introspection.get_relations(cursor, table_name) except NotImplementedError: relations = {} try: indexes = connection.introspection.get_indexes(cursor, table_name) except NotImplementedError: indexes = {} used_column_names = [] # Holds column names used in the table so far for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)): comment_notes = [] # Holds Field notes, to be displayed in a Python comment. extra_params = {} # Holds Field parameters such as 'db_column'. column_name = row[0] is_relation = i in relations att_name, params, notes = self.normalize_col_name( column_name, used_column_names, is_relation) extra_params.update(params) comment_notes.extend(notes) used_column_names.append(att_name) # Add primary_key and unique, if necessary. if column_name in indexes: if indexes[column_name]['primary_key']: extra_params['primary_key'] = True elif indexes[column_name]['unique']: extra_params['unique'] = True if is_relation: rel_to = relations[i][1] == table_name and "'self'" or table2model(relations[i][1]) if rel_to in known_models: field_type = 'ForeignKey(%s' % rel_to else: field_type = "ForeignKey('%s'" % rel_to else: # Calling `get_field_type` to get the field type string and any # additional paramters and notes. 
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row) extra_params.update(field_params) comment_notes.extend(field_notes) field_type += '(' # Don't output 'id = meta.AutoField(primary_key=True)', because # that's assumed if it doesn't exist. if att_name == 'id' and field_type == 'AutoField(' and extra_params == {'primary_key': True}: continue # Add 'null' and 'blank', if the 'null_ok' flag was present in the # table description. if row[6]: # If it's NULL... extra_params['blank'] = True if not field_type in ('TextField(', 'CharField('): extra_params['null'] = True field_desc = '%s = models.%s' % (att_name, field_type) if extra_params: if not field_desc.endswith('('): field_desc += ', ' field_desc += ', '.join([ '%s=%s' % (k, strip_prefix(repr(v))) for k, v in extra_params.items()]) field_desc += ')' if comment_notes: field_desc += ' # ' + ' '.join(comment_notes) yield ' %s' % field_desc for meta_line in self.get_meta(table_name): yield meta_line def normalize_col_name(self, col_name, used_column_names, is_relation): """ Modify the column name to make it Python-compatible as a field name """ field_params = {} field_notes = [] new_name = col_name.lower() if new_name != col_name: field_notes.append('Field name made lowercase.') if is_relation: if new_name.endswith('_id'): new_name = new_name[:-3] else: field_params['db_column'] = col_name new_name, num_repl = re.subn(r'\W', '_', new_name) if num_repl > 0: field_notes.append('Field renamed to remove unsuitable characters.') if new_name.find('__') >= 0: while new_name.find('__') >= 0: new_name = new_name.replace('__', '_') if col_name.lower().find('__') >= 0: # Only add the comment if the double underscore was in the original name field_notes.append("Field renamed because it contained more than one '_' in a row.") if new_name.startswith('_'): new_name = 'field%s' % new_name field_notes.append("Field renamed because it started with '_'.") if new_name.endswith('_'): new_name = '%sfield' % new_name field_notes.append("Field renamed because it ended with '_'.") if keyword.iskeyword(new_name): new_name += '_field' field_notes.append('Field renamed because it was a Python reserved word.') if new_name[0].isdigit(): new_name = 'number_%s' % new_name field_notes.append("Field renamed because it wasn't a valid Python identifier.") if new_name in used_column_names: num = 0 while '%s_%d' % (new_name, num) in used_column_names: num += 1 new_name = '%s_%d' % (new_name, num) field_notes.append('Field renamed because of name conflict.') if col_name != new_name and field_notes: field_params['db_column'] = col_name return new_name, field_params, field_notes def get_field_type(self, connection, table_name, row): """ Given the database connection, the table name, and the cursor row description, this routine will return the given field type name, as well as any additional keyword parameters and notes for the field. """ field_params = {} field_notes = [] try: field_type = connection.introspection.get_field_type(row[1], row) except KeyError: field_type = 'TextField' field_notes.append('This field type is a guess.') # This is a hook for DATA_TYPES_REVERSE to return a tuple of # (field_type, field_params_dict). if type(field_type) is tuple: field_type, new_params = field_type field_params.update(new_params) # Add max_length for all CharFields. 
if field_type == 'CharField' and row[3]: field_params['max_length'] = row[3] if field_type == 'DecimalField': field_params['max_digits'] = row[4] field_params['decimal_places'] = row[5] return field_type, field_params, field_notes def get_meta(self, table_name): """ Return a sequence comprising the lines of code necessary to construct the inner Meta class for the model corresponding to the given database table name. """ return [" class Meta:", " db_table = '%s'" % table_name, ""]
lgpl-3.0
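As a usage note for the command above: inspectdb is normally run from a project checkout with stdout redirected into a draft models module, and it can also be driven programmatically. A minimal sketch, assuming a configured Django project whose default database already contains tables:

# Shell usage: write a draft models module for a legacy schema.
#   python manage.py inspectdb > legacy_models.py

# Programmatic usage via call_command; output goes to stdout:
from django.core.management import call_command
call_command('inspectdb', database='default')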
sumedh123/debatify
venv/lib/python2.7/site-packages/jinja2/parser.py
336
35442
# -*- coding: utf-8 -*- """ jinja2.parser ~~~~~~~~~~~~~ Implements the template parser. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ from jinja2 import nodes from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError from jinja2.lexer import describe_token, describe_token_expr from jinja2._compat import imap _statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print', 'macro', 'include', 'from', 'import', 'set']) _compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq']) class Parser(object): """This is the central parsing class Jinja2 uses. It's passed to extensions and can be used to parse expressions or statements. """ def __init__(self, environment, source, name=None, filename=None, state=None): self.environment = environment self.stream = environment._tokenize(source, name, filename, state) self.name = name self.filename = filename self.closed = False self.extensions = {} for extension in environment.iter_extensions(): for tag in extension.tags: self.extensions[tag] = extension.parse self._last_identifier = 0 self._tag_stack = [] self._end_token_stack = [] def fail(self, msg, lineno=None, exc=TemplateSyntaxError): """Convenience method that raises `exc` with the message, passed line number or last line number as well as the current name and filename. """ if lineno is None: lineno = self.stream.current.lineno raise exc(msg, lineno, self.name, self.filename) def _fail_ut_eof(self, name, end_token_stack, lineno): expected = [] for exprs in end_token_stack: expected.extend(imap(describe_token_expr, exprs)) if end_token_stack: currently_looking = ' or '.join( "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]) else: currently_looking = None if name is None: message = ['Unexpected end of template.'] else: message = ['Encountered unknown tag \'%s\'.' % name] if currently_looking: if name is not None and name in expected: message.append('You probably made a nesting mistake. Jinja ' 'is expecting this tag, but currently looking ' 'for %s.' % currently_looking) else: message.append('Jinja was looking for the following tags: ' '%s.' % currently_looking) if self._tag_stack: message.append('The innermost block that needs to be ' 'closed is \'%s\'.' % self._tag_stack[-1]) self.fail(' '.join(message), lineno) def fail_unknown_tag(self, name, lineno=None): """Called if the parser encounters an unknown tag. Tries to fail with a human readable error message that could help to identify the problem. 
""" return self._fail_ut_eof(name, self._end_token_stack, lineno) def fail_eof(self, end_tokens=None, lineno=None): """Like fail_unknown_tag but for end of template situations.""" stack = list(self._end_token_stack) if end_tokens is not None: stack.append(end_tokens) return self._fail_ut_eof(None, stack, lineno) def is_tuple_end(self, extra_end_rules=None): """Are we at the end of a tuple?""" if self.stream.current.type in ('variable_end', 'block_end', 'rparen'): return True elif extra_end_rules is not None: return self.stream.current.test_any(extra_end_rules) return False def free_identifier(self, lineno=None): """Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" self._last_identifier += 1 rv = object.__new__(nodes.InternalName) nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno) return rv def parse_statement(self): """Parse a single statement.""" token = self.stream.current if token.type != 'name': self.fail('tag name expected', token.lineno) self._tag_stack.append(token.value) pop_tag = True try: if token.value in _statement_keywords: return getattr(self, 'parse_' + self.stream.current.value)() if token.value == 'call': return self.parse_call_block() if token.value == 'filter': return self.parse_filter_block() ext = self.extensions.get(token.value) if ext is not None: return ext(self) # did not work out, remove the token we pushed by accident # from the stack so that the unknown tag fail function can # produce a proper error message. self._tag_stack.pop() pop_tag = False self.fail_unknown_tag(token.value, token.lineno) finally: if pop_tag: self._tag_stack.pop() def parse_statements(self, end_tokens, drop_needle=False): """Parse multiple statements into a list until one of the end tokens is reached. This is used to parse the body of statements as it also parses template data if appropriate. The parser checks first if the current token is a colon and skips it if there is one. Then it checks for the block end and parses until if one of the `end_tokens` is reached. Per default the active token in the stream at the end of the call is the matched end token. If this is not wanted `drop_needle` can be set to `True` and the end token is removed. """ # the first token may be a colon for python compatibility self.stream.skip_if('colon') # in the future it would be possible to add whole code sections # by adding some sort of end of statement token and parsing those here. 
self.stream.expect('block_end') result = self.subparse(end_tokens) # we reached the end of the template too early, the subparser # does not check for this, so we do that now if self.stream.current.type == 'eof': self.fail_eof(end_tokens) if drop_needle: next(self.stream) return result def parse_set(self): """Parse an assign statement.""" lineno = next(self.stream).lineno target = self.parse_assign_target() if self.stream.skip_if('assign'): expr = self.parse_tuple() return nodes.Assign(target, expr, lineno=lineno) body = self.parse_statements(('name:endset',), drop_needle=True) return nodes.AssignBlock(target, body, lineno=lineno) def parse_for(self): """Parse a for loop.""" lineno = self.stream.expect('name:for').lineno target = self.parse_assign_target(extra_end_rules=('name:in',)) self.stream.expect('name:in') iter = self.parse_tuple(with_condexpr=False, extra_end_rules=('name:recursive',)) test = None if self.stream.skip_if('name:if'): test = self.parse_expression() recursive = self.stream.skip_if('name:recursive') body = self.parse_statements(('name:endfor', 'name:else')) if next(self.stream).value == 'endfor': else_ = [] else: else_ = self.parse_statements(('name:endfor',), drop_needle=True) return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno) def parse_if(self): """Parse an if construct.""" node = result = nodes.If(lineno=self.stream.expect('name:if').lineno) while 1: node.test = self.parse_tuple(with_condexpr=False) node.body = self.parse_statements(('name:elif', 'name:else', 'name:endif')) token = next(self.stream) if token.test('name:elif'): new_node = nodes.If(lineno=self.stream.current.lineno) node.else_ = [new_node] node = new_node continue elif token.test('name:else'): node.else_ = self.parse_statements(('name:endif',), drop_needle=True) else: node.else_ = [] break return result def parse_block(self): node = nodes.Block(lineno=next(self.stream).lineno) node.name = self.stream.expect('name').value node.scoped = self.stream.skip_if('name:scoped') # common problem people encounter when switching from django # to jinja. we do not support hyphens in block names, so let's # raise a nicer error message in that case. 
if self.stream.current.type == 'sub': self.fail('Block names in Jinja have to be valid Python ' 'identifiers and may not contain hyphens, use an ' 'underscore instead.') node.body = self.parse_statements(('name:endblock',), drop_needle=True) self.stream.skip_if('name:' + node.name) return node def parse_extends(self): node = nodes.Extends(lineno=next(self.stream).lineno) node.template = self.parse_expression() return node def parse_import_context(self, node, default): if self.stream.current.test_any('name:with', 'name:without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() else: node.with_context = default return node def parse_include(self): node = nodes.Include(lineno=next(self.stream).lineno) node.template = self.parse_expression() if self.stream.current.test('name:ignore') and \ self.stream.look().test('name:missing'): node.ignore_missing = True self.stream.skip(2) else: node.ignore_missing = False return self.parse_import_context(node, True) def parse_import(self): node = nodes.Import(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:as') node.target = self.parse_assign_target(name_only=True).name return self.parse_import_context(node, False) def parse_from(self): node = nodes.FromImport(lineno=next(self.stream).lineno) node.template = self.parse_expression() self.stream.expect('name:import') node.names = [] def parse_context(): if self.stream.current.value in ('with', 'without') and \ self.stream.look().test('name:context'): node.with_context = next(self.stream).value == 'with' self.stream.skip() return True return False while 1: if node.names: self.stream.expect('comma') if self.stream.current.type == 'name': if parse_context(): break target = self.parse_assign_target(name_only=True) if target.name.startswith('_'): self.fail('names starting with an underline can not ' 'be imported', target.lineno, exc=TemplateAssertionError) if self.stream.skip_if('name:as'): alias = self.parse_assign_target(name_only=True) node.names.append((target.name, alias.name)) else: node.names.append(target.name) if parse_context() or self.stream.current.type != 'comma': break else: break if not hasattr(node, 'with_context'): node.with_context = False self.stream.skip_if('comma') return node def parse_signature(self, node): node.args = args = [] node.defaults = defaults = [] self.stream.expect('lparen') while self.stream.current.type != 'rparen': if args: self.stream.expect('comma') arg = self.parse_assign_target(name_only=True) arg.set_ctx('param') if self.stream.skip_if('assign'): defaults.append(self.parse_expression()) elif defaults: self.fail('non-default argument follows default argument') args.append(arg) self.stream.expect('rparen') def parse_call_block(self): node = nodes.CallBlock(lineno=next(self.stream).lineno) if self.stream.current.type == 'lparen': self.parse_signature(node) else: node.args = [] node.defaults = [] node.call = self.parse_expression() if not isinstance(node.call, nodes.Call): self.fail('expected call', node.lineno) node.body = self.parse_statements(('name:endcall',), drop_needle=True) return node def parse_filter_block(self): node = nodes.FilterBlock(lineno=next(self.stream).lineno) node.filter = self.parse_filter(None, start_inline=True) node.body = self.parse_statements(('name:endfilter',), drop_needle=True) return node def parse_macro(self): node = nodes.Macro(lineno=next(self.stream).lineno) node.name = self.parse_assign_target(name_only=True).name 
self.parse_signature(node) node.body = self.parse_statements(('name:endmacro',), drop_needle=True) return node def parse_print(self): node = nodes.Output(lineno=next(self.stream).lineno) node.nodes = [] while self.stream.current.type != 'block_end': if node.nodes: self.stream.expect('comma') node.nodes.append(self.parse_expression()) return node def parse_assign_target(self, with_tuple=True, name_only=False, extra_end_rules=None): """Parse an assignment target. As Jinja2 allows assignments to tuples, this function can parse all allowed assignment targets. Per default assignments to tuples are parsed, that can be disable however by setting `with_tuple` to `False`. If only assignments to names are wanted `name_only` can be set to `True`. The `extra_end_rules` parameter is forwarded to the tuple parsing function. """ if name_only: token = self.stream.expect('name') target = nodes.Name(token.value, 'store', lineno=token.lineno) else: if with_tuple: target = self.parse_tuple(simplified=True, extra_end_rules=extra_end_rules) else: target = self.parse_primary() target.set_ctx('store') if not target.can_assign(): self.fail('can\'t assign to %r' % target.__class__. __name__.lower(), target.lineno) return target def parse_expression(self, with_condexpr=True): """Parse an expression. Per default all expressions are parsed, if the optional `with_condexpr` parameter is set to `False` conditional expressions are not parsed. """ if with_condexpr: return self.parse_condexpr() return self.parse_or() def parse_condexpr(self): lineno = self.stream.current.lineno expr1 = self.parse_or() while self.stream.skip_if('name:if'): expr2 = self.parse_or() if self.stream.skip_if('name:else'): expr3 = self.parse_condexpr() else: expr3 = None expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) lineno = self.stream.current.lineno return expr1 def parse_or(self): lineno = self.stream.current.lineno left = self.parse_and() while self.stream.skip_if('name:or'): right = self.parse_and() left = nodes.Or(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_and(self): lineno = self.stream.current.lineno left = self.parse_not() while self.stream.skip_if('name:and'): right = self.parse_not() left = nodes.And(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_not(self): if self.stream.current.test('name:not'): lineno = next(self.stream).lineno return nodes.Not(self.parse_not(), lineno=lineno) return self.parse_compare() def parse_compare(self): lineno = self.stream.current.lineno expr = self.parse_add() ops = [] while 1: token_type = self.stream.current.type if token_type in _compare_operators: next(self.stream) ops.append(nodes.Operand(token_type, self.parse_add())) elif self.stream.skip_if('name:in'): ops.append(nodes.Operand('in', self.parse_add())) elif (self.stream.current.test('name:not') and self.stream.look().test('name:in')): self.stream.skip(2) ops.append(nodes.Operand('notin', self.parse_add())) else: break lineno = self.stream.current.lineno if not ops: return expr return nodes.Compare(expr, ops, lineno=lineno) def parse_add(self): lineno = self.stream.current.lineno left = self.parse_sub() while self.stream.current.type == 'add': next(self.stream) right = self.parse_sub() left = nodes.Add(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_sub(self): lineno = self.stream.current.lineno left = self.parse_concat() while self.stream.current.type == 'sub': next(self.stream) right = self.parse_concat() left = 
nodes.Sub(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_concat(self): lineno = self.stream.current.lineno args = [self.parse_mul()] while self.stream.current.type == 'tilde': next(self.stream) args.append(self.parse_mul()) if len(args) == 1: return args[0] return nodes.Concat(args, lineno=lineno) def parse_mul(self): lineno = self.stream.current.lineno left = self.parse_div() while self.stream.current.type == 'mul': next(self.stream) right = self.parse_div() left = nodes.Mul(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_div(self): lineno = self.stream.current.lineno left = self.parse_floordiv() while self.stream.current.type == 'div': next(self.stream) right = self.parse_floordiv() left = nodes.Div(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_floordiv(self): lineno = self.stream.current.lineno left = self.parse_mod() while self.stream.current.type == 'floordiv': next(self.stream) right = self.parse_mod() left = nodes.FloorDiv(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_mod(self): lineno = self.stream.current.lineno left = self.parse_pow() while self.stream.current.type == 'mod': next(self.stream) right = self.parse_pow() left = nodes.Mod(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_pow(self): lineno = self.stream.current.lineno left = self.parse_unary() while self.stream.current.type == 'pow': next(self.stream) right = self.parse_unary() left = nodes.Pow(left, right, lineno=lineno) lineno = self.stream.current.lineno return left def parse_unary(self, with_filter=True): token_type = self.stream.current.type lineno = self.stream.current.lineno if token_type == 'sub': next(self.stream) node = nodes.Neg(self.parse_unary(False), lineno=lineno) elif token_type == 'add': next(self.stream) node = nodes.Pos(self.parse_unary(False), lineno=lineno) else: node = self.parse_primary() node = self.parse_postfix(node) if with_filter: node = self.parse_filter_expr(node) return node def parse_primary(self): token = self.stream.current if token.type == 'name': if token.value in ('true', 'false', 'True', 'False'): node = nodes.Const(token.value in ('true', 'True'), lineno=token.lineno) elif token.value in ('none', 'None'): node = nodes.Const(None, lineno=token.lineno) else: node = nodes.Name(token.value, 'load', lineno=token.lineno) next(self.stream) elif token.type == 'string': next(self.stream) buf = [token.value] lineno = token.lineno while self.stream.current.type == 'string': buf.append(self.stream.current.value) next(self.stream) node = nodes.Const(''.join(buf), lineno=lineno) elif token.type in ('integer', 'float'): next(self.stream) node = nodes.Const(token.value, lineno=token.lineno) elif token.type == 'lparen': next(self.stream) node = self.parse_tuple(explicit_parentheses=True) self.stream.expect('rparen') elif token.type == 'lbracket': node = self.parse_list() elif token.type == 'lbrace': node = self.parse_dict() else: self.fail("unexpected '%s'" % describe_token(token), token.lineno) return node def parse_tuple(self, simplified=False, with_condexpr=True, extra_end_rules=None, explicit_parentheses=False): """Works like `parse_expression` but if multiple expressions are delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. This method could also return a regular expression instead of a tuple if no commas where found. The default parsing mode is a full tuple. 
If `simplified` is `True` only names and literals are parsed. The `no_condexpr` parameter is forwarded to :meth:`parse_expression`. Because tuples do not require delimiters and may end in a bogus comma an extra hint is needed that marks the end of a tuple. For example for loops support tuples between `for` and `in`. In that case the `extra_end_rules` is set to ``['name:in']``. `explicit_parentheses` is true if the parsing was triggered by an expression in parentheses. This is used to figure out if an empty tuple is a valid expression or not. """ lineno = self.stream.current.lineno if simplified: parse = self.parse_primary elif with_condexpr: parse = self.parse_expression else: parse = lambda: self.parse_expression(with_condexpr=False) args = [] is_tuple = False while 1: if args: self.stream.expect('comma') if self.is_tuple_end(extra_end_rules): break args.append(parse()) if self.stream.current.type == 'comma': is_tuple = True else: break lineno = self.stream.current.lineno if not is_tuple: if args: return args[0] # if we don't have explicit parentheses, an empty tuple is # not a valid expression. This would mean nothing (literally # nothing) in the spot of an expression would be an empty # tuple. if not explicit_parentheses: self.fail('Expected an expression, got \'%s\'' % describe_token(self.stream.current)) return nodes.Tuple(args, 'load', lineno=lineno) def parse_list(self): token = self.stream.expect('lbracket') items = [] while self.stream.current.type != 'rbracket': if items: self.stream.expect('comma') if self.stream.current.type == 'rbracket': break items.append(self.parse_expression()) self.stream.expect('rbracket') return nodes.List(items, lineno=token.lineno) def parse_dict(self): token = self.stream.expect('lbrace') items = [] while self.stream.current.type != 'rbrace': if items: self.stream.expect('comma') if self.stream.current.type == 'rbrace': break key = self.parse_expression() self.stream.expect('colon') value = self.parse_expression() items.append(nodes.Pair(key, value, lineno=key.lineno)) self.stream.expect('rbrace') return nodes.Dict(items, lineno=token.lineno) def parse_postfix(self, node): while 1: token_type = self.stream.current.type if token_type == 'dot' or token_type == 'lbracket': node = self.parse_subscript(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_filter_expr(self, node): while 1: token_type = self.stream.current.type if token_type == 'pipe': node = self.parse_filter(node) elif token_type == 'name' and self.stream.current.value == 'is': node = self.parse_test(node) # calls are valid both after postfix expressions (getattr # and getitem) as well as filters and tests elif token_type == 'lparen': node = self.parse_call(node) else: break return node def parse_subscript(self, node): token = next(self.stream) if token.type == 'dot': attr_token = self.stream.current next(self.stream) if attr_token.type == 'name': return nodes.Getattr(node, attr_token.value, 'load', lineno=token.lineno) elif attr_token.type != 'integer': self.fail('expected name or number', attr_token.lineno) arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) if token.type == 'lbracket': args = [] while self.stream.current.type != 'rbracket': if args: self.stream.expect('comma') args.append(self.parse_subscribed()) self.stream.expect('rbracket') if len(args) == 1: arg = 
args[0] else: arg = nodes.Tuple(args, 'load', lineno=token.lineno) return nodes.Getitem(node, arg, 'load', lineno=token.lineno) self.fail('expected subscript expression', self.lineno) def parse_subscribed(self): lineno = self.stream.current.lineno if self.stream.current.type == 'colon': next(self.stream) args = [None] else: node = self.parse_expression() if self.stream.current.type != 'colon': return node next(self.stream) args = [node] if self.stream.current.type == 'colon': args.append(None) elif self.stream.current.type not in ('rbracket', 'comma'): args.append(self.parse_expression()) else: args.append(None) if self.stream.current.type == 'colon': next(self.stream) if self.stream.current.type not in ('rbracket', 'comma'): args.append(self.parse_expression()) else: args.append(None) else: args.append(None) return nodes.Slice(lineno=lineno, *args) def parse_call(self, node): token = self.stream.expect('lparen') args = [] kwargs = [] dyn_args = dyn_kwargs = None require_comma = False def ensure(expr): if not expr: self.fail('invalid syntax for function call expression', token.lineno) while self.stream.current.type != 'rparen': if require_comma: self.stream.expect('comma') # support for trailing comma if self.stream.current.type == 'rparen': break if self.stream.current.type == 'mul': ensure(dyn_args is None and dyn_kwargs is None) next(self.stream) dyn_args = self.parse_expression() elif self.stream.current.type == 'pow': ensure(dyn_kwargs is None) next(self.stream) dyn_kwargs = self.parse_expression() else: ensure(dyn_args is None and dyn_kwargs is None) if self.stream.current.type == 'name' and \ self.stream.look().type == 'assign': key = self.stream.current.value self.stream.skip(2) value = self.parse_expression() kwargs.append(nodes.Keyword(key, value, lineno=value.lineno)) else: ensure(not kwargs) args.append(self.parse_expression()) require_comma = True self.stream.expect('rparen') if node is None: return args, kwargs, dyn_args, dyn_kwargs return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) def parse_filter(self, node, start_inline=False): while self.stream.current.type == 'pipe' or start_inline: if not start_inline: next(self.stream) token = self.stream.expect('name') name = token.value while self.stream.current.type == 'dot': next(self.stream) name += '.' + self.stream.expect('name').value if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) else: args = [] kwargs = [] dyn_args = dyn_kwargs = None node = nodes.Filter(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) start_inline = False return node def parse_test(self, node): token = next(self.stream) if self.stream.current.test('name:not'): next(self.stream) negated = True else: negated = False name = self.stream.expect('name').value while self.stream.current.type == 'dot': next(self.stream) name += '.' 
+ self.stream.expect('name').value dyn_args = dyn_kwargs = None kwargs = [] if self.stream.current.type == 'lparen': args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) elif (self.stream.current.type in ('name', 'string', 'integer', 'float', 'lparen', 'lbracket', 'lbrace') and not self.stream.current.test_any('name:else', 'name:or', 'name:and')): if self.stream.current.test('name:is'): self.fail('You cannot chain multiple tests with is') args = [self.parse_expression()] else: args = [] node = nodes.Test(node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno) if negated: node = nodes.Not(node, lineno=token.lineno) return node def subparse(self, end_tokens=None): body = [] data_buffer = [] add_data = data_buffer.append if end_tokens is not None: self._end_token_stack.append(end_tokens) def flush_data(): if data_buffer: lineno = data_buffer[0].lineno body.append(nodes.Output(data_buffer[:], lineno=lineno)) del data_buffer[:] try: while self.stream: token = self.stream.current if token.type == 'data': if token.value: add_data(nodes.TemplateData(token.value, lineno=token.lineno)) next(self.stream) elif token.type == 'variable_begin': next(self.stream) add_data(self.parse_tuple(with_condexpr=True)) self.stream.expect('variable_end') elif token.type == 'block_begin': flush_data() next(self.stream) if end_tokens is not None and \ self.stream.current.test_any(*end_tokens): return body rv = self.parse_statement() if isinstance(rv, list): body.extend(rv) else: body.append(rv) self.stream.expect('block_end') else: raise AssertionError('internal parsing error') flush_data() finally: if end_tokens is not None: self._end_token_stack.pop() return body def parse(self): """Parse the whole template into a `Template` node.""" result = nodes.Template(self.subparse(), lineno=1) result.set_environment(self.environment) return result
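To see the tuple behaviour described in the `parse_tuple` docstring from the outside, here is a minimal sketch using only the public `jinja2.Environment` and `jinja2.nodes` API (an illustration added here, not part of parser.py): the unparenthesised `a, b` between `for` and `in` is closed off by the `name:in` extra end rule and comes back as a `Tuple` node.

# Minimal sketch, assuming the standard public jinja2 API; not part of this file.
# The bare "a, b" between `for` and `in` is handled by parse_tuple() with
# extra_end_rules set to ['name:in'], so it is returned as a nodes.Tuple.
from jinja2 import Environment, nodes

env = Environment()
ast = env.parse('{% for a, b in seq %}{{ a }}{% endfor %}')
loop = ast.find(nodes.For)
print(type(loop.target).__name__)  # Tuple: the comma made the target a tuple
print(type(loop.iter).__name__)    # Name: 'seq'; the name:in rule ended the tuple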
mit
BellScurry/gem5-fault-injection
util/minorview/model.py
55
39193
# Copyright (c) 2013 ARM Limited # All rights reserved # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Andrew Bardsley import parse import colours from colours import unknownColour from point import Point import re import blobs from time import time as wall_time import os id_parts = "TSPLFE" all_ids = set(id_parts) no_ids = set([]) class BlobDataSelect(object): """Represents which data is displayed for Ided object""" def __init__(self): # Copy all_ids self.ids = set(all_ids) def __and__(self, rhs): """And for filtering""" ret = BlobDataSelect() ret.ids = self.ids.intersection(rhs.ids) return ret class BlobVisualData(object): """Super class for block data colouring""" def to_striped_block(self, select): """Return an array of colours to use for a striped block""" return unknownColour def get_inst(self): """Get an instruction Id (if any) from this data""" return None def get_line(self): """Get a line Id (if any) from this data""" return None def __repr__(self): return self.__class__.__name__ + '().from_string(' + \ self.__str__() + ')' def __str__(self): return '' class Id(BlobVisualData): """A line or instruction id""" def __init__(self): self.isFault = False self.threadId = 0 self.streamSeqNum = 0 self.predictionSeqNum = 0 self.lineSeqNum = 0 self.fetchSeqNum = 0 self.execSeqNum = 0 def as_list(self): return [self.threadId, self.streamSeqNum, self.predictionSeqNum, self.lineSeqNum, self.fetchSeqNum, self.execSeqNum] def __cmp__(self, right): return cmp(self.as_list(), right.as_list()) def from_string(self, string): m = re.match('^(F;)?(\d+)/(\d+)\.(\d+)/(\d+)(/(\d+)(\.(\d+))?)?', string) def seqnum_from_string(string): if string is None: return 0 else: return int(string) if m is None: print 'Invalid Id string', string else: elems = m.groups() if elems[0] is not None: self.isFault = True else: self.isFault = False self.threadId = seqnum_from_string(elems[1]) self.streamSeqNum = seqnum_from_string(elems[2]) self.predictionSeqNum = seqnum_from_string(elems[3]) self.lineSeqNum = seqnum_from_string(elems[4]) self.fetchSeqNum = seqnum_from_string(elems[6]) self.execSeqNum = seqnum_from_string(elems[8]) return self def get_inst(self): if self.fetchSeqNum != 0: return self else: return None def get_line(self): return self def __str__(self): """Returns the usual id T/S.P/L/F.E string""" return ( str(self.threadId) + '/' + str(self.streamSeqNum) + '.' + str(self.predictionSeqNum) + '/' + str(self.lineSeqNum) + '/' + str(self.fetchSeqNum) + '.' 
+ str(self.execSeqNum)) def to_striped_block(self, select): ret = [] if self.isFault: ret.append(colours.faultColour) if 'T' in select.ids: ret.append(colours.number_to_colour(self.threadId)) if 'S' in select.ids: ret.append(colours.number_to_colour(self.streamSeqNum)) if 'P' in select.ids: ret.append(colours.number_to_colour(self.predictionSeqNum)) if 'L' in select.ids: ret.append(colours.number_to_colour(self.lineSeqNum)) if self.fetchSeqNum != 0 and 'F' in select.ids: ret.append(colours.number_to_colour(self.fetchSeqNum)) if self.execSeqNum != 0 and 'E' in select.ids: ret.append(colours.number_to_colour(self.execSeqNum)) if len(ret) == 0: ret = [colours.unknownColour] if self.isFault: ret.append(colours.faultColour) return ret class Branch(BlobVisualData): """Branch data: new stream and prediction sequence numbers, a branch reason and a new PC""" def __init__(self): self.newStreamSeqNum = 0 self.newPredictionSeqNum = 0 self.newPC = 0 self.reason = "NoBranch" self.id = Id() def from_string(self, string): m = re.match('^(\w+);(\d+)\.(\d+);([0-9a-fA-Fx]+);(.*)$', string) if m is not None: self.reason, newStreamSeqNum, newPredictionSeqNum, \ newPC, id = m.groups() self.newStreamSeqNum = int(newStreamSeqNum) self.newPredictionSeqNum = int(newPredictionSeqNum) self.newPC = int(newPC, 0) self.id = special_view_decoder(Id)(id) # self.branch = special_view_decoder(Branch)(branch) else: print "Bad Branch data:", string return self def to_striped_block(self, select): return [colours.number_to_colour(self.newStreamSeqNum), colours.number_to_colour(self.newPredictionSeqNum), colours.number_to_colour(self.newPC)] class Counts(BlobVisualData): """Treat the input data as just a /-separated list of count values (or just a single value)""" def __init__(self): self.counts = [] def from_string(self, string): self.counts = map(int, re.split('/', string)) return self def to_striped_block(self, select): return map(colours.number_to_colour, self.counts) class Colour(BlobVisualData): """A fixed colour block, used for special colour decoding""" def __init__(self, colour): self.colour = colour def to_striped_block(self, select): return [self.colour] class DcacheAccess(BlobVisualData): """Data cache accesses [RW];id""" def __init__(self): self.direc = 'R' self.id = Id() def from_string(self, string): self.direc, id = re.match('^([RW]);([^;]*);.*$', string).groups() self.id.from_string(id) return self def get_inst(self): return self.id def to_striped_block(self, select): if self.direc == 'R': direc_colour = colours.readColour elif self.direc == 'W': direc_colour = colours.writeColour else: direc_colour = colours.errorColour return [direc_colour] + self.id.to_striped_block(select) class ColourPattern(object): """Super class for decoders that make 2D grids rather than just single striped blocks""" def elems(self): return [] def to_striped_block(self, select): return [[[colours.errorColour]]] def special_view_decoder(class_): """Generate a decode function that checks for special character arguments first (and generates a fixed colour) before building a BlobVisualData of the given class""" def decode(symbol): if symbol in special_state_colours: return Colour(special_state_colours[symbol]) else: return class_().from_string(symbol) return decode class TwoDColours(ColourPattern): """A 2D grid pattern decoder""" def __init__(self, blockss): self.blockss = blockss @classmethod def decoder(class_, elemClass, dataName): """Factory for making decoders for particular block types""" def decode(pairs): if dataName not in pairs:
print 'TwoDColours: no event data called:', \ dataName, 'in:', pairs return class_([[Colour(colours.errorColour)]]) else: parsed = parse.list_parser(pairs[dataName]) return class_(parse.map2(special_view_decoder(elemClass), \ parsed)) return decode @classmethod def indexed_decoder(class_, elemClass, dataName, picPairs): """Factory for making decoders for particular block types but where the list elements are pairs of (index, data) and strip and stripelems counts are picked up from the pair data on the decoder's picture file. This gives a 2D layout of the values with index 0 at strip=0, elem=0 and index 1 at strip=0, elem=1""" def decode(pairs): if dataName not in pairs: print 'TwoDColours: no event data called:', \ dataName, 'in:', pairs return class_([[Colour(colours.errorColour)]]) else: strips = int(picPairs['strips']) strip_elems = int(picPairs['stripelems']) raw_iv_pairs = pairs[dataName] parsed = parse.parse_indexed_list(raw_iv_pairs) array = [[Colour(colours.emptySlotColour) for i in xrange(0, strip_elems)] for j in xrange(0, strips)] for index, value in parsed: try: array[index % strips][index / strips] = \ special_view_decoder(elemClass)(value) except: print "Element out of range strips: %d," \ " stripelems %d, index: %d" % (strips, strip_elems, index) # return class_(array) return class_(array) return decode def elems(self): """Get a flat list of all elements""" ret = [] for blocks in self.blockss: ret += blocks return ret def to_striped_block(self, select): return parse.map2(lambda d: d.to_striped_block(select), self.blockss) class FrameColours(ColourPattern): """Decode to a 2D grid which has a single occupied row from the event data and some blank rows forming a frame with the occupied row as a 'title' coloured stripe""" def __init__(self, block, numBlankSlots): self.numBlankSlots = numBlankSlots self.block = block @classmethod def decoder(class_, elemClass, numBlankSlots, dataName): """Factory for element type""" def decode(pairs): if dataName not in pairs: print 'FrameColours: no event data called:', dataName, \ 'in:', pairs return class_([Colour(colours.errorColour)]) else: parsed = parse.list_parser(pairs[dataName]) return class_(special_view_decoder(elemClass) (parsed[0][0]), numBlankSlots) return decode def elems(self): return [self.block] def to_striped_block(self, select): return ([[self.block.to_striped_block(select)]] + (self.numBlankSlots * [[[colours.backgroundColour]]])) special_state_colours = { 'U': colours.unknownColour, 'B': colours.blockedColour, '-': colours.bubbleColour, '': colours.emptySlotColour, 'E': colours.emptySlotColour, 'R': colours.reservedSlotColour, 'X': colours.errorColour, 'F': colours.faultColour, 'r': colours.readColour, 'w': colours.writeColour } special_state_names = { 'U': '(U)nknown', 'B': '(B)locked', '-': '(-)Bubble', '': '()Empty', 'E': '(E)mpty', 'R': '(R)eserved', 'X': '(X)Error', 'F': '(F)ault', 'r': '(r)ead', 'w': '(w)rite' } special_state_chars = special_state_colours.keys() # The complete set of available block data types decoder_element_classes = { 'insts': Id, 'lines': Id, 'branch': Branch, 'dcache': DcacheAccess, 'counts': Counts } indexed_decoder_element_classes = { 'indexedCounts' : Counts } def find_colour_decoder(stripSpace, decoderName, dataName, picPairs): """Make a colour decoder from some picture file blob attributes""" if decoderName == 'frame': return FrameColours.decoder(Counts, stripSpace, dataName) elif decoderName in decoder_element_classes: return TwoDColours.decoder(decoder_element_classes[decoderName], 
dataName) elif decoderName in indexed_decoder_element_classes: return TwoDColours.indexed_decoder( indexed_decoder_element_classes[decoderName], dataName, picPairs) else: return None class IdedObj(object): """An object identified by an Id carrying paired data. The super class for Inst and Line""" def __init__(self, id, pairs={}): self.id = id self.pairs = pairs def __cmp__(self, right): return cmp(self.id, right.id) def table_line(self): """Represent the object as a list of table row data""" return [] # FIXME, add table column titles? def __repr__(self): return ' '.join(self.table_line()) class Inst(IdedObj): """A non-fault instruction""" def __init__(self, id, disassembly, addr, pairs={}): super(Inst,self).__init__(id, pairs) if 'nextAddr' in pairs: self.nextAddr = int(pairs['nextAddr'], 0) del pairs['nextAddr'] else: self.nextAddr = None self.disassembly = disassembly self.addr = addr def table_line(self): if self.nextAddr is not None: addrStr = '0x%x->0x%x' % (self.addr, self.nextAddr) else: addrStr = '0x%x' % self.addr ret = [addrStr, self.disassembly] for name, value in self.pairs.iteritems(): ret.append("%s=%s" % (name, str(value))) return ret class InstFault(IdedObj): """A fault instruction""" def __init__(self, id, fault, addr, pairs={}): super(InstFault,self).__init__(id, pairs) self.fault = fault self.addr = addr def table_line(self): ret = ["0x%x" % self.addr, self.fault] for name, value in self.pairs.iteritems(): ret.append("%s=%s" % (name, str(value))) return ret class Line(IdedObj): """A fetched line""" def __init__(self, id, vaddr, paddr, size, pairs={}): super(Line,self).__init__(id, pairs) self.vaddr = vaddr self.paddr = paddr self.size = size def table_line(self): ret = ["0x%x/0x%x" % (self.vaddr, self.paddr), "%d" % self.size] for name, value in self.pairs.iteritems(): ret.append("%s=%s" % (name, str(value))) return ret class LineFault(IdedObj): """A faulting line""" def __init__(self, id, fault, vaddr, pairs={}): super(LineFault,self).__init__(id, pairs) self.vaddr = vaddr self.fault = fault def table_line(self): ret = ["0x%x" % self.vaddr, self.fault] for name, value in self.pairs.iteritems(): ret.append("%s=%s" % (name, str(value))) return ret class BlobEvent(object): """Time event for a single blob""" def __init__(self, unit, time, pairs = {}): # blob's unit name self.unit = unit self.time = time # dict of picChar (blob name) to visual data self.visuals = {} # Miscellaneous unparsed MinorTrace line data self.pairs = pairs # Non-MinorTrace debug printout for this unit at this time self.comments = [] def find_ided_objects(self, model, picChar, includeInstLines): """Find instructions/lines mentioned in the blob's event data""" ret = [] if picChar in self.visuals: blocks = self.visuals[picChar].elems() def find_inst(data): instId = data.get_inst() lineId = data.get_line() if instId is not None: inst = model.find_inst(instId) line = model.find_line(instId) if inst is not None: ret.append(inst) if includeInstLines and line is not None: ret.append(line) elif lineId is not None: line = model.find_line(lineId) if line is not None: ret.append(line) map(find_inst, blocks) return sorted(ret) class BlobModel(object): """Model bringing together blob definitions and parsed events""" def __init__(self, unitNamePrefix=''): self.blobs = [] self.unitNameToBlobs = {} self.unitEvents = {} self.clear_events() self.picSize = Point(20,10) self.lastTime = 0 self.unitNamePrefix = unitNamePrefix def clear_events(self): """Drop all events and times""" self.lastTime = 0 self.times = [] self.insts = {} self.lines = {} self.numEvents = 0
for unit, events in self.unitEvents.iteritems(): self.unitEvents[unit] = [] def add_blob(self, blob): """Add a parsed blob to the model""" self.blobs.append(blob) if blob.unit not in self.unitNameToBlobs: self.unitNameToBlobs[blob.unit] = [] self.unitNameToBlobs[blob.unit].append(blob) def add_inst(self, inst): """Add a MinorInst instruction definition to the model""" # Is this a non-micro-op instruction? Micro-ops (usually) get their # fetchSeqNum == 0 variant stored first macroop_key = (inst.id.fetchSeqNum, 0) full_key = (inst.id.fetchSeqNum, inst.id.execSeqNum) if inst.id.execSeqNum != 0 and macroop_key not in self.insts: self.insts[macroop_key] = inst self.insts[full_key] = inst def find_inst(self, id): """Find an instruction either as a microop or macroop""" macroop_key = (id.fetchSeqNum, 0) full_key = (id.fetchSeqNum, id.execSeqNum) if full_key in self.insts: return self.insts[full_key] elif macroop_key in self.insts: return self.insts[macroop_key] else: return None def add_line(self, line): """Add a MinorLine line to the model""" self.lines[line.id.lineSeqNum] = line def add_unit_event(self, event): """Add a single event to the model. This must be an event at a time >= the current maximum time""" if event.unit in self.unitEvents: events = self.unitEvents[event.unit] if len(events) > 0 and events[len(events)-1].time > event.time: print "Bad event ordering" events.append(event) self.numEvents += 1 self.lastTime = max(self.lastTime, event.time) def extract_times(self): """Extract a list of all the times from the seen events. Call after reading events to give a safe index list to use for time indices""" times = {} for unitEvents in self.unitEvents.itervalues(): for event in unitEvents: times[event.time] = 1 self.times = times.keys() self.times.sort() def find_line(self, id): """Find a line by id""" key = id.lineSeqNum return self.lines.get(key, None) def find_event_bisection(self, unit, time, events, lower_index, upper_index): """Find an event by binary search on time indices""" while lower_index <= upper_index: pivot = (upper_index + lower_index) / 2 pivotEvent = events[pivot] event_equal = (pivotEvent.time == time or (pivotEvent.time < time and (pivot == len(events) - 1 or events[pivot + 1].time > time))) if event_equal: return pivotEvent elif time > pivotEvent.time: if pivot == upper_index: return None else: lower_index = pivot + 1 elif time < pivotEvent.time: if pivot == lower_index: return None else: upper_index = pivot - 1 else: return None return None def find_unit_event_by_time(self, unit, time): """Find the last event for the given unit at time <= time""" if unit in self.unitEvents: events = self.unitEvents[unit] ret = self.find_event_bisection(unit, time, events, 0, len(events)-1) return ret else: return None def find_time_index(self, time): """Find a time index close to the given time (where times[return] <= time and times[return+1] > time)""" ret = 0 lastIndex = len(self.times) - 1 while ret < lastIndex and self.times[ret + 1] <= time: ret += 1 return ret def add_minor_inst(self, rest): """Parse and add a MinorInst line to the model""" pairs = parse.parse_pairs(rest) other_pairs = dict(pairs) id = Id().from_string(pairs['id']) del other_pairs['id'] addr = int(pairs['addr'], 0) del other_pairs['addr'] if 'inst' in other_pairs: del other_pairs['inst'] # Collapse unnecessary spaces in disassembly disassembly = re.sub(' *', ' ', re.sub('^ *', '', pairs['inst'])) inst = Inst(id, disassembly, addr, other_pairs) self.add_inst(inst) elif 'fault' in other_pairs: del
other_pairs['fault'] inst = InstFault(id, pairs['fault'], addr, other_pairs) self.add_inst(inst) def add_minor_line(self, rest): """Parse and add a MinorLine line to the model""" pairs = parse.parse_pairs(rest) other_pairs = dict(pairs) id = Id().from_string(pairs['id']) del other_pairs['id'] vaddr = int(pairs['vaddr'], 0) del other_pairs['vaddr'] if 'paddr' in other_pairs: del other_pairs['paddr'] del other_pairs['size'] paddr = int(pairs['paddr'], 0) size = int(pairs['size'], 0) self.add_line(Line(id, vaddr, paddr, size, other_pairs)) elif 'fault' in other_pairs: del other_pairs['fault'] self.add_line(LineFault(id, pairs['fault'], vaddr, other_pairs)) def load_events(self, file, startTime=0, endTime=None): """Load an event file and add everything to this model""" def update_comments(comments, time): # Add a list of comments to an existing event, if there is one at # the given time, or create a new, correctly-timed, event from # the last event and attach the comments to that for commentUnit, commentRest in comments: event = self.find_unit_event_by_time(commentUnit, time) # Find an event to which this comment can be attached if event is None: # No older event, make a new empty one event = BlobEvent(commentUnit, time, {}) self.add_unit_event(event) elif event.time != time: # Copy the old event and make a new one with the right # time and comment newEvent = BlobEvent(commentUnit, time, event.pairs) newEvent.visuals = dict(event.visuals) event = newEvent self.add_unit_event(event) event.comments.append(commentRest) self.clear_events() # A negative time will *always* be different from an event time time = -1 time_events = {} last_time_lines = {} minor_trace_line_count = 0 comments = [] default_colour = [[colours.unknownColour]] next_progress_print_event_count = 1000 if not os.access(file, os.R_OK): print 'Can\'t open file', file exit(1) else: print 'Opening file', file f = open(file) start_wall_time = wall_time() # Skip leading events still_skipping = True l = f.readline() while l and still_skipping: match = re.match('^\s*(\d+):', l) if match is not None: event_time = match.groups() if int(event_time[0]) >= startTime: still_skipping = False else: l = f.readline() else: l = f.readline() match_line_re = re.compile( '^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$') # Parse each line of the events file, accumulating comments to be # attached to MinorTrace events when the time changes reached_end_time = False while not reached_end_time and l: match = match_line_re.match(l) if match is not None: event_time, unit, line_type, rest = match.groups() event_time = int(event_time) unit = re.sub('^' + self.unitNamePrefix + '\.?(.*)$', '\\1', unit) # When the time changes, resolve comments if event_time != time: if self.numEvents > next_progress_print_event_count: print ('Parsed to time: %d' % event_time) next_progress_print_event_count = ( self.numEvents + 1000) update_comments(comments, time) comments = [] time = event_time if line_type is None: # Treat this line as just a 'comment' comments.append((unit, rest)) elif line_type == 'MinorTrace:': minor_trace_line_count += 1 # Only insert this event if it's not the same as # the last event we saw for this unit if last_time_lines.get(unit, None) != rest: event = BlobEvent(unit, event_time, {}) pairs = parse.parse_pairs(rest) event.pairs = pairs # Try to decode the colour data for this event blobs = self.unitNameToBlobs.get(unit, []) for blob in blobs: if blob.visualDecoder is not None: event.visuals[blob.picChar] = ( blob.visualDecoder(pairs)) 
self.add_unit_event(event) last_time_lines[unit] = rest elif line_type == 'MinorInst:': self.add_minor_inst(rest) elif line_type == 'MinorLine:': self.add_minor_line(rest) if endTime is not None and time > endTime: reached_end_time = True l = f.readline() update_comments(comments, time) self.extract_times() f.close() end_wall_time = wall_time() print 'Total events:', minor_trace_line_count, 'unique events:', \ self.numEvents print 'Time to parse:', end_wall_time - start_wall_time def add_blob_picture(self, offset, pic, nameDict): """Add a parsed ASCII-art pipeline markup to the model""" pic_width = 0 for line in pic: pic_width = max(pic_width, len(line)) pic_height = len(pic) # Number of horizontal characters per 'pixel'. Should be 2 charsPerPixel = 2 # Clean up pic_width to a multiple of charsPerPixel pic_width = (pic_width + charsPerPixel - 1) // 2 self.picSize = Point(pic_width, pic_height) def pic_at(point): """Return the char pair at the given point. Returns None for characters off the picture""" x, y = point.to_pair() x *= 2 if y >= len(pic) or x >= len(pic[y]): return None else: return pic[y][x:x + charsPerPixel] def clear_pic_at(point): """Clear the chars at point so we don't trip over them again""" line = pic[point.y] x = point.x * charsPerPixel pic[point.y] = line[0:x] + (' ' * charsPerPixel) + \ line[x + charsPerPixel:] def skip_same_char(start, increment): """Skip characters which match pic_at(start)""" char = pic_at(start) hunt = start while pic_at(hunt) == char: hunt += increment return hunt def find_size(start): """Find the size of a rectangle with top left hand corner at start consisting of (at least) a -. shaped corner describing the top right corner of a rectangle of the same char""" char = pic_at(start) hunt_x = skip_same_char(start, Point(1,0)) hunt_y = skip_same_char(start, Point(0,1)) off_bottom_right = (hunt_x * Point(1,0)) + (hunt_y * Point(0,1)) return off_bottom_right - start def point_return(point): """Carriage return, line feed""" return Point(0, point.y + 1) def find_arrow(start): """Find a simple 1-char wide arrow""" def body(endChar, contChar, direc): arrow_point = start arrow_point += Point(0, 1) clear_pic_at(start) while pic_at(arrow_point) == contChar: clear_pic_at(arrow_point) arrow_point += Point(0, 1) if pic_at(arrow_point) == endChar: clear_pic_at(arrow_point) self.add_blob(blobs.Arrow('_', start + offset, direc = direc, size = (Point(1, 1) + arrow_point - start))) else: print 'Bad arrow', start char = pic_at(start) if char == '-\\': body('-/', ' :', 'right') elif char == '/-': body('\\-', ': ', 'left') blank_chars = [' ', ' :', ': '] # Traverse the picture left to right, top to bottom to find blobs seen_dict = {} point = Point(0,0) while pic_at(point) is not None: while pic_at(point) is not None: char = pic_at(point) if char == '->': self.add_blob(blobs.Arrow('_', point + offset, direc = 'right')) elif char == '<-': self.add_blob(blobs.Arrow('_', point + offset, direc = 'left')) elif char == '-\\' or char == '/-': find_arrow(point) elif char in blank_chars: pass else: if char not in seen_dict: size = find_size(point) topLeft = point + offset if char not in nameDict: # Unnamed blobs self.add_blob(blobs.Block(char, nameDict.get(char, '_'), topLeft, size = size)) else: # Named blobs, set visual info. 
blob = nameDict[char] blob.size = size blob.topLeft = topLeft self.add_blob(blob) seen_dict[char] = True point = skip_same_char(point, Point(1,0)) point = point_return(point) def load_picture(self, filename): """Load a picture file into the model""" def parse_blob_description(char, unit, macros, pairsList): # Parse the name value pairs in a blob-describing line def expand_macros(pairs, newPairs): # Recursively expand macros for name, value in newPairs: if name in macros: expand_macros(pairs, macros[name]) else: pairs[name] = value return pairs pairs = expand_macros({}, pairsList) ret = None typ = pairs.get('type', 'block') colour = colours.name_to_colour(pairs.get('colour', 'black')) if typ == 'key': ret = blobs.Key(char, unit, Point(0,0), colour) elif typ == 'block': ret = blobs.Block(char, unit, Point(0,0), colour) else: print "Bad picture blob type:", typ if 'hideId' in pairs: hide = pairs['hideId'] ret.dataSelect.ids -= set(hide) if typ == 'block': ret.displayName = pairs.get('name', unit) ret.nameLoc = pairs.get('nameLoc', 'top') ret.shape = pairs.get('shape', 'box') ret.stripDir = pairs.get('stripDir', 'horiz') ret.stripOrd = pairs.get('stripOrd', 'LR') ret.blankStrips = int(pairs.get('blankStrips', '0')) ret.shorten = int(pairs.get('shorten', '0')) if 'decoder' in pairs: decoderName = pairs['decoder'] dataElement = pairs.get('dataElement', decoderName) decoder = find_colour_decoder(ret.blankStrips, decoderName, dataElement, pairs) if decoder is not None: ret.visualDecoder = decoder else: print 'Bad visualDecoder requested:', decoderName if 'border' in pairs: border = pairs['border'] if border == 'thin': ret.border = 0.2 elif border == 'mid': ret.border = 0.5 else: ret.border = 1.0 elif typ == 'key': ret.colours = pairs.get('colours', ret.colours) return ret def line_is_comment(line): """Returns True if a line starts with '#'; returns False for lines which are None""" return line is not None \ and re.match('^\s*#', line) is not None def get_line(f): """Get a line from file f extending that line if it ends in '\' and dropping lines that start with '#'s""" ret = f.readline() # Discard comment lines while line_is_comment(ret): ret = f.readline() if ret is not None: extend_match = re.match('^(.*)\\\\$', ret) while extend_match is not None: new_line = f.readline() if new_line is not None and not line_is_comment(new_line): line_wo_backslash, = extend_match.groups() ret = line_wo_backslash + new_line extend_match = re.match('^(.*)\\\\$', ret) else: extend_match = None return ret # Macros are recursively expanded into name=value pairs macros = {} if not os.access(filename, os.R_OK): print 'Can\'t open file', filename exit(1) else: print 'Opening file', filename f = open(filename) l = get_line(f) picture = [] blob_char_dict = {} self.unitEvents = {} self.clear_events() # Actually parse the file in_picture = False while l: l = parse.remove_trailing_ws(l) l = re.sub('#.*', '', l) if re.match("^\s*$", l) is not None: pass elif l == '<<<': in_picture = True elif l == '>>>': in_picture = False elif in_picture: picture.append(re.sub('\s*$', '', l)) else: line_match = re.match( '^([a-zA-Z0-9][a-zA-Z0-9]):\s+([\w.]+)\s*(.*)', l) macro_match = re.match('macro\s+(\w+):(.*)', l) if macro_match is not None: name, defn = macro_match.groups() macros[name] = parse.parse_pairs_list(defn) elif line_match is not None: char, unit, pairs = line_match.groups() blob = parse_blob_description(char, unit, macros, parse.parse_pairs_list(pairs)) blob_char_dict[char] = blob # Setup the events structure
self.unitEvents[unit] = [] else: print 'Problem with Blob line:', l l = get_line(f) self.blobs = [] self.add_blob_picture(Point(0,1), picture, blob_char_dict)
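The T/S.P/L/F.E id format documented in `Id.__str__` above can be exercised directly. Here is a small sketch with made-up sequence numbers (the values are hypothetical, chosen only to illustrate `Id.from_string`):

# Hypothetical example, not part of the module: decode a MinorTrace id string.
# Format: threadId/streamSeqNum.predictionSeqNum/lineSeqNum/fetchSeqNum.execSeqNum,
# optionally prefixed with 'F;' to mark a fault.
ident = Id().from_string('0/10.2/5/8.3')
assert not ident.isFault
assert ident.as_list() == [0, 10, 2, 5, 8, 3]
print str(ident)  # round-trips to '0/10.2/5/8.3'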
bsd-3-clause
ahoyosid/scikit-learn
sklearn/utils/arpack.py
265
64837
""" This contains a copy of the future version of scipy.sparse.linalg.eigen.arpack.eigsh It's an upgraded wrapper of the ARPACK library which allows the use of shift-invert mode for symmetric matrices. Find a few eigenvectors and eigenvalues of a matrix. Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/ """ # Wrapper implementation notes # # ARPACK Entry Points # ------------------- # The entry points to ARPACK are # - (s,d)seupd : single and double precision symmetric matrix # - (s,d,c,z)neupd: single,double,complex,double complex general matrix # This wrapper puts the *neupd (general matrix) interfaces in eigs() # and the *seupd (symmetric matrix) in eigsh(). # There is no Hermetian complex/double complex interface. # To find eigenvalues of a Hermetian matrix you # must use eigs() and not eigsh() # It might be desirable to handle the Hermetian case differently # and, for example, return real eigenvalues. # Number of eigenvalues returned and complex eigenvalues # ------------------------------------------------------ # The ARPACK nonsymmetric real and double interface (s,d)naupd return # eigenvalues and eigenvectors in real (float,double) arrays. # Since the eigenvalues and eigenvectors are, in general, complex # ARPACK puts the real and imaginary parts in consecutive entries # in real-valued arrays. This wrapper puts the real entries # into complex data types and attempts to return the requested eigenvalues # and eigenvectors. # Solver modes # ------------ # ARPACK and handle shifted and shift-inverse computations # for eigenvalues by providing a shift (sigma) and a solver. __docformat__ = "restructuredtext en" __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] import warnings from scipy.sparse.linalg.eigen.arpack import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator from scipy.sparse import identity, isspmatrix, isspmatrix_csr from scipy.linalg import lu_factor, lu_solve from scipy.sparse.sputils import isdense from scipy.sparse.linalg import gmres, splu import scipy from distutils.version import LooseVersion _type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'} _ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12} DNAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found. IPARAM(5) " "returns the number of wanted converged Ritz values.", 2: "No longer an informational error. Deprecated starting " "with release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the " "Implicitly restarted Arnoldi iteration. One possibility " "is to increase the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation;", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible.", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated." 
} SNAUPD_ERRORS = DNAUPD_ERRORS ZNAUPD_ERRORS = DNAUPD_ERRORS.copy() ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3." CNAUPD_ERRORS = ZNAUPD_ERRORS DSAUPD_ERRORS = { 0: "Normal exit.", 1: "Maximum number of iterations taken. " "All possible eigenvalues of OP has been found.", 2: "No longer an informational error. Deprecated starting with " "release 2 of ARPACK.", 3: "No shifts could be applied during a cycle of the Implicitly " "restarted Arnoldi iteration. One possibility is to increase " "the size of NCV relative to NEV. ", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -4: "The maximum number of Arnoldi update iterations allowed " "must be greater than zero.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work array WORKL is not sufficient.", -8: "Error return from trid. eigenvalue calculation; " "Informational error from LAPACK routine dsteqr .", -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "IPARAM(1) must be equal to 0 or 1.", -13: "NEV and WHICH = 'BE' are incompatible. ", -9999: "Could not build an Arnoldi factorization. " "IPARAM(5) returns the size of the current Arnoldi " "factorization. The user is advised to check that " "enough workspace and array storage has been allocated.", } SSAUPD_ERRORS = DSAUPD_ERRORS DNEUPD_ERRORS = { 0: "Normal exit.", 1: "The Schur form computed by LAPACK routine dlahqr " "could not be reordered by LAPACK routine dtrsen. " "Re-enter subroutine dneupd with IPARAM(5)NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least NCV " "columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 2 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from calculation of a real Schur form. " "Informational error from LAPACK routine dlahqr .", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine dtrevc.", -10: "IPARAM(7) must be 1,2,3,4.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "DNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "DNEUPD got a different count of the number of converged " "Ritz values than DNAUPD got. This indicates the user " "probably made an error in passing data from DNAUPD to " "DNEUPD or that the data was modified before entering " "DNEUPD", } SNEUPD_ERRORS = DNEUPD_ERRORS.copy() SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr " "could not be reordered by LAPACK routine strsen . " "Re-enter subroutine dneupd with IPARAM(5)=NCV and " "increase the size of the arrays DR and DI to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.") SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient " "accuracy.") SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of " "converged Ritz values than SNAUPD got. 
This indicates " "the user probably made an error in passing data from " "SNAUPD to SNEUPD or that the data was modified before " "entering SNEUPD") ZNEUPD_ERRORS = {0: "Normal exit.", 1: "The Schur form computed by LAPACK routine csheqr " "could not be reordered by LAPACK routine ztrsen. " "Re-enter subroutine zneupd with IPARAM(5)=NCV and " "increase the size of the array D to have " "dimension at least dimension NCV and allocate at least " "NCV columns for Z. NOTE: Not necessary if Z and V share " "the same space. Please notify the authors if this error " "occurs.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV-NEV >= 1 and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: "Error return from LAPACK eigenvalue calculation. " "This should never happened.", -9: "Error return from calculation of eigenvectors. " "Informational error from LAPACK routine ztrevc.", -10: "IPARAM(7) must be 1,2,3", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "HOWMNY = 'S' not yet implemented", -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.", -14: "ZNAUPD did not find any eigenvalues to sufficient " "accuracy.", -15: "ZNEUPD got a different count of the number of " "converged Ritz values than ZNAUPD got. This " "indicates the user probably made an error in passing " "data from ZNAUPD to ZNEUPD or that the data was " "modified before entering ZNEUPD"} CNEUPD_ERRORS = ZNEUPD_ERRORS.copy() CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient " "accuracy.") CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of " "converged Ritz values than CNAUPD got. This indicates " "the user probably made an error in passing data from " "CNAUPD to CNEUPD or that the data was modified before " "entering CNEUPD") DSEUPD_ERRORS = { 0: "Normal exit.", -1: "N must be positive.", -2: "NEV must be positive.", -3: "NCV must be greater than NEV and less than or equal to N.", -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.", -6: "BMAT must be one of 'I' or 'G'.", -7: "Length of private work WORKL array is not sufficient.", -8: ("Error return from trid. eigenvalue calculation; " "Information error from LAPACK routine dsteqr."), -9: "Starting vector is zero.", -10: "IPARAM(7) must be 1,2,3,4,5.", -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.", -12: "NEV and WHICH = 'BE' are incompatible.", -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.", -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.", -16: "HOWMNY = 'S' not yet implemented", -17: ("DSEUPD got a different count of the number of converged " "Ritz values than DSAUPD got. This indicates the user " "probably made an error in passing data from DSAUPD to " "DSEUPD or that the data was modified before entering " "DSEUPD.") } SSEUPD_ERRORS = DSEUPD_ERRORS.copy() SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues " "to sufficient accuracy.") SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of " "converged " "Ritz values than SSAUPD got. 
This indicates the user " "probably made an error in passing data from SSAUPD to " "SSEUPD or that the data was modified before entering " "SSEUPD.") _SAUPD_ERRORS = {'d': DSAUPD_ERRORS, 's': SSAUPD_ERRORS} _NAUPD_ERRORS = {'d': DNAUPD_ERRORS, 's': SNAUPD_ERRORS, 'z': ZNAUPD_ERRORS, 'c': CNAUPD_ERRORS} _SEUPD_ERRORS = {'d': DSEUPD_ERRORS, 's': SSEUPD_ERRORS} _NEUPD_ERRORS = {'d': DNEUPD_ERRORS, 's': SNEUPD_ERRORS, 'z': ZNEUPD_ERRORS, 'c': CNEUPD_ERRORS} # accepted values of parameter WHICH in _SEUPD _SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE'] # accepted values of parameter WHICH in _NAUPD _NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI'] class ArpackError(RuntimeError): """ ARPACK error """ def __init__(self, info, infodict=_NAUPD_ERRORS): msg = infodict.get(info, "Unknown error") RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg)) class ArpackNoConvergence(ArpackError): """ ARPACK iteration did not converge Attributes ---------- eigenvalues : ndarray Partial result. Converged eigenvalues. eigenvectors : ndarray Partial result. Converged eigenvectors. """ def __init__(self, msg, eigenvalues, eigenvectors): ArpackError.__init__(self, -1, {-1: msg}) self.eigenvalues = eigenvalues self.eigenvectors = eigenvectors class _ArpackParams(object): def __init__(self, n, k, tp, mode=1, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): if k <= 0: raise ValueError("k must be positive, k=%d" % k) if maxiter is None: maxiter = n * 10 if maxiter <= 0: raise ValueError("maxiter must be positive, maxiter=%d" % maxiter) if tp not in 'fdFD': raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'") if v0 is not None: # ARPACK overwrites its initial resid, make a copy self.resid = np.array(v0, copy=True) info = 1 else: self.resid = np.zeros(n, tp) info = 0 if sigma is None: #sigma not used self.sigma = 0 else: self.sigma = sigma if ncv is None: ncv = 2 * k + 1 ncv = min(ncv, n) self.v = np.zeros((n, ncv), tp) # holds Ritz vectors self.iparam = np.zeros(11, "int") # set solver mode and parameters ishfts = 1 self.mode = mode self.iparam[0] = ishfts self.iparam[2] = maxiter self.iparam[3] = 1 self.iparam[6] = mode self.n = n self.tol = tol self.k = k self.maxiter = maxiter self.ncv = ncv self.which = which self.tp = tp self.info = info self.converged = False self.ido = 0 def _raise_no_convergence(self): msg = "No convergence (%d iterations, %d/%d eigenvectors converged)" k_ok = self.iparam[4] num_iter = self.iparam[2] try: ev, vec = self.extract(True) except ArpackError as err: msg = "%s [%s]" % (msg, err) ev = np.zeros((0,)) vec = np.zeros((self.n, 0)) k_ok = 0 raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec) class _SymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x : # A - symmetric # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the general eigenvalue problem: # A*x = lambda*M*x # A - symmetric # M - symmetric positive definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be 
# matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # # mode = 4: # Solve the general eigenvalue problem in Buckling mode: # A*x = lambda*AG*x # A - symmetric positive semi-definite # AG - symmetric indefinite # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = left multiplication by [A-sigma*AG]^-1 # # mode = 5: # Solve the general eigenvalue problem in Cayley-transformed mode: # A*x = lambda*M*x # A - symmetric # M - symmetric positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode == 3: if matvec is not None: raise ValueError("matvec must not be specified for mode=3") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=3") if M_matvec is None: self.OP = Minv_matvec self.OPa = Minv_matvec self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(M_matvec(x)) self.OPa = Minv_matvec self.B = M_matvec self.bmat = 'G' elif mode == 4: if matvec is None: raise ValueError("matvec must be specified for mode=4") if M_matvec is not None: raise ValueError("M_matvec must not be specified for mode=4") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=4") self.OPa = Minv_matvec self.OP = lambda x: self.OPa(matvec(x)) self.B = matvec self.bmat = 'G' elif mode == 5: if matvec is None: raise ValueError("matvec must be specified for mode=5") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified for mode=5") self.OPa = Minv_matvec self.A_matvec = matvec if M_matvec is None: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x) self.B = lambda x: x self.bmat = 'I' else: self.OP = lambda x: Minv_matvec(matvec(x) + sigma * M_matvec(x)) self.B = M_matvec self.bmat = 'G' else: raise ValueError("mode=%i not implemented" % mode) if which not in _SEUPD_WHICH: raise ValueError("which must be one of %s" % ' '.join(_SEUPD_WHICH)) if k >= n: raise ValueError("k must be less than rank(A), k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k: raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp) ltr = _type_conv[self.tp] if ltr not in ["s", "d"]: raise ValueError("Input matrix is not real-valued.") self._arpack_solver = _arpack.__dict__[ltr + 'saupd'] self._arpack_extract = _arpack.__dict__[ltr + 'seupd'] self.iterate_infodict = _SAUPD_ERRORS[ltr] self.extract_infodict = _SEUPD_ERRORS[ltr] self.ipntr = np.zeros(11, "int") def iterate(self): self.ido, self.resid, self.v, 
self.iparam, self.ipntr, self.info = \ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode == 1: self.workd[yslice] = self.OP(self.workd[xslice]) elif self.mode == 2: self.workd[xslice] = self.OPb(self.workd[xslice]) self.workd[yslice] = self.OPa(self.workd[xslice]) elif self.mode == 5: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) Ax = self.A_matvec(self.workd[xslice]) self.workd[yslice] = self.OPa(Ax + (self.sigma * self.workd[Bxslice])) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): rvec = return_eigenvectors ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam[0:7], self.ipntr, self.workd[0:2 * self.n], self.workl, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d class _UnsymmetricArpackParams(_ArpackParams): def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None, Minv_matvec=None, sigma=None, ncv=None, v0=None, maxiter=None, which="LM", tol=0): # The following modes are supported: # mode = 1: # Solve the standard eigenvalue problem: # A*x = lambda*x # A - square matrix # Arguments should be # matvec = left multiplication by A # M_matvec = None [not used] # Minv_matvec = None [not used] # # mode = 2: # Solve the generalized eigenvalue problem: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = left multiplication by A # M_matvec = left multiplication by M # Minv_matvec = left multiplication by M^-1 # # mode = 3,4: # Solve the general eigenvalue problem in shift-invert mode: # A*x = lambda*M*x # A - square matrix # M - symmetric, positive semi-definite # Arguments should be # matvec = None [not used] # M_matvec = left multiplication by M # or None, if M is the identity # Minv_matvec = left multiplication by [A-sigma*M]^-1 # if A is real and mode==3, use the real part of Minv_matvec # if A is real and mode==4, use the imag part of Minv_matvec # if A is complex and mode==3, # use real and imag parts of Minv_matvec if mode == 1: if matvec is None: raise ValueError("matvec must be specified for mode=1") if M_matvec is not None: raise ValueError("M_matvec cannot be specified for mode=1") if Minv_matvec is not None: raise ValueError("Minv_matvec cannot be specified for mode=1") self.OP = matvec self.B = lambda x: x self.bmat = 'I' elif mode == 2: if matvec is None: raise ValueError("matvec must be specified for mode=2") if M_matvec is None: raise ValueError("M_matvec must be specified for mode=2") if Minv_matvec is None: 
raise ValueError("Minv_matvec must be specified for mode=2") self.OP = lambda x: Minv_matvec(matvec(x)) self.OPa = Minv_matvec self.OPb = matvec self.B = M_matvec self.bmat = 'G' elif mode in (3, 4): if matvec is None: raise ValueError("matvec must be specified " "for mode in (3,4)") if Minv_matvec is None: raise ValueError("Minv_matvec must be specified " "for mode in (3,4)") self.matvec = matvec if tp in 'DF': # complex type if mode == 3: self.OPa = Minv_matvec else: raise ValueError("mode=4 invalid for complex A") else: # real type if mode == 3: self.OPa = lambda x: np.real(Minv_matvec(x)) else: self.OPa = lambda x: np.imag(Minv_matvec(x)) if M_matvec is None: self.B = lambda x: x self.bmat = 'I' self.OP = self.OPa else: self.B = M_matvec self.bmat = 'G' self.OP = lambda x: self.OPa(M_matvec(x)) else: raise ValueError("mode=%i not implemented" % mode) if which not in _NEUPD_WHICH: raise ValueError("Parameter which must be one of %s" % ' '.join(_NEUPD_WHICH)) if k >= n - 1: raise ValueError("k must be less than rank(A)-1, k=%d" % k) _ArpackParams.__init__(self, n, k, tp, mode, sigma, ncv, v0, maxiter, which, tol) if self.ncv > n or self.ncv <= k + 1: raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv) self.workd = np.zeros(3 * n, self.tp) self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp) ltr = _type_conv[self.tp] self._arpack_solver = _arpack.__dict__[ltr + 'naupd'] self._arpack_extract = _arpack.__dict__[ltr + 'neupd'] self.iterate_infodict = _NAUPD_ERRORS[ltr] self.extract_infodict = _NEUPD_ERRORS[ltr] self.ipntr = np.zeros(14, "int") if self.tp in 'FD': self.rwork = np.zeros(self.ncv, self.tp.lower()) else: self.rwork = None def iterate(self): if self.tp in 'fd': self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) else: self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\ self._arpack_solver(self.ido, self.bmat, self.which, self.k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, self.info) xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n) yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n) if self.ido == -1: # initialization self.workd[yslice] = self.OP(self.workd[xslice]) elif self.ido == 1: # compute y = Op*x if self.mode in (1, 2): self.workd[yslice] = self.OP(self.workd[xslice]) else: Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n) self.workd[yslice] = self.OPa(self.workd[Bxslice]) elif self.ido == 2: self.workd[yslice] = self.B(self.workd[xslice]) elif self.ido == 3: raise ValueError("ARPACK requested user shifts. 
Assure ISHIFT==0") else: self.converged = True if self.info == 0: pass elif self.info == 1: self._raise_no_convergence() else: raise ArpackError(self.info, infodict=self.iterate_infodict) def extract(self, return_eigenvectors): k, n = self.k, self.n ierr = 0 howmny = 'A' # return all eigenvectors sselect = np.zeros(self.ncv, 'int') # unused sigmar = np.real(self.sigma) sigmai = np.imag(self.sigma) workev = np.zeros(3 * self.ncv, self.tp) if self.tp in 'fd': dr = np.zeros(k + 1, self.tp) di = np.zeros(k + 1, self.tp) zr = np.zeros((n, k + 1), self.tp) dr, di, zr, ierr = \ self._arpack_extract( return_eigenvectors, howmny, sselect, sigmar, sigmai, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.info) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) nreturned = self.iparam[4] # number of good eigenvalues returned # Build complex eigenvalues from real and imaginary parts d = dr + 1.0j * di # Arrange the eigenvectors: complex eigenvectors are stored as # real,imaginary in consecutive columns z = zr.astype(self.tp.upper()) # The ARPACK nonsymmetric real and double interface (s,d)naupd # return eigenvalues and eigenvectors in real (float,double) # arrays. # Efficiency: this should check that return_eigenvectors == True # before going through this construction. if sigmai == 0: i = 0 while i <= k: # check if complex if abs(d[i].imag) != 0: # this is a complex conjugate pair with eigenvalues # in consecutive columns if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 else: # real matrix, mode 3 or 4, imag(sigma) is nonzero: # see remark 3 in <s,d>neupd.f # Build complex eigenvalues from real and imaginary parts i = 0 while i <= k: if abs(d[i].imag) == 0: d[i] = np.dot(zr[:, i], self.matvec(zr[:, i])) else: if i < k: z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1] z[:, i + 1] = z[:, i].conjugate() d[i] = ((np.dot(zr[:, i], self.matvec(zr[:, i])) + np.dot(zr[:, i + 1], self.matvec(zr[:, i + 1]))) + 1j * (np.dot(zr[:, i], self.matvec(zr[:, i + 1])) - np.dot(zr[:, i + 1], self.matvec(zr[:, i])))) d[i + 1] = d[i].conj() i += 1 else: #last eigenvalue is complex: the imaginary part of # the eigenvector has not been returned #this can only happen if nreturned > k, so we'll # throw out this case. nreturned -= 1 i += 1 # Now we have k+1 possible eigenvalues and eigenvectors # Return the ones specified by the keyword "which" if nreturned <= k: # we got less or equal as many eigenvalues we wanted d = d[:nreturned] z = z[:, :nreturned] else: # we got one extra eigenvalue (likely a cc pair, but which?) # cut at approx precision for sorting rd = np.round(d, decimals=_ndigits[self.tp]) if self.which in ['LR', 'SR']: ind = np.argsort(rd.real) elif self.which in ['LI', 'SI']: # for LI,SI ARPACK returns largest,smallest # abs(imaginary) why? ind = np.argsort(abs(rd.imag)) else: ind = np.argsort(abs(rd)) if self.which in ['LR', 'LM', 'LI']: d = d[ind[-k:]] z = z[:, ind[-k:]] if self.which in ['SR', 'SM', 'SI']: d = d[ind[:k]] z = z[:, ind[:k]] else: # complex is so much simpler... 
d, z, ierr =\ self._arpack_extract( return_eigenvectors, howmny, sselect, self.sigma, workev, self.bmat, self.which, k, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.workd, self.workl, self.rwork, ierr) if ierr != 0: raise ArpackError(ierr, infodict=self.extract_infodict) k_ok = self.iparam[4] d = d[:k_ok] z = z[:, :k_ok] if return_eigenvectors: return d, z else: return d def _aslinearoperator_with_dtype(m): m = aslinearoperator(m) if not hasattr(m, 'dtype'): x = np.zeros(m.shape[1]) m.dtype = (m * x).dtype return m class SpLuInv(LinearOperator): """ SpLuInv: helper class to repeatedly solve M*x=b using a sparse LU-decomposition of M """ def __init__(self, M): self.M_lu = splu(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) self.isreal = not np.issubdtype(self.dtype, np.complexfloating) def _matvec(self, x): # careful here: splu.solve will throw away imaginary # part of x if M is real if self.isreal and np.issubdtype(x.dtype, np.complexfloating): return (self.M_lu.solve(np.real(x)) + 1j * self.M_lu.solve(np.imag(x))) else: return self.M_lu.solve(x) class LuInv(LinearOperator): """ LuInv: helper class to repeatedly solve M*x=b using an LU-decomposition of M """ def __init__(self, M): self.M_lu = lu_factor(M) LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype) def _matvec(self, x): return lu_solve(self.M_lu, x) class IterInv(LinearOperator): """ IterInv: helper class to repeatedly solve M*x=b using an iterative method. """ def __init__(self, M, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(M.dtype).eps self.M = M self.ifunc = ifunc self.tol = tol if hasattr(M, 'dtype'): dtype = M.dtype else: x = np.zeros(M.shape[1]) dtype = (M * x).dtype LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype) def _matvec(self, x): b, info = self.ifunc(self.M, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting M: function " "%s did not converge (info = %i)." % (self.ifunc.__name__, info)) return b class IterOpInv(LinearOperator): """ IterOpInv: helper class to repeatedly solve [A-sigma*M]*x = b using an iterative method """ def __init__(self, A, M, sigma, ifunc=gmres, tol=0): if tol <= 0: # when tol=0, ARPACK uses machine tolerance as calculated # by LAPACK's _LAMCH function. We should match this tol = np.finfo(A.dtype).eps self.A = A self.M = M self.sigma = sigma self.ifunc = ifunc self.tol = tol x = np.zeros(A.shape[1]) if M is None: dtype = self.mult_func_M_None(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func_M_None, dtype=dtype) else: dtype = self.mult_func(x).dtype self.OP = LinearOperator(self.A.shape, self.mult_func, dtype=dtype) LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype) def mult_func(self, x): return self.A.matvec(x) - self.sigma * self.M.matvec(x) def mult_func_M_None(self, x): return self.A.matvec(x) - self.sigma * x def _matvec(self, x): b, info = self.ifunc(self.OP, x, tol=self.tol) if info != 0: raise ValueError("Error in inverting [A-sigma*M]: function " "%s did not converge (info = %i)."
% (self.ifunc.__name__, info)) return b def get_inv_matvec(M, symmetric=False, tol=0): if isdense(M): return LuInv(M).matvec elif isspmatrix(M): if isspmatrix_csr(M) and symmetric: M = M.T return SpLuInv(M).matvec else: return IterInv(M, tol=tol).matvec def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0): if sigma == 0: return get_inv_matvec(A, symmetric=symmetric, tol=tol) if M is None: #M is the identity matrix if isdense(A): if (np.issubdtype(A.dtype, np.complexfloating) or np.imag(sigma) == 0): A = np.copy(A) else: A = A + 0j A.flat[::A.shape[1] + 1] -= sigma return LuInv(A).matvec elif isspmatrix(A): A = A - sigma * identity(A.shape[0]) if symmetric and isspmatrix_csr(A): A = A.T return SpLuInv(A.tocsc()).matvec else: return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma, tol=tol).matvec else: if ((not isdense(A) and not isspmatrix(A)) or (not isdense(M) and not isspmatrix(M))): return IterOpInv(_aslinearoperator_with_dtype(A), _aslinearoperator_with_dtype(M), sigma, tol=tol).matvec elif isdense(A) or isdense(M): return LuInv(A - sigma * M).matvec else: OP = A - sigma * M if symmetric and isspmatrix_csr(OP): OP = OP.T return SpLuInv(OP.tocsc()).matvec def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, OPpart=None): """ Find k eigenvalues and eigenvectors of the square matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing \ the operation A * x, where A is a real or complex square matrix. k : int, default 6 The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. return_eigenvectors : boolean, default True Whether to return the eigenvectors along with the eigenvalues. M : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation M*x for the generalized eigenvalue problem ``A * x = w * M * x`` M must represent a real symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma==None, M is positive definite * If sigma is specified, M is positive semi-definite If sigma==None, eigs requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real or complex Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] * x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. For a real matrix A, shift-invert can either be done in imaginary mode or real mode, specified by the parameter OPpart ('r' or 'i'). 
Note that when sigma is specified, the keyword 'which' (below) refers to the shifted eigenvalues w'[i] where: * If A is real and OPpart == 'r' (default), w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ] * If A is real and OPpart == 'i', w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ] * If A is complex, w'[i] = 1/(w[i]-sigma) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated `ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``. which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'] Which `k` eigenvectors and eigenvalues to find: - 'LM' : largest magnitude - 'SM' : smallest magnitude - 'LR' : largest real part - 'SR' : smallest real part - 'LI' : largest imaginary part - 'SI' : smallest imaginary part When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion) The default value of 0 implies machine precision. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues Minv : N x N matrix, array, sparse matrix, or linear operator See notes in M, above. OPinv : N x N matrix, array, sparse matrix, or linear operator See notes in sigma, above. OPpart : 'r' or 'i'. See notes in sigma, above Returns ------- w : array Array of k eigenvalues. v : array An array of `k` eigenvectors. ``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i]. Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object. See Also -------- eigsh : eigenvalues and eigenvectors for symmetric matrix A svds : singular value decomposition for a matrix A Examples -------- Find 6 eigenvectors of the identity matrix: >>> from sklearn.utils.arpack import eigs >>> id = np.identity(13) >>> vals, vecs = eigs(id, k=6) >>> vals array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> vecs.shape (13, 6) Notes ----- This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD, ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to find the eigenvalues and eigenvectors [2]_. References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. 
' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if OPpart is not None: raise ValueError("OPpart should not be specified with " "sigma = None or complex A") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: #sigma is not None: shift-invert mode if np.issubdtype(A.dtype, np.complexfloating): if OPpart is not None: raise ValueError("OPpart should not be specified " "with sigma=None or complex A") mode = 3 elif OPpart is None or OPpart.lower() == 'r': mode = 3 elif OPpart.lower() == 'i': if np.imag(sigma) == 0: raise ValueError("OPpart cannot be 'i' if sigma is real") mode = 4 else: raise ValueError("OPpart must be one of ('r','i')") matvec = _aslinearoperator_with_dtype(A).matvec if Minv is not None: raise ValueError("Minv should not be specified when sigma is") if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=False, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None, maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None, mode='normal'): """ Find k eigenvalues and eigenvectors of the real symmetric square matrix or complex hermitian matrix A. Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i]. If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the generalized eigenvalue problem for w[i] eigenvalues with corresponding eigenvectors x[i] Parameters ---------- A : An N x N matrix, array, sparse matrix, or LinearOperator representing the operation A * x, where A is a real symmetric matrix For buckling mode (see below) A must additionally be positive-definite k : integer The number of eigenvalues and eigenvectors desired. `k` must be smaller than N. It is not possible to compute all eigenvectors of a matrix. M : An N x N matrix, array, sparse matrix, or linear operator representing the operation M * x for the generalized eigenvalue problem ``A * x = w * M * x``. M must represent a real, symmetric matrix. For best results, M should be of the same type as A. Additionally: * If sigma == None, M is symmetric positive definite * If sigma is specified, M is symmetric positive semi-definite * In buckling mode, M is symmetric indefinite. If sigma == None, eigsh requires an operator to compute the solution of the linear equation `M * x = b`. This is done internally via a (sparse) LU decomposition for an explicit matrix M, or via an iterative solver for a general linear operator. 
Alternatively, the user can supply the matrix or operator Minv, which gives x = Minv * b = M^-1 * b sigma : real Find eigenvalues near sigma using shift-invert mode. This requires an operator to compute the solution of the linear system `[A - sigma * M] x = b`, where M is the identity matrix if unspecified. This is computed internally via a (sparse) LU decomposition for explicit matrices A & M, or via an iterative solver if either A or M is a general linear operator. Alternatively, the user can supply the matrix or operator OPinv, which gives x = OPinv * b = [A - sigma * M]^-1 * b. Note that when sigma is specified, the keyword 'which' refers to the shifted eigenvalues w'[i] where: - if mode == 'normal', w'[i] = 1 / (w[i] - sigma) - if mode == 'cayley', w'[i] = (w[i] + sigma) / (w[i] - sigma) - if mode == 'buckling', w'[i] = w[i] / (w[i] - sigma) (see further discussion in 'mode' below) v0 : array Starting vector for iteration. ncv : integer The number of Lanczos vectors generated ncv must be greater than k and smaller than n; it is recommended that ncv > 2*k which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE'] If A is a complex hermitian matrix, 'BE' is invalid. Which `k` eigenvectors and eigenvalues to find - 'LM' : Largest (in magnitude) eigenvalues - 'SM' : Smallest (in magnitude) eigenvalues - 'LA' : Largest (algebraic) eigenvalues - 'SA' : Smallest (algebraic) eigenvalues - 'BE' : Half (k/2) from each end of the spectrum When k is odd, return one more (k/2+1) from the high end When sigma != None, 'which' refers to the shifted eigenvalues w'[i] (see discussion in 'sigma', above). ARPACK is generally better at finding large values than small values. If small eigenvalues are desired, consider using shift-invert mode for better performance. maxiter : integer Maximum number of Arnoldi update iterations allowed tol : float Relative accuracy for eigenvalues (stopping criterion). The default value of 0 implies machine precision. Minv : N x N matrix, array, sparse matrix, or LinearOperator See notes in M, above OPinv : N x N matrix, array, sparse matrix, or LinearOperator See notes in sigma, above. return_eigenvectors : boolean Return eigenvectors (True) in addition to eigenvalues mode : string ['normal' | 'buckling' | 'cayley'] Specify strategy to use for shift-invert mode. This argument applies only for real-valued A and sigma != None. For shift-invert mode, ARPACK internally solves the eigenvalue problem ``OP * x'[i] = w'[i] * B * x'[i]`` and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i] into the desired eigenvectors and eigenvalues of the problem ``A * x[i] = w[i] * M * x[i]``. The modes are as follows: - 'normal' : OP = [A - sigma * M]^-1 * M B = M w'[i] = 1 / (w[i] - sigma) - 'buckling' : OP = [A - sigma * M]^-1 * A B = A w'[i] = w[i] / (w[i] - sigma) - 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M] B = M w'[i] = (w[i] + sigma) / (w[i] - sigma) The choice of mode will affect which eigenvalues are selected by the keyword 'which', and can also impact the stability of convergence (see [2] for a discussion) Returns ------- w : array Array of k eigenvalues v : array An array of k eigenvectors The v[i] is the eigenvector corresponding to the eigenvalue w[i] Raises ------ ArpackNoConvergence When the requested convergence is not obtained. The currently converged eigenvalues and eigenvectors can be found as ``eigenvalues`` and ``eigenvectors`` attributes of the exception object.
See Also -------- eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A svds : singular value decomposition for a matrix A Notes ----- This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD functions which use the Implicitly Restarted Lanczos Method to find the eigenvalues and eigenvectors [2]_. Examples -------- >>> from sklearn.utils.arpack import eigsh >>> id = np.identity(13) >>> vals, vecs = eigsh(id, k=6) >>> vals # doctest: +SKIP array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j]) >>> print(vecs.shape) (13, 6) References ---------- .. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/ .. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE: Solution of Large Scale Eigenvalue Problems by Implicitly Restarted Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ # complex hermitian matrices should be solved with eigs if np.issubdtype(A.dtype, np.complexfloating): if mode != 'normal': raise ValueError("mode=%s cannot be used with " "complex matrix A" % mode) if which == 'BE': raise ValueError("which='BE' cannot be used with complex matrix A") elif which == 'LA': which = 'LR' elif which == 'SA': which = 'SR' ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0, ncv=ncv, maxiter=maxiter, tol=tol, return_eigenvectors=return_eigenvectors, Minv=Minv, OPinv=OPinv) if return_eigenvectors: return ret[0].real, ret[1] else: return ret.real if A.shape[0] != A.shape[1]: raise ValueError('expected square matrix (shape=%s)' % (A.shape,)) if M is not None: if M.shape != A.shape: raise ValueError('wrong M dimensions %s, should be %s' % (M.shape, A.shape)) if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower(): warnings.warn('M does not have the same type precision as A. ' 'This may adversely affect ARPACK convergence') n = A.shape[0] if k <= 0 or k >= n: raise ValueError("k must be between 1 and rank(A)-1") if sigma is None: A = _aslinearoperator_with_dtype(A) matvec = A.matvec if OPinv is not None: raise ValueError("OPinv should not be specified " "with sigma = None.") if M is None: #standard eigenvalue problem mode = 1 M_matvec = None Minv_matvec = None if Minv is not None: raise ValueError("Minv should not be " "specified with M = None.") else: #general eigenvalue problem mode = 2 if Minv is None: Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol) else: Minv = _aslinearoperator_with_dtype(Minv) Minv_matvec = Minv.matvec M_matvec = _aslinearoperator_with_dtype(M).matvec else: # sigma is not None: shift-invert mode if Minv is not None: raise ValueError("Minv should not be specified when sigma is") # normal mode if mode == 'normal': mode = 3 matvec = None if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: OPinv = _aslinearoperator_with_dtype(OPinv) Minv_matvec = OPinv.matvec if M is None: M_matvec = None else: M = _aslinearoperator_with_dtype(M) M_matvec = M.matvec # buckling mode elif mode == 'buckling': mode = 4 if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec matvec = _aslinearoperator_with_dtype(A).matvec M_matvec = None # cayley-transform mode elif mode == 'cayley': mode = 5 matvec = _aslinearoperator_with_dtype(A).matvec if OPinv is None: Minv_matvec = get_OPinv_matvec(A, M, sigma, symmetric=True, tol=tol) else: Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec if M is None: M_matvec = None else: M_matvec = _aslinearoperator_with_dtype(M).matvec # unrecognized mode else: 
raise ValueError("unrecognized mode '%s'" % mode) params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode, M_matvec, Minv_matvec, sigma, ncv, v0, maxiter, which, tol) while not params.converged: params.iterate() return params.extract(return_eigenvectors) def _svds(A, k=6, ncv=None, tol=0): """Compute k singular values/vectors for a sparse matrix using ARPACK. Parameters ---------- A : sparse matrix Array to compute the SVD on k : int, optional Number of singular values and vectors to compute. ncv : integer The number of Lanczos vectors generated ncv must be greater than k+1 and smaller than n; it is recommended that ncv > 2*k tol : float, optional Tolerance for singular values. Zero (default) means machine precision. Notes ----- This is a naive implementation using an eigensolver on A.H * A or A * A.H, depending on which one is more efficient. """ if not (isinstance(A, np.ndarray) or isspmatrix(A)): A = np.asarray(A) n, m = A.shape if np.issubdtype(A.dtype, np.complexfloating): herm = lambda x: x.T.conjugate() eigensolver = eigs else: herm = lambda x: x.T eigensolver = eigsh if n > m: X = A XH = herm(A) else: XH = A X = herm(A) if hasattr(XH, 'dot'): def matvec_XH_X(x): return XH.dot(X.dot(x)) else: def matvec_XH_X(x): return np.dot(XH, np.dot(X, x)) XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype, shape=(X.shape[1], X.shape[1])) # Ignore deprecation warnings here: dot on matrices is deprecated, # but this code is a backport anyhow with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2) s = np.sqrt(eigvals) if n > m: v = eigvec if hasattr(X, 'dot'): u = X.dot(v) / s else: u = np.dot(X, v) / s vh = herm(v) else: u = eigvec if hasattr(X, 'dot'): vh = herm(X.dot(u) / s) else: vh = herm(np.dot(X, u) / s) return u, s, vh # check if backport is actually needed: if scipy.version.version >= LooseVersion('0.10'): from scipy.sparse.linalg import eigs, eigsh, svds else: eigs, eigsh, svds = _eigs, _eigsh, _svds
bsd-3-clause
Pi2-FGABreja/FGABreja-Pi
FGABrejaAPI/monitoring/views.py
2
1354
from django.views.generic import View from django.http import HttpResponse from .models import Sensor from django.core import serializers from controlling.models import Process import json class SensorView(View): http_method_names = [u'get'] def get(self, request, sensor_id=None): if sensor_id: sensors = Sensor.read.filter(id=sensor_id) else: sensors = Sensor.read.all() data = serializers.serialize("json", sensors) return HttpResponse(data, content_type='application/json') def get_thermal_sensors(request): sensors = Sensor.read.thermal() data = serializers.serialize("json", sensors) return HttpResponse(data, content_type='application/json') def get_average_temperature(request): average = Process.current().last_temperature data = {'average': float(average), 'sensor_type': 'thermal'} return HttpResponse(json.dumps(data), content_type='application/json') def get_level_sensors(request): sensors = Sensor.read.level() data = serializers.serialize("json", sensors) return HttpResponse(data, content_type='application/json') def get_ldr_sensors(request): sensors = Sensor.read.ldr() data = serializers.serialize("json", sensors) return HttpResponse(data, content_type='application/json')
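# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original app): one plausible way to route
# requests to the views above. The URL patterns and prefixes are assumptions;
# the project's actual urls.py is not shown in this record.
# ---------------------------------------------------------------------------
# from django.conf.urls import url
# from monitoring import views
#
# urlpatterns = [
#     url(r'^sensors/$', views.SensorView.as_view()),
#     url(r'^sensors/(?P<sensor_id>\d+)/$', views.SensorView.as_view()),
#     url(r'^sensors/thermal/$', views.get_thermal_sensors),
#     url(r'^sensors/thermal/average/$', views.get_average_temperature),
#     url(r'^sensors/level/$', views.get_level_sensors),
#     url(r'^sensors/ldr/$', views.get_ldr_sensors),
# ]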
gpl-3.0
sdbonin/SOQresearch
SOQswapRK4.py
1
8364
# -*- coding: utf-8 -*- """ This code uses a loop along with our set of coupled differential equations and matrix math to create arrays of 4-vector quaternions. The old plotting functions need to be updated and incorporated into the end of this code or a better visualization solution needs to be found. """ #------------------------------------------------------------------------------ # Importing modules and copying functions # AKA "Setting stuff up" #------------------------------------------------------------------------------ import numpy as np from time import time as checktime # a set of unit quaternions and the identity matrix for building general q-matrices rm = np.identity(2) im = np.array([[-1j,0],[0,1j]]) jm = np.array([[0,1],[-1,0]]) km = np.array([[0,-1j],[-1j,0]]) def vec_mat(v): ''' Converts a quaternion vector into the 2x2 imaginary matrix representation ''' return v[0]*rm + v[1]*im + v[2]*jm + v[3]*km def mat_vec(M): ''' Converts a 2x2 imaginary matrix quaternion into its vector representation ''' return np.array([ M[1,1].real , M[1,1].imag , M[0,1].real , -M[0,1].imag ]) def qvecmult(vec1,vec2): ''' Multiplies two 4-vector quaternions via matrix math ''' return mat_vec(np.dot(vec_mat(vec1),vec_mat(vec2))) def qmatcon(M): ''' conjugates a 2x2 imaginary matrix quaternion ''' return vec_mat(mat_vec(M)*np.array([1,-1,-1,-1])) def qveccon(vec): ''' conjugates 4-vector quaternion ''' return vec*np.array([1,-1,-1,-1]) def qvecnorm(vec): ''' normalizes a 4-vector quaternion ''' return vec/np.sqrt(qvecmult(qveccon(vec),vec)[0]) def qmatnorm(M): ''' piggy-backs off the previous function to normalize 2x2 imaginary matrices ''' return vec_mat(qvecnorm(mat_vec(M))) def qvecmagsqr(vec): ''' returns the magnitude squared of a 4-vector quaternion ''' return qvecmult(qveccon(vec),vec)[0] def qmatmagsqr(M): ''' piggy-backs off the previous function to give the magnitude squared of 2x2 imaginary matrix quaternions ''' return qvecmagsqr(mat_vec(M)) #------------------------------------------------------------------------------ # Defining the differential equations # AKA "Bringing (first) order to the universe" #------------------------------------------------------------------------------ def q1_dot(q1,q2,p1,p2,a): ''' takes the current value of things that we know and calculates derivatives Function assumes 2x2 complex matrices as inputs for q1,q2,p1,p2 a is the coupling constant ''' return (p1 - a*np.dot(q1,np.dot(qmatcon(q2),p2))) \ #/(1. - qmatmagsqr(q1)*qmatmagsqr(q2)*a**2) def p1_dot(q1,q2,q1dot,q2dot,a,w): ''' takes the current values of things we know and the hopefully recently calculated derivatives of q1,q2 and uses them to find other derivatives ''' return a*np.dot(q1dot,np.dot(qmatcon(q2dot),q2)) - q1*w**2 #------------------------------------------------------------------------------ # Defining necessary constants and initial conditions # AKA "on the first day..." #------------------------------------------------------------------------------ w = 1. # \omega_0 in our notation a = 0.01 # coupling constant.
\alpha in our notation print 'alpha =',a seed = 42 np.random.seed(seed) print 'seed =',seed q1 = vec_mat([1,0,0,0]) q2 = vec_mat([1,0,0,0]) p1 = np.random.rand(4) p2 = np.random.rand(4) p1[0] = 0 p2[0] = 0 p1 = vec_mat(p1) p2 = vec_mat(p2) q1 = qmatnorm(q1) q2 = qmatnorm(q2) p1 = qmatnorm(p1) p2 = qmatnorm(p2) #------------------------------------------------------------------------------ # Defining loop parameters # AKA "Configuring the space-time continuum" #------------------------------------------------------------------------------ dt = 0.01 #time step t = 0 print 'dt = ',dt q1a = [mat_vec(q1)] p1a = [mat_vec(p1)] s1a = [mat_vec(np.dot(qmatcon(p1),q1))] q2a = [mat_vec(q2)] p2a = [mat_vec(p2)] s2a = [mat_vec(np.dot(qmatcon(p2),q2))] time = [t] swaptime = 0.8785/a #determined 'experimentally' #------------------------------------------------------------------------------ # Checking conserved quantity # AKA "might as well..." #------------------------------------------------------------------------------ con = [] #checking to see if our conserved quantity is actually conserved def conserved(q1,q2,p1,p2): return np.dot(qmatcon(p1),q1) + np.dot(qmatcon(p2),q2) #------------------------------------------------------------------------------ # Creating the time loop # AKA "Let 'er rip" #------------------------------------------------------------------------------ runtime = checktime() while t<swaptime: ''' This integrator works on an RK4 algorithm. For a good explaination, see wikipedia https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods note that the algorithm is modified slightly to fit our function ''' q1k1 = q1_dot(q1,q2,p1,p2,a) q2k1 = q1_dot(q2,q1,p2,p1,a) p1k1 = p1_dot(q1,q2,q1k1,q2k1,a,w) p2k1 = p1_dot(q2,q1,q2k1,q1k1,a,w) q1k2 = q1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,p1+p1k1*dt/2.,p2+p2k1*dt/2.,a) q2k2 = q1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,p2+p2k1*dt/2.,p1+p1k1*dt/2.,a) p1k2 = p1_dot(q1+q1k1*dt/2.,q2+q2k1*dt/2.,q1k1,q2k1,a,w) p2k2 = p1_dot(q2+q2k1*dt/2.,q1+q1k1*dt/2.,q2k1,q1k1,a,w) q1k3 = q1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,p1+p1k2*dt/2.,p2+p2k2*dt/2.,a) q2k3 = q1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,p2+p2k2*dt/2.,p1+p1k2*dt/2.,a) p1k3 = p1_dot(q1+q1k2*dt/2.,q2+q2k2*dt/2.,q1k1,q2k1,a,w) p2k3 = p1_dot(q2+q2k2*dt/2.,q1+q1k2*dt/2.,q2k1,q1k1,a,w) q1k4 = q1_dot(q1+q1k3*dt,q2+q2k3*dt,p1+p1k3*dt,p2+p2k3*dt,a) q2k4 = q1_dot(q2+q2k3*dt,q1+q1k3*dt,p2+p2k3*dt,p1+p1k3*dt,a) p1k4 = p1_dot(q1+q1k3*dt,q2+q2k3*dt,q1k1,q2k1,a,w) p2k4 = p1_dot(q2+q2k3*dt,q1+q1k3*dt,q2k1,q1k1,a,w) q1 += (q1k1 + 2*q1k2 + 2*q1k3 + q1k4)*dt/6. q2 += (q2k1 + 2*q2k2 + 2*q2k3 + q2k4)*dt/6. p1 += (p1k1 + 2*p1k2 + 2*p1k3 + p1k4)*dt/6. p2 += (p2k1 + 2*p2k2 + 2*p2k3 + p2k4)*dt/6. t += dt q1a.append(mat_vec(q1)) p1a.append(mat_vec(p1)) s1a.append(mat_vec(np.dot(qmatcon(p1),q1))) q2a.append(mat_vec(q2)) p2a.append(mat_vec(p2)) s2a.append(mat_vec(np.dot(qmatcon(p2),q2))) time.append(t) runtime = checktime() - runtime q1a = np.array(q1a) q2a = np.array(q2a) p1a = np.array(p1a) p2a = np.array(p2a) s1a = np.array(s1a) s2a = np.array(s2a) time = np.array(time) #------------------------------------------------------------------------------ # Plotting things # AKA "Can we see it now?" 
#------------------------------------------------------------------------------ import matplotlib.pyplot as plt def vecplot(thing,time,name): plt.clf() plt.title(name) plt.plot(time,thing[:,0],label='Real', color = 'black') plt.plot(time,thing[:,1],label='i', color = 'red') plt.plot(time,thing[:,2],label='j', color = 'green') plt.plot(time,thing[:,3],label='k', color = 'blue') plt.legend(loc='best') plt.xlim([time[0], time[-1]]) plt.grid() plt.show() def scalarplot(thing,time,name): plt.clf() plt.title(name) plt.plot(time,thing,color = 'black') plt.grid() plt.xlim([time[0], time[-1]]) plt.show() vecplot(q1a,time,'$q_1$') vecplot(q2a,time,'$q_2$') vecplot(p1a,time,'$p_1$') vecplot(p2a,time,'$p_2$') vecplot(s1a,time,'$p_1^{\dagger}q_1$') vecplot(s2a,time,'$p_2^{\dagger}q_2$') print 'Initial:' print 'q1 = ', q1a[0] print 'q2 = ', q2a[0] print 'p1 = ', p1a[0] print 'p2 = ', p2a[0] print 's1 = ', s1a[0] print 's2 = ', s2a[0] print 'Final:' print 'q1 = ', q1a[-1] print 'q2 = ', q2a[-1] print 'p1 = ', p1a[-1] print 'p2 = ', p2a[-1] print 's1 = ', s1a[-1] print 's2 = ', s2a[-1] print 'runtime is',runtime, 'seconds'
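#------------------------------------------------------------------------------
# Editor's sketch: sanity-checking the quaternion algebra
# AKA "trust, but verify"
#------------------------------------------------------------------------------
# This block is an added illustration, not part of the original script. It
# confirms that the 2x2 matrix representation reproduces the Hamilton products
# i*j = k, j*k = i, k*i = j, and that conj(q)*q puts |q|^2 in the scalar slot.
i_vec = np.array([0., 1., 0., 0.])
j_vec = np.array([0., 0., 1., 0.])
k_vec = np.array([0., 0., 0., 1.])
print 'i*j =', qvecmult(i_vec, j_vec)  # expect [0. 0. 0. 1.], i.e. k
print 'j*k =', qvecmult(j_vec, k_vec)  # expect [0. 1. 0. 0.], i.e. i
print 'k*i =', qvecmult(k_vec, i_vec)  # expect [0. 0. 1. 0.], i.e. j
q_test = np.array([1., 2., 3., 4.])
print 'conj(q)*q =', qvecmult(qveccon(q_test), q_test)  # expect [30. 0. 0. 0.]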
mit
benspaulding/django
django/views/generic/dates.py
7
25073
from __future__ import unicode_literals import datetime from django.conf import settings from django.db import models from django.core.exceptions import ImproperlyConfigured from django.http import Http404 from django.utils.encoding import force_unicode from django.utils.functional import cached_property from django.utils.translation import ugettext as _ from django.utils import timezone from django.views.generic.base import View from django.views.generic.detail import BaseDetailView, SingleObjectTemplateResponseMixin from django.views.generic.list import MultipleObjectMixin, MultipleObjectTemplateResponseMixin class YearMixin(object): """ Mixin for views manipulating year-based data. """ year_format = '%Y' year = None def get_year_format(self): """ Get a year format string in strptime syntax to be used to parse the year from url variables. """ return self.year_format def get_year(self): """ Return the year for which this view should display data. """ year = self.year if year is None: try: year = self.kwargs['year'] except KeyError: try: year = self.request.GET['year'] except KeyError: raise Http404(_("No year specified")) return year def get_next_year(self, date): """ Get the next valid year. """ return _get_next_prev(self, date, is_previous=False, period='year') def get_previous_year(self, date): """ Get the previous valid year. """ return _get_next_prev(self, date, is_previous=True, period='year') def _get_next_year(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date.replace(year=date.year + 1, month=1, day=1) def _get_current_year(self, date): """ Return the start date of the current interval. """ return date.replace(month=1, day=1) class MonthMixin(object): """ Mixin for views manipulating month-based data. """ month_format = '%b' month = None def get_month_format(self): """ Get a month format string in strptime syntax to be used to parse the month from url variables. """ return self.month_format def get_month(self): """ Return the month for which this view should display data. """ month = self.month if month is None: try: month = self.kwargs['month'] except KeyError: try: month = self.request.GET['month'] except KeyError: raise Http404(_("No month specified")) return month def get_next_month(self, date): """ Get the next valid month. """ return _get_next_prev(self, date, is_previous=False, period='month') def get_previous_month(self, date): """ Get the previous valid month. """ return _get_next_prev(self, date, is_previous=True, period='month') def _get_next_month(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ if date.month == 12: return date.replace(year=date.year + 1, month=1, day=1) else: return date.replace(month=date.month + 1, day=1) def _get_current_month(self, date): """ Return the start date of the previous interval. """ return date.replace(day=1) class DayMixin(object): """ Mixin for views manipulating day-based data. """ day_format = '%d' day = None def get_day_format(self): """ Get a day format string in strptime syntax to be used to parse the day from url variables. """ return self.day_format def get_day(self): """ Return the day for which this view should display data. 
""" day = self.day if day is None: try: day = self.kwargs['day'] except KeyError: try: day = self.request.GET['day'] except KeyError: raise Http404(_("No day specified")) return day def get_next_day(self, date): """ Get the next valid day. """ return _get_next_prev(self, date, is_previous=False, period='day') def get_previous_day(self, date): """ Get the previous valid day. """ return _get_next_prev(self, date, is_previous=True, period='day') def _get_next_day(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date + datetime.timedelta(days=1) def _get_current_day(self, date): """ Return the start date of the current interval. """ return date class WeekMixin(object): """ Mixin for views manipulating week-based data. """ week_format = '%U' week = None def get_week_format(self): """ Get a week format string in strptime syntax to be used to parse the week from url variables. """ return self.week_format def get_week(self): """ Return the week for which this view should display data """ week = self.week if week is None: try: week = self.kwargs['week'] except KeyError: try: week = self.request.GET['week'] except KeyError: raise Http404(_("No week specified")) return week def get_next_week(self, date): """ Get the next valid week. """ return _get_next_prev(self, date, is_previous=False, period='week') def get_previous_week(self, date): """ Get the previous valid week. """ return _get_next_prev(self, date, is_previous=True, period='week') def _get_next_week(self, date): """ Return the start date of the next interval. The interval is defined by start date <= item date < next start date. """ return date + datetime.timedelta(days=7 - self._get_weekday(date)) def _get_current_week(self, date): """ Return the start date of the current interval. """ return date - datetime.timedelta(self._get_weekday(date)) def _get_weekday(self, date): """ Return the weekday for a given date. The first day according to the week format is 0 and the last day is 6. """ week_format = self.get_week_format() if week_format == '%W': # week starts on Monday return date.weekday() elif week_format == '%U': # week starts on Sunday return (date.weekday() + 1) % 7 else: raise ValueError("unknown week format: %s" % week_format) class DateMixin(object): """ Mixin class for views manipulating date-based data. """ date_field = None allow_future = False def get_date_field(self): """ Get the name of the date field to be used to filter by. """ if self.date_field is None: raise ImproperlyConfigured("%s.date_field is required." % self.__class__.__name__) return self.date_field def get_allow_future(self): """ Returns `True` if the view should be allowed to display objects from the future. """ return self.allow_future # Note: the following three methods only work in subclasses that also # inherit SingleObjectMixin or MultipleObjectMixin. @cached_property def uses_datetime_field(self): """ Return `True` if the date field is a `DateTimeField` and `False` if it's a `DateField`. """ model = self.get_queryset().model if self.model is None else self.model field = model._meta.get_field(self.get_date_field()) return isinstance(field, models.DateTimeField) def _make_date_lookup_arg(self, value): """ Convert a date into a datetime when the date field is a DateTimeField. When time zone support is enabled, `date` is assumed to be in the current time zone, so that displayed items are consistent with the URL. 
""" if self.uses_datetime_field: value = datetime.datetime.combine(value, datetime.time.min) if settings.USE_TZ: value = timezone.make_aware(value, timezone.get_current_timezone()) return value def _make_single_date_lookup(self, date): """ Get the lookup kwargs for filtering on a single date. If the date field is a DateTimeField, we can't just filter on date_field=date because that doesn't take the time into account. """ date_field = self.get_date_field() if self.uses_datetime_field: since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(date + datetime.timedelta(days=1)) return { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } else: # Skip self._make_date_lookup_arg, it's a no-op in this branch. return {date_field: date} class BaseDateListView(MultipleObjectMixin, DateMixin, View): """ Abstract base class for date-based views displaying a list of objects. """ allow_empty = False def get(self, request, *args, **kwargs): self.date_list, self.object_list, extra_context = self.get_dated_items() context = self.get_context_data(object_list=self.object_list, date_list=self.date_list) context.update(extra_context) return self.render_to_response(context) def get_dated_items(self): """ Obtain the list of dates and items. """ raise NotImplementedError('A DateView must provide an implementation of get_dated_items()') def get_dated_queryset(self, ordering=None, **lookup): """ Get a queryset properly filtered according to `allow_future` and any extra lookup kwargs. """ qs = self.get_queryset().filter(**lookup) date_field = self.get_date_field() allow_future = self.get_allow_future() allow_empty = self.get_allow_empty() paginate_by = self.get_paginate_by(qs) if ordering is not None: qs = qs.order_by(ordering) if not allow_future: now = timezone.now() if self.uses_datetime_field else timezone_today() qs = qs.filter(**{'%s__lte' % date_field: now}) if not allow_empty: # When pagination is enabled, it's better to do a cheap query # than to load the unpaginated queryset in memory. is_empty = len(qs) == 0 if paginate_by is None else not qs.exists() if is_empty: raise Http404(_("No %(verbose_name_plural)s available") % { 'verbose_name_plural': force_unicode(qs.model._meta.verbose_name_plural) }) return qs def get_date_list(self, queryset, date_type): """ Get a date list by calling `queryset.dates()`, checking along the way for empty lists that aren't allowed. """ date_field = self.get_date_field() allow_empty = self.get_allow_empty() date_list = queryset.dates(date_field, date_type)[::-1] if date_list is not None and not date_list and not allow_empty: name = force_unicode(queryset.model._meta.verbose_name_plural) raise Http404(_("No %(verbose_name_plural)s available") % {'verbose_name_plural': name}) return date_list class BaseArchiveIndexView(BaseDateListView): """ Base class for archives of date-based items. Requires a response mixin. """ context_object_name = 'latest' def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ qs = self.get_dated_queryset(ordering='-%s' % self.get_date_field()) date_list = self.get_date_list(qs, 'year') if not date_list: qs = qs.none() return (date_list, qs, {}) class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView): """ Top-level archive of date-based items. """ template_name_suffix = '_archive' class BaseYearArchiveView(YearMixin, BaseDateListView): """ List of objects published in a given year. 
""" make_object_list = False def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() date_field = self.get_date_field() date = _date_from_string(year, self.get_year_format()) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_year(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(ordering='-%s' % date_field, **lookup_kwargs) date_list = self.get_date_list(qs, 'month') if not self.get_make_object_list(): # We need this to be a queryset since parent classes introspect it # to find information about the model. qs = qs.none() return (date_list, qs, { 'year': date, 'next_year': self.get_next_year(date), 'previous_year': self.get_previous_year(date), }) def get_make_object_list(self): """ Return `True` if this view should contain the full list of objects in the given year. """ return self.make_object_list class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView): """ List of objects published in a given year. """ template_name_suffix = '_archive_year' class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView): """ List of objects published in a given year. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() month = self.get_month() date_field = self.get_date_field() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format()) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_month(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(**lookup_kwargs) date_list = self.get_date_list(qs, 'day') return (date_list, qs, { 'month': date, 'next_month': self.get_next_month(date), 'previous_month': self.get_previous_month(date), }) class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView): """ List of objects published in a given year. """ template_name_suffix = '_archive_month' class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView): """ List of objects published in a given week. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ year = self.get_year() week = self.get_week() date_field = self.get_date_field() week_format = self.get_week_format() week_start = { '%W': '1', '%U': '0', }[week_format] date = _date_from_string(year, self.get_year_format(), week_start, '%w', week, week_format) since = self._make_date_lookup_arg(date) until = self._make_date_lookup_arg(self._get_next_week(date)) lookup_kwargs = { '%s__gte' % date_field: since, '%s__lt' % date_field: until, } qs = self.get_dated_queryset(**lookup_kwargs) return (None, qs, { 'week': date, 'next_week': self.get_next_week(date), 'previous_week': self.get_previous_week(date), }) class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView): """ List of objects published in a given week. """ template_name_suffix = '_archive_week' class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView): """ List of objects published on a given day. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. 
""" year = self.get_year() month = self.get_month() day = self.get_day() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format()) return self._get_dated_items(date) def _get_dated_items(self, date): """ Do the actual heavy lifting of getting the dated items; this accepts a date object so that TodayArchiveView can be trivial. """ lookup_kwargs = self._make_single_date_lookup(date) qs = self.get_dated_queryset(**lookup_kwargs) return (None, qs, { 'day': date, 'previous_day': self.get_previous_day(date), 'next_day': self.get_next_day(date), 'previous_month': self.get_previous_month(date), 'next_month': self.get_next_month(date) }) class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView): """ List of objects published on a given day. """ template_name_suffix = "_archive_day" class BaseTodayArchiveView(BaseDayArchiveView): """ List of objects published today. """ def get_dated_items(self): """ Return (date_list, items, extra_context) for this request. """ return self._get_dated_items(datetime.date.today()) class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView): """ List of objects published today. """ template_name_suffix = "_archive_day" class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView): """ Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL. """ def get_object(self, queryset=None): """ Get the object this request displays. """ year = self.get_year() month = self.get_month() day = self.get_day() date = _date_from_string(year, self.get_year_format(), month, self.get_month_format(), day, self.get_day_format()) # Use a custom queryset if provided qs = queryset or self.get_queryset() if not self.get_allow_future() and date > datetime.date.today(): raise Http404(_("Future %(verbose_name_plural)s not available because %(class_name)s.allow_future is False.") % { 'verbose_name_plural': qs.model._meta.verbose_name_plural, 'class_name': self.__class__.__name__, }) # Filter down a queryset from self.queryset using the date from the # URL. This'll get passed as the queryset to DetailView.get_object, # which'll handle the 404 lookup_kwargs = self._make_single_date_lookup(date) qs = qs.filter(**lookup_kwargs) return super(BaseDetailView, self).get_object(queryset=qs) class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView): """ Detail view of a single object on a single date; this differs from the standard DetailView by accepting a year/month/day in the URL. """ template_name_suffix = '_detail' def _date_from_string(year, year_format, month='', month_format='', day='', day_format='', delim='__'): """ Helper: get a datetime.date object given a format string and a year, month, and day (only year is mandatory). Raise a 404 for an invalid date. """ format = delim.join((year_format, month_format, day_format)) datestr = delim.join((year, month, day)) try: return datetime.datetime.strptime(datestr, format).date() except ValueError: raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % { 'datestr': datestr, 'format': format, }) def _get_next_prev(generic_view, date, is_previous, period): """ Helper: Get the next or the previous valid date. The idea is to allow links on month/day views to never be 404s by never providing a date that'll be invalid for the given view. 
This is a bit complicated since it handles different intervals of time, hence the coupling to generic_view. However, in essence the logic comes down to: * If allow_empty and allow_future are both true, this is easy: just return the naive result (just the next/previous day/week/month, regardless of object existence.) * If allow_empty is true, allow_future is false, and the naive result isn't in the future, then return it; otherwise return None. * If allow_empty is false and allow_future is true, return the next date *that contains a valid object*, even if it's in the future. If there are no next objects, return None. * If allow_empty is false and allow_future is false, return the next date that contains a valid object. If that date is in the future, or if there are no next objects, return None. """ date_field = generic_view.get_date_field() allow_empty = generic_view.get_allow_empty() allow_future = generic_view.get_allow_future() get_current = getattr(generic_view, '_get_current_%s' % period) get_next = getattr(generic_view, '_get_next_%s' % period) # Bounds of the current interval start, end = get_current(date), get_next(date) # If allow_empty is True, the naive result will be valid if allow_empty: if is_previous: result = get_current(start - datetime.timedelta(days=1)) else: result = end if allow_future or result <= timezone_today(): return result else: return None # Otherwise, we'll need to go to the database to look for an object # whose date_field is at least (greater than/less than) the given # naive result else: # Construct a lookup and an ordering depending on whether we're doing # a previous date or a next date lookup. if is_previous: lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)} ordering = '-%s' % date_field else: lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)} ordering = date_field # Filter out objects in the future if appropriate. if not allow_future: # Fortunately, to match the implementation of allow_future, # we need __lte, which doesn't conflict with __lt above. if generic_view.uses_datetime_field: now = timezone.now() else: now = timezone_today() lookup['%s__lte' % date_field] = now qs = generic_view.get_queryset().filter(**lookup).order_by(ordering) # Snag the first object from the queryset; if it doesn't exist that # means there's no next/previous link available. try: result = getattr(qs[0], date_field) except IndexError: return None # Convert datetimes to dates in the current time zone. if generic_view.uses_datetime_field: if settings.USE_TZ: result = timezone.localtime(result) result = result.date() # Return the first day of the period. return get_current(result) def timezone_today(): """ Return the current date in the current time zone. """ if settings.USE_TZ: return timezone.localtime(timezone.now()).date() else: return datetime.date.today()
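# ---------------------------------------------------------------------------
# Editor's sketch (not part of this module): subclassing one of the generic
# date views above. ``Article`` and its ``pub_date`` field are hypothetical
# names used only for illustration.
# ---------------------------------------------------------------------------
# from myapp.models import Article
#
# class ArticleYearArchiveView(YearArchiveView):
#     model = Article
#     date_field = 'pub_date'
#     make_object_list = True   # expose object_list, not just the date list
#     allow_future = False      # hide articles dated in the future
#
# # urls.py:
# # url(r'^articles/(?P<year>\d{4})/$', ArticleYearArchiveView.as_view()),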
bsd-3-clause
byterom/android_external_chromium_org
android_webview/buildbot/generate_local_manifest.py
114
3782
#!/usr/bin/env python # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generate local manifest in an Android repository. This is used to generate a local manifest in an Android repository. The purpose of the generated manifest is to remove the set of projects that exist under a certain path. """ from optparse import OptionParser import os import xml.etree.ElementTree as ET def createLocalManifest(manifest_path, local_manifest_path, path_to_exclude, pinned_projects=None): manifest_tree = ET.parse(manifest_path) local_manifest_root = ET.Element('manifest') def remove_project(project): remove_project = ET.SubElement(local_manifest_root, 'remove-project') remove_project.set('name', project.get('name')) def pin_project(project, revision): pin_project = ET.SubElement(local_manifest_root, 'project') pin_project.set('name', project.get('name')) if project.get('path') != None: pin_project.set('path', project.get('path')) pin_project.set('revision', revision) for project in manifest_tree.getroot().findall('project'): project_path = project.get('path') project_name = project.get('name') exclude_project = ((project_path != None and project_path.startswith(path_to_exclude)) or (project_path == None and project_name.startswith(path_to_exclude))) if exclude_project: print 'Excluding project name="%s" path="%s"' % (project_name, project_path) remove_project(project) continue pinned_projects = pinned_projects or [] for pinned in pinned_projects: if pinned['path'] == project_path and pinned['name'] == project_name: remove_project(project) pin_project(project, pinned['revision']) break local_manifest_tree = ET.ElementTree(local_manifest_root) local_manifest_dir = os.path.dirname(local_manifest_path) if not os.path.exists(local_manifest_dir): os.makedirs(local_manifest_dir) local_manifest_tree.write(local_manifest_path, xml_declaration=True, encoding='UTF-8', method='xml') def main(): usage = 'usage: %prog [options] <android_build_top> <path_to_exclude>' parser = OptionParser(usage=usage) parser.add_option('--ndk-revision', dest='ndk_revision', help='pin the ndk project at a particular REVISION', metavar='REVISION', default=None) parser.add_option('--manifest_filename', dest='manifest_filename', help='name of the manifest file', default='default.xml') (options, args) = parser.parse_args() if len(args) != 2: parser.error('Wrong number of arguments.') android_build_top = args[0] path_to_exclude = args[1] manifest_filename = options.manifest_filename manifest_path = os.path.join(android_build_top, '.repo/manifests', manifest_filename) local_manifest_path = os.path.join(android_build_top, '.repo/local_manifest.xml') pinned_projects = [] if options.ndk_revision: pinned_projects = [{ 'path': 'ndk', 'name': 'platform/ndk', 'revision' : options.ndk_revision, },] print 'Path to exclude: %s' % path_to_exclude print 'Path to manifest file: %s' % manifest_path createLocalManifest(manifest_path, local_manifest_path, path_to_exclude, pinned_projects) print 'Local manifest created in: %s' % local_manifest_path if __name__ == '__main__': main()
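# ---------------------------------------------------------------------------
# Editor's note: an illustrative invocation of this tool. The checkout path,
# excluded path, and NDK revision below are assumptions, not values taken
# from the original documentation.
# ---------------------------------------------------------------------------
# Exclude every project under external/chromium_org from an Android checkout
# and pin the NDK project at a chosen revision:
#
#   generate_local_manifest.py --ndk-revision <REVISION> \
#       /path/to/android_build_top external/chromium_org
#
# The resulting .repo/local_manifest.xml contains a <remove-project> entry
# for each excluded project, plus a pinned <project> entry for platform/ndk
# when --ndk-revision is given.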
bsd-3-clause
wangxuan007/flasky
venv/lib/python2.7/site-packages/alembic/testing/provision.py
15
9382
"""NOTE: copied/adapted from SQLAlchemy master for backwards compatibility; this should be removable when Alembic targets SQLAlchemy 1.0.0 """ from sqlalchemy.engine import url as sa_url from sqlalchemy import text from sqlalchemy import exc from ..util import compat from . import config, engines from .compat import get_url_backend_name import os import time import logging log = logging.getLogger(__name__) FOLLOWER_IDENT = None class register(object): def __init__(self): self.fns = {} @classmethod def init(cls, fn): return register().for_db("*")(fn) def for_db(self, dbname): def decorate(fn): self.fns[dbname] = fn return self return decorate def __call__(self, cfg, *arg): if isinstance(cfg, compat.string_types): url = sa_url.make_url(cfg) elif isinstance(cfg, sa_url.URL): url = cfg else: url = cfg.db.url backend = get_url_backend_name(url) if backend in self.fns: return self.fns[backend](cfg, *arg) else: return self.fns['*'](cfg, *arg) def create_follower_db(follower_ident): for cfg in _configs_for_db_operation(): _create_db(cfg, cfg.db, follower_ident) def configure_follower(follower_ident): for cfg in config.Config.all_configs(): _configure_follower(cfg, follower_ident) def setup_config(db_url, options, file_config, follower_ident): if follower_ident: db_url = _follower_url_from_main(db_url, follower_ident) db_opts = {} _update_db_opts(db_url, db_opts) eng = engines.testing_engine(db_url, db_opts) _post_configure_engine(db_url, eng, follower_ident) eng.connect().close() cfg = config.Config.register(eng, db_opts, options, file_config) if follower_ident: _configure_follower(cfg, follower_ident) return cfg def drop_follower_db(follower_ident): for cfg in _configs_for_db_operation(): _drop_db(cfg, cfg.db, follower_ident) def _configs_for_db_operation(): hosts = set() for cfg in config.Config.all_configs(): cfg.db.dispose() for cfg in config.Config.all_configs(): url = cfg.db.url backend = get_url_backend_name(url) host_conf = ( backend, url.username, url.host, url.database) if host_conf not in hosts: yield cfg hosts.add(host_conf) for cfg in config.Config.all_configs(): cfg.db.dispose() @register.init def _create_db(cfg, eng, ident): raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) @register.init def _drop_db(cfg, eng, ident): raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) @register.init def _update_db_opts(db_url, db_opts): pass @register.init def _configure_follower(cfg, ident): pass @register.init def _post_configure_engine(url, engine, follower_ident): pass @register.init def _follower_url_from_main(url, ident): url = sa_url.make_url(url) url.database = ident return url @_update_db_opts.for_db("mssql") def _mssql_update_db_opts(db_url, db_opts): db_opts['legacy_schema_aliasing'] = False @_follower_url_from_main.for_db("sqlite") def _sqlite_follower_url_from_main(url, ident): url = sa_url.make_url(url) if not url.database or url.database == ':memory:': return url else: return sa_url.make_url("sqlite:///%s.db" % ident) @_post_configure_engine.for_db("sqlite") def _sqlite_post_configure_engine(url, engine, follower_ident): from sqlalchemy import event @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): # use file DBs in all cases, memory acts kind of strangely # as an attached if not follower_ident: dbapi_connection.execute( 'ATTACH DATABASE "test_schema.db" AS test_schema') else: dbapi_connection.execute( 'ATTACH DATABASE "%s_test_schema.db" AS test_schema' % follower_ident) @_create_db.for_db("postgresql") 
def _pg_create_db(cfg, eng, ident): with eng.connect().execution_options( isolation_level="AUTOCOMMIT") as conn: try: _pg_drop_db(cfg, conn, ident) except Exception: pass currentdb = conn.scalar("select current_database()") for attempt in range(3): try: conn.execute( "CREATE DATABASE %s TEMPLATE %s" % (ident, currentdb)) except exc.OperationalError as err: if attempt != 2 and "accessed by other users" in str(err): time.sleep(.2) continue else: raise else: break @_create_db.for_db("mysql") def _mysql_create_db(cfg, eng, ident): with eng.connect() as conn: try: _mysql_drop_db(cfg, conn, ident) except Exception: pass conn.execute("CREATE DATABASE %s" % ident) conn.execute("CREATE DATABASE %s_test_schema" % ident) conn.execute("CREATE DATABASE %s_test_schema_2" % ident) @_configure_follower.for_db("mysql") def _mysql_configure_follower(config, ident): config.test_schema = "%s_test_schema" % ident config.test_schema_2 = "%s_test_schema_2" % ident @_create_db.for_db("sqlite") def _sqlite_create_db(cfg, eng, ident): pass @_drop_db.for_db("postgresql") def _pg_drop_db(cfg, eng, ident): with eng.connect().execution_options( isolation_level="AUTOCOMMIT") as conn: conn.execute( text( "select pg_terminate_backend(pid) from pg_stat_activity " "where usename=current_user and pid != pg_backend_pid() " "and datname=:dname" ), dname=ident) conn.execute("DROP DATABASE %s" % ident) @_drop_db.for_db("sqlite") def _sqlite_drop_db(cfg, eng, ident): if ident: os.remove("%s_test_schema.db" % ident) else: os.remove("%s.db" % ident) @_drop_db.for_db("mysql") def _mysql_drop_db(cfg, eng, ident): with eng.connect() as conn: conn.execute("DROP DATABASE %s_test_schema" % ident) conn.execute("DROP DATABASE %s_test_schema_2" % ident) conn.execute("DROP DATABASE %s" % ident) @_create_db.for_db("oracle") def _oracle_create_db(cfg, eng, ident): # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or # similar, so that the default tablespace is not "system"; reflection will # fail otherwise with eng.connect() as conn: conn.execute("create user %s identified by xe" % ident) conn.execute("create user %s_ts1 identified by xe" % ident) conn.execute("create user %s_ts2 identified by xe" % ident) conn.execute("grant dba to %s" % (ident, )) conn.execute("grant unlimited tablespace to %s" % ident) conn.execute("grant unlimited tablespace to %s_ts1" % ident) conn.execute("grant unlimited tablespace to %s_ts2" % ident) @_configure_follower.for_db("oracle") def _oracle_configure_follower(config, ident): config.test_schema = "%s_ts1" % ident config.test_schema_2 = "%s_ts2" % ident def _ora_drop_ignore(conn, dbname): try: conn.execute("drop user %s cascade" % dbname) log.info("Reaped db: %s" % dbname) return True except exc.DatabaseError as err: log.warn("couldn't drop db: %s" % err) return False @_drop_db.for_db("oracle") def _oracle_drop_db(cfg, eng, ident): with eng.connect() as conn: # cx_Oracle seems to occasionally leak open connections when a large # suite it run, even if we confirm we have zero references to # connection objects. # while there is a "kill session" command in Oracle, # it unfortunately does not release the connection sufficiently. 
_ora_drop_ignore(conn, ident) _ora_drop_ignore(conn, "%s_ts1" % ident) _ora_drop_ignore(conn, "%s_ts2" % ident) def reap_oracle_dbs(eng): log.info("Reaping Oracle dbs...") with eng.connect() as conn: to_reap = conn.execute( "select u.username from all_users u where username " "like 'TEST_%' and not exists (select username " "from v$session where username=u.username)") all_names = set([username.lower() for (username, ) in to_reap]) to_drop = set() for name in all_names: if name.endswith("_ts1") or name.endswith("_ts2"): continue else: to_drop.add(name) if "%s_ts1" % name in all_names: to_drop.add("%s_ts1" % name) if "%s_ts2" % name in all_names: to_drop.add("%s_ts2" % name) dropped = total = 0 for total, username in enumerate(to_drop, 1): if _ora_drop_ignore(conn, username): dropped += 1 log.info( "Dropped %d out of %d stale databases detected", dropped, total) @_follower_url_from_main.for_db("oracle") def _oracle_follower_url_from_main(url, ident): url = sa_url.make_url(url) url.username = ident url.password = 'xe' return url
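The register/for_db pair above is a small per-backend dispatch table: register.init installs a default implementation under the '*' key, and for_db overrides it for a named dialect, selected by the URL's backend name in __call__. A sketch of how another backend could be slotted in; the dialect name and the routine body here are hypothetical, not part of this file:

@_create_db.for_db("firebird")
def _firebird_create_db(cfg, eng, ident):
    # Hypothetical: provision the follower database with a plain
    # CREATE DATABASE, as the mysql routine above does.
    with eng.connect() as conn:
        conn.execute("CREATE DATABASE %s" % ident)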
gpl-3.0
Titan-C/sympy
sympy/functions/elementary/tests/test_miscellaneous.py
22
12136
import itertools as it from sympy.core.function import Function from sympy.core.numbers import I, oo, Rational from sympy.core.singleton import S from sympy.core.symbol import Symbol from sympy.functions.elementary.miscellaneous import (sqrt, cbrt, root, Min, Max, real_root) from sympy.functions.elementary.trigonometric import cos, sin from sympy.functions.elementary.integers import floor, ceiling from sympy.functions.special.delta_functions import Heaviside from sympy.utilities.pytest import raises def test_Min(): from sympy.abc import x, y, z n = Symbol('n', negative=True) n_ = Symbol('n_', negative=True) nn = Symbol('nn', nonnegative=True) nn_ = Symbol('nn_', nonnegative=True) p = Symbol('p', positive=True) p_ = Symbol('p_', positive=True) np = Symbol('np', nonpositive=True) np_ = Symbol('np_', nonpositive=True) r = Symbol('r', real=True) assert Min(5, 4) == 4 assert Min(-oo, -oo) == -oo assert Min(-oo, n) == -oo assert Min(n, -oo) == -oo assert Min(-oo, np) == -oo assert Min(np, -oo) == -oo assert Min(-oo, 0) == -oo assert Min(0, -oo) == -oo assert Min(-oo, nn) == -oo assert Min(nn, -oo) == -oo assert Min(-oo, p) == -oo assert Min(p, -oo) == -oo assert Min(-oo, oo) == -oo assert Min(oo, -oo) == -oo assert Min(n, n) == n assert Min(n, np) == Min(n, np) assert Min(np, n) == Min(np, n) assert Min(n, 0) == n assert Min(0, n) == n assert Min(n, nn) == n assert Min(nn, n) == n assert Min(n, p) == n assert Min(p, n) == n assert Min(n, oo) == n assert Min(oo, n) == n assert Min(np, np) == np assert Min(np, 0) == np assert Min(0, np) == np assert Min(np, nn) == np assert Min(nn, np) == np assert Min(np, p) == np assert Min(p, np) == np assert Min(np, oo) == np assert Min(oo, np) == np assert Min(0, 0) == 0 assert Min(0, nn) == 0 assert Min(nn, 0) == 0 assert Min(0, p) == 0 assert Min(p, 0) == 0 assert Min(0, oo) == 0 assert Min(oo, 0) == 0 assert Min(nn, nn) == nn assert Min(nn, p) == Min(nn, p) assert Min(p, nn) == Min(p, nn) assert Min(nn, oo) == nn assert Min(oo, nn) == nn assert Min(p, p) == p assert Min(p, oo) == p assert Min(oo, p) == p assert Min(oo, oo) == oo assert Min(n, n_).func is Min assert Min(nn, nn_).func is Min assert Min(np, np_).func is Min assert Min(p, p_).func is Min # lists raises(ValueError, lambda: Min()) assert Min(x, y) == Min(y, x) assert Min(x, y, z) == Min(z, y, x) assert Min(x, Min(y, z)) == Min(z, y, x) assert Min(x, Max(y, -oo)) == Min(x, y) assert Min(p, oo, n, p, p, p_) == n assert Min(p_, n_, p) == n_ assert Min(n, oo, -7, p, p, 2) == Min(n, -7) assert Min(2, x, p, n, oo, n_, p, 2, -2, -2) == Min(-2, x, n, n_) assert Min(0, x, 1, y) == Min(0, x, y) assert Min(1000, 100, -100, x, p, n) == Min(n, x, -100) assert Min(cos(x), sin(x)) == Min(cos(x), sin(x)) assert Min(cos(x), sin(x)).subs(x, 1) == cos(1) assert Min(cos(x), sin(x)).subs(x, S(1)/2) == sin(S(1)/2) raises(ValueError, lambda: Min(cos(x), sin(x)).subs(x, I)) raises(ValueError, lambda: Min(I)) raises(ValueError, lambda: Min(I, x)) raises(ValueError, lambda: Min(S.ComplexInfinity, x)) assert Min(1, x).diff(x) == Heaviside(1 - x) assert Min(x, 1).diff(x) == Heaviside(1 - x) assert Min(0, -x, 1 - 2*x).diff(x) == -Heaviside(x + Min(0, -2*x + 1)) \ - 2*Heaviside(2*x + Min(0, -x) - 1) # issue 7619 f = Function('f') assert Min(1, 2*Min(f(1), 2)) # doesn't fail # issue 7233 e = Min(0, x) assert e.evalf == e.n assert e.n().args == (0, x) # issue 8643 m = Min(n, p_, n_, r) assert m.is_positive is False assert m.is_nonnegative is False assert m.is_negative is True m = Min(p, p_) assert m.is_positive is True assert 
m.is_nonnegative is True assert m.is_negative is False m = Min(p, nn_, p_) assert m.is_positive is None assert m.is_nonnegative is True assert m.is_negative is False m = Min(nn, p, r) assert m.is_positive is None assert m.is_nonnegative is None assert m.is_negative is None def test_Max(): from sympy.abc import x, y, z n = Symbol('n', negative=True) n_ = Symbol('n_', negative=True) nn = Symbol('nn', nonnegative=True) nn_ = Symbol('nn_', nonnegative=True) p = Symbol('p', positive=True) p_ = Symbol('p_', positive=True) np = Symbol('np', nonpositive=True) np_ = Symbol('np_', nonpositive=True) r = Symbol('r', real=True) assert Max(5, 4) == 5 # lists raises(ValueError, lambda: Max()) assert Max(x, y) == Max(y, x) assert Max(x, y, z) == Max(z, y, x) assert Max(x, Max(y, z)) == Max(z, y, x) assert Max(x, Min(y, oo)) == Max(x, y) assert Max(n, -oo, n_, p, 2) == Max(p, 2) assert Max(n, -oo, n_, p) == p assert Max(2, x, p, n, -oo, S.NegativeInfinity, n_, p, 2) == Max(2, x, p) assert Max(0, x, 1, y) == Max(1, x, y) assert Max(r, r + 1, r - 1) == 1 + r assert Max(1000, 100, -100, x, p, n) == Max(p, x, 1000) assert Max(cos(x), sin(x)) == Max(sin(x), cos(x)) assert Max(cos(x), sin(x)).subs(x, 1) == sin(1) assert Max(cos(x), sin(x)).subs(x, S(1)/2) == cos(S(1)/2) raises(ValueError, lambda: Max(cos(x), sin(x)).subs(x, I)) raises(ValueError, lambda: Max(I)) raises(ValueError, lambda: Max(I, x)) raises(ValueError, lambda: Max(S.ComplexInfinity, 1)) assert Max(n, -oo, n_, p, 2) == Max(p, 2) assert Max(n, -oo, n_, p, 1000) == Max(p, 1000) assert Max(1, x).diff(x) == Heaviside(x - 1) assert Max(x, 1).diff(x) == Heaviside(x - 1) assert Max(x**2, 1 + x, 1).diff(x) == \ 2*x*Heaviside(x**2 - Max(1, x + 1)) \ + Heaviside(x - Max(1, x**2) + 1) e = Max(0, x) assert e.evalf == e.n assert e.n().args == (0, x) # issue 8643 m = Max(p, p_, n, r) assert m.is_positive is True assert m.is_nonnegative is True assert m.is_negative is False m = Max(n, n_) assert m.is_positive is False assert m.is_nonnegative is False assert m.is_negative is True m = Max(n, n_, r) assert m.is_positive is None assert m.is_nonnegative is None assert m.is_negative is None m = Max(n, nn, r) assert m.is_positive is None assert m.is_nonnegative is True assert m.is_negative is False def test_minmax_assumptions(): r = Symbol('r', real=True) a = Symbol('a', real=True, algebraic=True) t = Symbol('t', real=True, transcendental=True) q = Symbol('q', rational=True) p = Symbol('p', real=True, rational=False) n = Symbol('n', rational=True, integer=False) i = Symbol('i', integer=True) o = Symbol('o', odd=True) e = Symbol('e', even=True) k = Symbol('k', prime=True) reals = [r, a, t, q, p, n, i, o, e, k] for ext in (Max, Min): for x, y in it.product(reals, repeat=2): # Must be real assert ext(x, y).is_real # Algebraic? if x.is_algebraic and y.is_algebraic: assert ext(x, y).is_algebraic elif x.is_transcendental and y.is_transcendental: assert ext(x, y).is_transcendental else: assert ext(x, y).is_algebraic is None # Rational? if x.is_rational and y.is_rational: assert ext(x, y).is_rational elif x.is_irrational and y.is_irrational: assert ext(x, y).is_irrational else: assert ext(x, y).is_rational is None # Integer? if x.is_integer and y.is_integer: assert ext(x, y).is_integer elif x.is_noninteger and y.is_noninteger: assert ext(x, y).is_noninteger else: assert ext(x, y).is_integer is None # Odd? if x.is_odd and y.is_odd: assert ext(x, y).is_odd elif x.is_odd is False and y.is_odd is False: assert ext(x, y).is_odd is False else: assert ext(x, y).is_odd is None # Even? 
if x.is_even and y.is_even: assert ext(x, y).is_even elif x.is_even is False and y.is_even is False: assert ext(x, y).is_even is False else: assert ext(x, y).is_even is None # Prime? if x.is_prime and y.is_prime: assert ext(x, y).is_prime elif x.is_prime is False and y.is_prime is False: assert ext(x, y).is_prime is False else: assert ext(x, y).is_prime is None def test_issue_8413(): x = Symbol('x', real=True) # we can't evaluate in general because non-reals are not # comparable: Min(floor(3.2 + I), 3.2 + I) -> ValueError assert Min(floor(x), x) == floor(x) assert Min(ceiling(x), x) == x assert Max(floor(x), x) == x assert Max(ceiling(x), x) == ceiling(x) def test_root(): from sympy.abc import x n = Symbol('n', integer=True) k = Symbol('k', integer=True) assert root(2, 2) == sqrt(2) assert root(2, 1) == 2 assert root(2, 3) == 2**Rational(1, 3) assert root(2, 3) == cbrt(2) assert root(2, -5) == 2**Rational(4, 5)/2 assert root(-2, 1) == -2 assert root(-2, 2) == sqrt(2)*I assert root(-2, 1) == -2 assert root(x, 2) == sqrt(x) assert root(x, 1) == x assert root(x, 3) == x**Rational(1, 3) assert root(x, 3) == cbrt(x) assert root(x, -5) == x**Rational(-1, 5) assert root(x, n) == x**(1/n) assert root(x, -n) == x**(-1/n) assert root(x, n, k) == x**(1/n)*(-1)**(2*k/n) def test_real_root(): assert real_root(-8, 3) == -2 assert real_root(-16, 4) == root(-16, 4) r = root(-7, 4) assert real_root(r) == r r1 = root(-1, 3) r2 = r1**2 r3 = root(-1, 4) assert real_root(r1 + r2 + r3) == -1 + r2 + r3 assert real_root(root(-2, 3)) == -root(2, 3) assert real_root(-8., 3) == -2 x = Symbol('x') n = Symbol('n') g = real_root(x, n) assert g.subs(dict(x=-8, n=3)) == -2 assert g.subs(dict(x=8, n=3)) == 2 # give principle root if there is no real root -- if this is not desired # then maybe a Root class is needed to raise an error instead assert g.subs(dict(x=I, n=3)) == cbrt(I) assert g.subs(dict(x=-8, n=2)) == sqrt(-8) assert g.subs(dict(x=I, n=2)) == sqrt(I) def test_rewrite_MaxMin_as_Heaviside(): from sympy.abc import x assert Max(0, x).rewrite(Heaviside) == x*Heaviside(x) assert Max(3, x).rewrite(Heaviside) == x*Heaviside(x - 3) + \ 3*Heaviside(-x + 3) assert Max(0, x+2, 2*x).rewrite(Heaviside) == \ 2*x*Heaviside(2*x)*Heaviside(x - 2) + \ (x + 2)*Heaviside(-x + 2)*Heaviside(x + 2) assert Min(0, x).rewrite(Heaviside) == x*Heaviside(-x) assert Min(3, x).rewrite(Heaviside) == x*Heaviside(-x + 3) + \ 3*Heaviside(x - 3) assert Min(x, -x, -2).rewrite(Heaviside) == \ x*Heaviside(-2*x)*Heaviside(-x - 2) - \ x*Heaviside(2*x)*Heaviside(x - 2) \ - 2*Heaviside(-x + 2)*Heaviside(x + 2) def test_issue_11099(): from sympy.abc import x, y # some fixed value tests fixed_test_data = {x: -2, y: 3} assert Min(x, y).evalf(subs=fixed_test_data) == \ Min(x, y).subs(fixed_test_data).evalf() assert Max(x, y).evalf(subs=fixed_test_data) == \ Max(x, y).subs(fixed_test_data).evalf() # randomly generate some test data from random import randint for i in range(20): random_test_data = {x: randint(-100, 100), y: randint(-100, 100)} assert Min(x, y).evalf(subs=random_test_data) == \ Min(x, y).subs(random_test_data).evalf() assert Max(x, y).evalf(subs=random_test_data) == \ Max(x, y).subs(random_test_data).evalf()
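The test_rewrite_MaxMin_as_Heaviside cases above instantiate the identity Max(a, b) = a*H(a - b) + b*H(b - a), and dually Min(a, b) = a*H(b - a) + b*H(a - b). A quick interactive check, assuming an ordinary SymPy session (printed forms may vary slightly between versions):

from sympy import Heaviside, Max, Min, symbols

a, b = symbols('a b', real=True)
# Each argument is selected by a unit step on the sign of the difference:
Max(a, b).rewrite(Heaviside)  # a*Heaviside(a - b) + b*Heaviside(b - a)
Min(a, b).rewrite(Heaviside)  # a*Heaviside(-a + b) + b*Heaviside(a - b)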
bsd-3-clause
meteorcloudy/tensorflow
tensorflow/python/kernel_tests/decode_bmp_op_test.py
20
3131
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeBmpOp."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.platform import test


class DecodeBmpOpTest(test.TestCase):

  def testex1(self):
    img_bytes = [[[0, 0, 255], [0, 255, 0]], [[255, 0, 0], [255, 255, 255]]]
    # Encoded BMP bytes from Wikipedia
    encoded_bytes = [
        0x42, 0x40,
        0x46, 0, 0, 0,
        0, 0,
        0, 0,
        0x36, 0, 0, 0,
        0x28, 0, 0, 0,
        0x2, 0, 0, 0,
        0x2, 0, 0, 0,
        0x1, 0,
        0x18, 0,
        0, 0, 0, 0,
        0x10, 0, 0, 0,
        0x13, 0xb, 0, 0,
        0x13, 0xb, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0, 0, 0xff,
        0xff, 0xff, 0xff,
        0, 0,
        0xff, 0, 0,
        0, 0xff, 0,
        0, 0,
    ]

    byte_string = bytes(bytearray(encoded_bytes))
    img_in = constant_op.constant(byte_string, dtype=dtypes.string)
    decode = array_ops.squeeze(image_ops.decode_bmp(img_in))

    with self.test_session():
      decoded = decode.eval()
      self.assertAllEqual(decoded, img_bytes)

  def testGrayscale(self):
    img_bytes = [[[255], [0]], [[255], [0]]]
    encoded_bytes = [
        0x42, 0x40,
        0x3d, 0, 0, 0,
        0, 0,
        0, 0,
        0x36, 0, 0, 0,
        0x28, 0, 0, 0,
        0x2, 0, 0, 0,
        0x2, 0, 0, 0,
        0x1, 0,
        0x8, 0,
        0, 0, 0, 0,
        0x10, 0, 0, 0,
        0x13, 0xb, 0, 0,
        0x13, 0xb, 0, 0,
        0, 0, 0, 0,
        0, 0, 0, 0,
        0xff, 0,
        0, 0,
        0xff, 0,
        0, 0,
    ]

    byte_string = bytes(bytearray(encoded_bytes))
    img_in = constant_op.constant(byte_string, dtype=dtypes.string)
    decode = image_ops.decode_bmp(img_in)

    with self.test_session():
      decoded = decode.eval()
      self.assertAllEqual(decoded, img_bytes)


if __name__ == "__main__":
  test.main()
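The hand-written encoded_bytes above follow the standard BMP layout: a 14-byte file header, a 40-byte BITMAPINFOHEADER, then bottom-up pixel rows (BGR order for 24-bit) padded to 4-byte boundaries. A sketch, separate from the test, that packs an equivalent 54-byte header from named fields (per the format spec the magic is b'BM'):

import struct

def bmp_header(file_size, width, height, bpp, image_size):
    # File header: magic, total file size, two reserved shorts,
    # offset of the pixel data (14 + 40 = 54).
    file_header = struct.pack('<2sIHHI', b'BM', file_size, 0, 0, 54)
    # BITMAPINFOHEADER: header size, width, height, planes, bits per
    # pixel, compression (0 = none), image size, x/y pixels per metre
    # (0x0b13 == 2835, i.e. 72 DPI), palette colour counts.
    info_header = struct.pack('<IiiHHIIiiII', 40, width, height, 1, bpp,
                              0, image_size, 2835, 2835, 0, 0)
    return file_header + info_header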
apache-2.0
Hope6537/hope-tactical-equipment
hope-python-script/network/ssh/simple_ssh_server.py
2
2224
# coding=utf-8
"""
Create an SSH server.
Prerequisite: sudo pip install paramiko
"""
import socket
import sys
import threading

import paramiko

host_key = paramiko.RSAKey(filename='test_rsa.key')


class Server(paramiko.ServerInterface):
    def __init__(self):
        self.event = threading.Event()

    def check_channel_request(self, kind, chanid):
        if kind == 'session':
            return paramiko.OPEN_SUCCEEDED
        return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED

    def check_auth_password(self, username, password):
        if (username == 'hope') and (password == 'customPassword'):
            return paramiko.AUTH_SUCCESSFUL
        return paramiko.AUTH_FAILED


def driver(server, port):
    server = server
    ssh_port = int(port)
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((server, ssh_port))
        sock.listen(100)
        print("[+] Listening for connections")
        client, addr = sock.accept()
    except Exception, e:
        print("[-] Listen failed " + str(e))
        sys.exit(1)
    print("[+] Got a connection")
    try:
        bhSession = paramiko.Transport(client)
        bhSession.add_server_key(host_key)
        server = Server()
        try:
            bhSession.start_server(server=server)
        except paramiko.SSHException, x:
            print("[-] SSH negotiation failed")
        chan = bhSession.accept(20)
        print("[+] Authenticated")
        print(chan.recv(1024))
        while True:
            try:
                command = raw_input("Enter command: ").strip('\n')
                if command != 'exit':
                    chan.send(command)
                    print(chan.recv(4096) + '\n')
                else:
                    chan.send('exit')
                    print("Exiting")
                    bhSession.close()
                    raise Exception('exit')
            except KeyboardInterrupt:
                bhSession.close()
    except Exception, e:
        print("[-] Caught exception " + str(e))
        try:
            bhSession.close()
        except:
            pass
        sys.exit(1)


driver("192.168.1.102", 23)
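The server above speaks a tiny custom protocol over the SSH channel (a banner, then a command/response loop), so a stock ssh client cannot drive it. A minimal counterpart client sketch, assuming the same hard-coded credentials and the reversed roles this pattern implies (the machine running the client executes the commands):

import subprocess

import paramiko


def client(server_ip, server_port=23):
    transport = paramiko.Transport((server_ip, server_port))
    transport.connect(username='hope', password='customPassword')
    chan = transport.open_session()
    chan.send('client connected')  # banner printed by the server's first chan.recv
    while True:
        command = chan.recv(1024)
        if command == 'exit':
            break
        try:
            # Run the received command locally and send the output back.
            output = subprocess.check_output(command, shell=True)
            chan.send(output or 'no output')
        except Exception as e:
            chan.send(str(e))
    transport.close()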
apache-2.0
mapnik/mapnik
scons/scons-local-4.1.0/SCons/Variables/ListVariable.py
4
4340
# MIT License # # Copyright The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Option type for list Variables. This file defines the option type for SCons implementing 'lists'. A 'list' option may either be 'all', 'none' or a list of names separated by comma. After the option has been processed, the option value holds either the named list elements, all list elements or no list elements at all. Usage example:: list_of_libs = Split('x11 gl qt ical') opts = Variables() opts.Add( ListVariable( 'shared', 'libraries to build as shared libraries', 'all', elems=list_of_libs, ) ) ... for lib in list_of_libs: if lib in env['shared']: env.SharedObject(...) else: env.Object(...) """ # Known Bug: This should behave like a Set-Type, but does not really, # since elements can occur twice. __all__ = ['ListVariable',] import collections import SCons.Util class _ListVariable(collections.UserList): def __init__(self, initlist=[], allowedElems=[]): collections.UserList.__init__(self, [_f for _f in initlist if _f]) self.allowedElems = sorted(allowedElems) def __cmp__(self, other): raise NotImplementedError def __eq__(self, other): raise NotImplementedError def __ge__(self, other): raise NotImplementedError def __gt__(self, other): raise NotImplementedError def __le__(self, other): raise NotImplementedError def __lt__(self, other): raise NotImplementedError def __str__(self): if len(self) == 0: return 'none' self.data.sort() if self.data == self.allowedElems: return 'all' else: return ','.join(self) def prepare_to_store(self): return self.__str__() def _converter(val, allowedElems, mapdict): """ """ if val == 'none': val = [] elif val == 'all': val = allowedElems else: val = [_f for _f in val.split(',') if _f] val = [mapdict.get(v, v) for v in val] notAllowed = [v for v in val if v not in allowedElems] if notAllowed: raise ValueError("Invalid value(s) for option: %s" % ','.join(notAllowed)) return _ListVariable(val, allowedElems) ## def _validator(key, val, env): ## """ ## """ ## # todo: write validator for pgk list ## return 1 def ListVariable(key, help, default, names, map={}): """ The input parameters describe a 'package list' option, thus they are returned with the correct converter and validator appended. The result is usable for input to opts.Add() . A 'package list' option may either be 'all', 'none' or a list of package names (separated by space). 
""" names_str = 'allowed names: %s' % ' '.join(names) if SCons.Util.is_List(default): default = ','.join(default) help = '\n '.join( (help, '(all|none|comma-separated list of names)', names_str)) return (key, help, default, None, #_validator, lambda val: _converter(val, names, map)) # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
lgpl-2.1
AusTac/parma
b3/lib/yaml/composer.py
534
4921
__all__ = ['Composer', 'ComposerError'] from error import MarkedYAMLError from events import * from nodes import * class ComposerError(MarkedYAMLError): pass class Composer(object): def __init__(self): self.anchors = {} def check_node(self): # Drop the STREAM-START event. if self.check_event(StreamStartEvent): self.get_event() # If there are more documents available? return not self.check_event(StreamEndEvent) def get_node(self): # Get the root node of the next document. if not self.check_event(StreamEndEvent): return self.compose_document() def get_single_node(self): # Drop the STREAM-START event. self.get_event() # Compose a document if the stream is not empty. document = None if not self.check_event(StreamEndEvent): document = self.compose_document() # Ensure that the stream contains no more documents. if not self.check_event(StreamEndEvent): event = self.get_event() raise ComposerError("expected a single document in the stream", document.start_mark, "but found another document", event.start_mark) # Drop the STREAM-END event. self.get_event() return document def compose_document(self): # Drop the DOCUMENT-START event. self.get_event() # Compose the root node. node = self.compose_node(None, None) # Drop the DOCUMENT-END event. self.get_event() self.anchors = {} return node def compose_node(self, parent, index): if self.check_event(AliasEvent): event = self.get_event() anchor = event.anchor if anchor not in self.anchors: raise ComposerError(None, None, "found undefined alias %r" % anchor.encode('utf-8'), event.start_mark) return self.anchors[anchor] event = self.peek_event() anchor = event.anchor if anchor is not None: if anchor in self.anchors: raise ComposerError("found duplicate anchor %r; first occurence" % anchor.encode('utf-8'), self.anchors[anchor].start_mark, "second occurence", event.start_mark) self.descend_resolver(parent, index) if self.check_event(ScalarEvent): node = self.compose_scalar_node(anchor) elif self.check_event(SequenceStartEvent): node = self.compose_sequence_node(anchor) elif self.check_event(MappingStartEvent): node = self.compose_mapping_node(anchor) self.ascend_resolver() return node def compose_scalar_node(self, anchor): event = self.get_event() tag = event.tag if tag is None or tag == u'!': tag = self.resolve(ScalarNode, event.value, event.implicit) node = ScalarNode(tag, event.value, event.start_mark, event.end_mark, style=event.style) if anchor is not None: self.anchors[anchor] = node return node def compose_sequence_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == u'!': tag = self.resolve(SequenceNode, None, start_event.implicit) node = SequenceNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node index = 0 while not self.check_event(SequenceEndEvent): node.value.append(self.compose_node(node, index)) index += 1 end_event = self.get_event() node.end_mark = end_event.end_mark return node def compose_mapping_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == u'!': tag = self.resolve(MappingNode, None, start_event.implicit) node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node while not self.check_event(MappingEndEvent): #key_event = self.peek_event() item_key = self.compose_node(node, None) #if item_key in node.value: # raise ComposerError("while composing a mapping", start_event.start_mark, # "found 
duplicate key", key_event.start_mark) item_value = self.compose_node(node, item_key) #node.value[item_key] = item_value node.value.append((item_key, item_value)) end_event = self.get_event() node.end_mark = end_event.end_mark return node
gpl-2.0
DMS-Aus/Roam
src/roam_tests/tests_editorwidgets/test_numberwidget.py
1
2390
import pytest

from roam.editorwidgets.numberwidget import NumberWidget, DoubleNumberWidget

config = {
    "prefix": "test_pre",
    "suffix": "test_suf",
    "max": 100,
    "min": 0
}


def test_should_return_same_value():
    widget = NumberWidget().createWidget(None)
    wrapper = NumberWidget(widget=widget)
    wrapper.setvalue(1)
    assert wrapper.value() == 1
    wrapper.setvalue(None)
    assert wrapper.value() == 0


def test_non_int_should_error():
    widget = NumberWidget().createWidget(None)
    wrapper = NumberWidget(widget=widget)
    with pytest.raises(ValueError):
        wrapper.setvalue("Test")


def test_should_return_value_within_range():
    widget = NumberWidget().createWidget(None)
    wrapper = NumberWidget(widget)
    wrapper.initWidget(widget, {})
    wrapper.config = config
    wrapper.setvalue(101)
    assert wrapper.value() == 100
    wrapper.setvalue(-1)
    assert wrapper.value() == 0
    wrapper.setvalue(1)
    assert wrapper.value() == 1


def test_sets_prefix_suffix_on_widget():
    widget = NumberWidget().createWidget(None)
    wrapper = NumberWidget(widget)
    wrapper.initWidget(widget, {})
    wrapper.config = config
    assert widget.prefix == config['prefix']
    assert widget.suffix == config['suffix']


def test_should_return_same_value_doublewidget():
    widget = DoubleNumberWidget().createWidget(None)
    wrapper = NumberWidget(widget=widget)
    wrapper.setvalue(1)
    assert wrapper.value() == 1
    wrapper.setvalue(None)
    assert wrapper.value() == 0


def test_non_int_should_error_doublewidget():
    widget = DoubleNumberWidget().createWidget(None)
    wrapper = NumberWidget(widget=widget)
    with pytest.raises(ValueError):
        wrapper.setvalue("Test")


def test_should_return_value_within_range_doublewidget():
    widget = DoubleNumberWidget().createWidget(None)
    wrapper = NumberWidget(widget)
    wrapper.initWidget(widget, {})
    wrapper.config = config
    wrapper.setvalue(101)
    assert wrapper.value() == 100
    wrapper.setvalue(-1)
    assert wrapper.value() == 0
    wrapper.setvalue(1)
    assert wrapper.value() == 1


def test_sets_prefix_suffix_on_widget_doublewidget():
    widget = DoubleNumberWidget().createWidget(None)
    wrapper = NumberWidget(widget)
    wrapper.initWidget(widget, {})
    wrapper.config = config
    assert widget.prefix == config['prefix']
    assert widget.suffix == config['suffix']
gpl-2.0
jwang98052/incubator-reef
dev/update_website.py
21
6141
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ This script updates website files with the new release information. (How to run) python update_website <reef_home> <reef_version> <sha512> <release_notes_link> You can also see how to run the script with "python update_website.py -h" (Example) python update_website ~/reef 0.14.0 8f542ae...c4fed7 https://issues.apache.org/jira/secure/ReleaseNote.jspa?projectId=12315820&version=12333768 """ import os import re import sys import argparse import datetime """ pom.xml """ def update_pom(file, new_version): changed_str = "" f = open(file, 'r') while True: line = f.readline() if not line: break if "<currentStableVersion>" in line: r = re.compile('>(.*?)<') m = r.search(line) old_version = m.group(1) line = line.replace(old_version, new_version) changed_str += line f.close() f = open(file, 'w') f.write(changed_str) f.close() print file """ doap.rdf """ def update_doap(file, new_version): changed_str = "" after_release_tag = False f = open(file, 'r') # keep the part of the file before <release> tag as is # necessary because name and created tags appear several times in the file while True: line = f.readline() if not line: break if "<release>" in line: after_release_tag = True # update <name>, <revision> and <created> tags if (("<name>" in line) or ("<revision>" in line)) and after_release_tag: r = re.compile('>(.*?)<') m = r.search(line) old_version = m.group(1) line = line.replace(old_version, new_version) if "<created>" in line and after_release_tag: r = re.compile('<created>(.*?)</created>') m = r.search(line) old_date = m.group(1) new_date = datetime.date.today().strftime('%Y-%m-%d') line = line.replace(old_date, new_date) changed_str += line f.close() f = open(file, 'w') f.write(changed_str) f.close() print file """ downloads.md: replace all release information with new one """ def update_downloads(file, new_version, sha512, notes_link): changed_str = "" old_version = "" f = open(file, 'r') while True: line = f.readline() if not line: break # figure out old version if "selected" in line: r = re.compile('>(.*?)<') m = r.search(line) old_version = m.group(1) if old_version != "" and old_version in line: line = line.replace(old_version, new_version) if "selected" in line: # write this line and construct new one changed_str += line line = ' <option value="' + old_version + '">' + old_version + '</option>\n' if "sha512Text" in line: r = re.compile('<span id="sha512Text">(.*?)</span>') m = r.search(line) line = line.replace(m.group(1), sha512) if "releaseNotesLink" in line: r = re.compile('href="(.*?)"') m = r.search(line) url = re.sub('&(?!amp;)', '&amp;', notes_link) # '&' should be replaced to prevent parsing error line = line.replace(m.group(1), url) if "dotnetApiLink" in line: r = re.compile('<span 
id="dotnetApiLink">(.*?)</span>') m = r.search(line) line = line.replace(m.group(1), '<a href="apidoc_net/' + new_version + '/index.html">.NET API</a>') changed_str += line f.close() f = open(file, 'w') f.write(changed_str) f.close() print file """ release.js: add releaseSha512 and releaseNotes for new version """ def update_release_js(file, new_version, sha512, notes_link): changed_str = "" f = open(file, 'r') while True: line = f.readline() if not line: break if "var releaseSha512" in line: changed_str += line changed_str += ' "' + new_version + '": "' + sha512 + '",\n' continue if "var releaseNotes" in line: changed_str += line changed_str += ' "' + new_version + '": "' + notes_link + '",\n' continue changed_str += line f.close() f = open(file, 'w') f.write(changed_str) f.close() print file if __name__ == "__main__": parser = argparse.ArgumentParser(description="Script for updating REEF website with information about new release.") parser.add_argument("reef_home", type=str, help="REEF home") parser.add_argument("reef_version", type=str, help="REEF version") parser.add_argument("sha512", type=str, help="SHA512 of .tar.gz file") parser.add_argument("notes_link", type=str, help="Link to release notes for the version") args = parser.parse_args() reef_home = os.path.abspath(args.reef_home) update_pom(reef_home + "/pom.xml", args.reef_version) update_doap(reef_home + "/website/src/site/resources/doap.rdf", args.reef_version) update_release_js(reef_home + "/website/src/site/resources/js/release.js", args.reef_version, args.sha512, args.notes_link) update_downloads(reef_home + "/website/src/site/markdown/downloads.md", args.reef_version, args.sha512, args.notes_link)
apache-2.0
tigwyk/eve-wspace
evewspace/Jabber/migrations/0001_initial.py
22
7000
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'JabberGroup' db.create_table('Jabber_jabbergroup', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=64)), ('desc', self.gf('django.db.models.fields.CharField')(max_length=200)), ('special', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('Jabber', ['JabberGroup']) # Adding model 'JabberGroupMember' db.create_table('Jabber_jabbergroupmember', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('group', self.gf('django.db.models.fields.related.ForeignKey')(related_name='members', to=orm['Jabber.JabberGroup'])), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jabber_groups', to=orm['auth.User'])), )) db.send_create_signal('Jabber', ['JabberGroupMember']) # Adding model 'JabberGroupPermissions' db.create_table('Jabber_jabbergrouppermissions', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('usergroup', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jabber_groups', to=orm['auth.Group'])), ('jabbergroup', self.gf('django.db.models.fields.related.ForeignKey')(related_name='group_permissions', to=orm['Jabber.JabberGroup'])), ('canbroadcast', self.gf('django.db.models.fields.BooleanField')(default=False)), ('canjoin', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('Jabber', ['JabberGroupPermissions']) def backwards(self, orm): # Deleting model 'JabberGroup' db.delete_table('Jabber_jabbergroup') # Deleting model 'JabberGroupMember' db.delete_table('Jabber_jabbergroupmember') # Deleting model 'JabberGroupPermissions' db.delete_table('Jabber_jabbergrouppermissions') models = { 'Jabber.jabbergroup': { 'Meta': {'object_name': 'JabberGroup'}, 'desc': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'special': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, 'Jabber.jabbergroupmember': { 'Meta': {'object_name': 'JabberGroupMember'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['Jabber.JabberGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jabber_groups'", 'to': "orm['auth.User']"}) }, 'Jabber.jabbergrouppermissions': { 'Meta': {'object_name': 'JabberGroupPermissions'}, 'canbroadcast': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'canjoin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'jabbergroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_permissions'", 'to': "orm['Jabber.JabberGroup']"}), 'usergroup': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jabber_groups'", 'to': "orm['auth.Group']"}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['Jabber']
gpl-3.0
genialis/resolwe-bio
resolwe_bio/tools/make_report.py
1
13645
#!/usr/bin/env python3 """Generate amplicon report.""" import argparse import csv import re import subprocess DECIMALS = 2 # Decimal precision when presenting results: parser = argparse.ArgumentParser(description="Fill data into tex template file.") parser.add_argument("--sample", help="Sample name.") parser.add_argument("--panel", help="Panel name") parser.add_argument("--covmetrics", help="Coverge metrics") parser.add_argument("--cov", help="Amplicon coverage") parser.add_argument("--metrics", help="CollectTargetedPcrMetrics report file.") parser.add_argument("--annot_vars", help="Annotated variant files.", nargs="+") parser.add_argument("--template", help="Report template file.") parser.add_argument("--logo", help="Logo.") parser.add_argument("--afthreshold", help="Allele Frequency lower threshold.") def escape_latex(string): """ Get string, where reserved LaTeX charachters are escaped. For more info regarding LaTeX reserved charachters read this: https://en.wikibooks.org/wiki/LaTeX/Basics#Reserved_Characters """ string = string.replace("\\", "\\\\") string = string.replace("&", r"\&") string = string.replace("%", r"\%") string = string.replace("$", r"\$") string = string.replace("#", r"\#") string = string.replace("_", r"\_") string = string.replace("{", r"\{") string = string.replace("}", r"\}") string = string.replace("~", r"\textasciitilde ") string = string.replace("^", r"\textasciicircum ") return string def format_float(value, decimals=DECIMALS, to_percent=False): """Round string representation of float to ``decimals`` decimal places. If ``to_percent``==True, also multiply by 100 and add latex percent sign. """ value = float(value) * 100 if to_percent else float(value) out = "{0:g}".format(round(value, decimals)) if to_percent: out += "\\%" return out def parse_covmetrics(covmetrics_file, stats): """ Parse covmetrics report and write content to ``stats``. Covmetrics report is a one-line file with three colunmns:: mean_coverage mean_coverage*0.2 coverage_uniformity """ with open(covmetrics_file) as handle: cov_data = handle.readline().strip().split("\t") stats["mean_coverage"] = float(cov_data[0]) stats["mean_coverage_20"] = float(cov_data[1]) stats["coverage_uniformity"] = ( float(cov_data[2]) / 100 ) # Convert from percent to ratio def parse_target_pcr_metrics(metrics_report, stats): """ Parse Picard PcrMetrics report file and save relevant data to ``stats``. The are only two relevant lines: the one starting with CUSTOM_AMPLICON_SET (containing labels) and the next one (containing coresponding values). 
""" with open(metrics_report) as pcr_metrics: for line in pcr_metrics: if line.startswith("#"): continue if line.startswith("CUSTOM_AMPLICON_SET"): labels = line.strip().split("\t") values = next(pcr_metrics).strip().split("\t") for lab, val in zip(labels, values): stats[lab] = val def cut_to_pieces(string, piece_size): """Cut long string into smaller pieces.""" assert isinstance(string, str) string_size = len(string) pieces = [string[i : i + piece_size] for i in range(0, string_size, piece_size)] return " ".join(pieces) def list_to_tex_table( data, header=None, caption=None, long_columns=False, uncut_columns=[] ): """Make a TeX table from python list.""" lines = [] column_alingnment = ["l" for h in header] if long_columns: for col_index in long_columns: column_alingnment[col_index] = "L" if caption: lines.append("\\captionof{{table}}{{{}}}".format(caption)) lines.append("\\noindent") lines.append( "\\begin{{longtable}}[l] {{ {} }}".format(" ".join(column_alingnment)) ) if header: header_formatted = [ "\\leavevmode\\color{{white}}\\textbf{{{}}}".format(hdr) for hdr in header ] lines.append("\\rowcolor{darkblue1}") lines.append(" & ".join(header_formatted) + "\\\\") lines.append("\\endhead") for line in data: if long_columns: for col_index in long_columns: if r"\href" in line[col_index] or col_index in uncut_columns: # If hyperlink line or `uncut_columns`, don't do a thing. new_val = line[col_index] else: # Otherwise, insert spaces, so that wrapping can happen: new_val = cut_to_pieces(line[col_index], 8) line[col_index] = "\\multicolumn{{1}}{{m{{2.3cm}}}}{{{}}}".format( new_val ) lines.append(" & ".join(line) + " \\\\") lines.append("\\end{longtable}") lines.append("{\n\\addtocounter{table}{-1}}") return "\n".join(lines) def snp_href(snpid): """Create LaTeX hyperlink for given SNP ID.""" if snpid.startswith("rs"): url = r"http://www.ncbi.nlm.nih.gov/snp/?term={}".format(snpid) elif snpid.startswith("COSM"): url = r"http://cancer.sanger.ac.uk/cosmic/mutation/overview?genome=37\&id={}".format( snpid.lstrip("COSM") ) elif snpid.startswith("COSN"): url = r"http://cancer.sanger.ac.uk/cosmic/ncv/overview?genome=37\&id={}".format( snpid.lstrip("COSN") ) else: return snpid return "\\href{{{}}}{{{}}}".format(url, escape_latex(snpid)) def gene_href(gene_name): """Create LaTeX hyperlink for given GENE ID.""" url = "http://www.ncbi.nlm.nih.gov/gene/?term={}".format(gene_name) return "\\href{{{}}}{{{}}}".format(url, escape_latex(gene_name)) def format_aa_change(aa_list): """Create Amino Acid Change information.""" output = set() if aa_list: for aa in aa_list: match_obj = re.match(r"p\.([A-Za-z]*)[0-9]*([A-Za-z]*)", aa) if match_obj and match_obj.group(1) == match_obj.group(2): output.add("Synon") else: output.add(escape_latex(aa)) return output def make_variant_table(variant_file, header, af_threshold): """Make variant table.""" var_table = [] with open(variant_file, "r") as vfile: for row_raw in csv.DictReader(vfile, delimiter="\t"): # Reorder columns according to header row = [row_raw[column_name] for column_name in header] row[-3] = " ".join( map(gene_href, row[-3].split(",")) ) # Create gene hypelinks row[-2] = " ".join( map(snp_href, row[-2].split(";")) ) # Create SNP hypelinks row[-1] = " ".join( format_aa_change(row[-1].split(",")) ) # Create amino acid column # One line can contain two or more ALT values (and consequently two or more AF values) # We need to split such "multiple" entries to one alt value per row alt_cell, afq_cell = row[3], row[4] for alt, afq in zip(alt_cell.split(","), 
afq_cell.split(",")): afq = float(afq) if afq >= af_threshold: afq = "{:.3f}".format(afq) var_table.append(row[:3] + [alt] + [afq] + row[5:]) return var_table if __name__ == "__main__": args = parser.parse_args() # Open template and fill it with data: with open(args.template, "r") as template_in, open( "report.tex", "wt" ) as template_out: stats = {} # Container for all-reound report statistics. parse_covmetrics(args.covmetrics, stats) parse_target_pcr_metrics(args.metrics, stats) template = template_in.read() template = template.replace("{#LOGO#}", args.logo) template = template.replace("{#SAMPLE_NAME#}", escape_latex(args.sample)) template = template.replace("{#PANEL#}", escape_latex(args.panel)) template = template.replace( "{#AF_THRESHOLD#}", format_float(args.afthreshold, to_percent=True) ) template = template.replace("{#TOTAL_READS#}", stats["TOTAL_READS"]) template = template.replace("{#ALIGNED_READS#}", stats["PF_UQ_READS_ALIGNED"]) template = template.replace( "{#PCT_ALIGNED_READS#}", format_float(stats["PCT_PF_UQ_READS_ALIGNED"], to_percent=True), ) template = template.replace( "{#ALIGN_BASES_ONTARGET#}", format_float(stats["PCT_AMPLIFIED_BASES"], to_percent=True), ) template = template.replace( "{#COV_MEAN#}", format_float(stats["mean_coverage"]) ) template = template.replace( "{#COV_20#}", format_float(stats["mean_coverage_20"]) ) template = template.replace( "{#COV_UNI#}", format_float(stats["coverage_uniformity"], to_percent=True) ) # Parse *.cov file: a tabular file where each row represents an amplicon. # Take only 5th (amplicon name) and 9th (ratio of amplicon covered) column. # Produce ``uncovered_amplicons`` table for amplicons with < 100% coverage (ratio below 1). uncovered_amplicons = [] amplicon_count = 0 with open(args.cov, "r") as tfile: for row in csv.reader(tfile, delimiter="\t"): amplicon_count += 1 cov_ratio = float(row[8]) if cov_ratio < 1: uncovered_amplicons.append( [ escape_latex(row[4]), # Amplicon name "{:.1f}".format( cov_ratio * 100 ), # Amplicon coverage (percentage) ] ) stats["ALL_AMPLICONS"] = amplicon_count stats["COVERED_AMPLICONS"] = amplicon_count - len(uncovered_amplicons) stats["RATIO_COVERED_AMPLICONS"] = ( amplicon_count - len(uncovered_amplicons) ) / amplicon_count template = template.replace("{#ALL_AMPLICONS#}", str(stats["ALL_AMPLICONS"])) template = template.replace( "{#COVERED_AMPLICONS#}", str(stats["COVERED_AMPLICONS"]) ) template = template.replace( "{#PCT_COV#}", format_float(stats["RATIO_COVERED_AMPLICONS"], to_percent=True), ) sentence_text = "" if not uncovered_amplicons: uncovered_amplicons = [["/", "/"]] message = "All amplicons are completely covered with short reads." sentence_text = ( "\\medskip \\noindent \n {{\\boldfont " + message + "}} \n\n \\lightfont \n" ) caption = r"Amplicons with coverage $<$ 100\%" table_text = list_to_tex_table( uncovered_amplicons, header=["Amplicon name", r"\% covered"], caption=caption, ) template = template.replace( "{#BAD_AMPLICON_TABLE#}", sentence_text + table_text ) # Make tables with variants for each variant caller. table_text = "" af_threshold = float(args.afthreshold) header_glossary = {"GEN[0].AD": "AD", "EFF[*].GENE": "GENE", "EFF[*].AA": "AA"} for variant_file in args.annot_vars: # We have multiple (=2) variant files: this are NOT *.vcf files, but tabular files derived from them. # The problem is that their columns (and header names) differ depending on the variant caller tool. 
COMMON_FIELDS = ["CHROM", "POS", "REF", "ALT", "AF", "DP"] if "gatkhc.finalvars" in variant_file.lower(): header = COMMON_FIELDS + [ "GEN[0].AD", "FS", "EFF[*].GENE", "ID", "EFF[*].AA", ] caption = "GATK HaplotypeCaller variant calls" elif "lf.finalvars" in variant_file.lower(): header = COMMON_FIELDS + ["DP4", "SB", "EFF[*].GENE", "ID", "EFF[*].AA"] caption = "Lofreq variant calls" else: raise ValueError( "This variant caller is not supported for report generation" ) var_table = make_variant_table(variant_file, header, af_threshold) header = [ escape_latex(header_glossary.get(col_name, col_name)) for col_name in header ] long_cls = [ 2, 3, -3, -2, -1, ] # Potentially long columns are REF, ALT, GENE, ID and AA table_text += list_to_tex_table( var_table, header=header, caption=caption, long_columns=long_cls, uncut_columns=[-1], ) table_text += "\n\\newpage\n" template = template.replace("{#VCF_TABLES#}", table_text) # Write template to 'report.tex' template_out.write(template) # Generate PDF: args = ["pdflatex", "-interaction=nonstopmode", "report.tex"] subprocess.call(args) subprocess.check_call(args) with open("stats.txt", "wt") as handle: for key, value in sorted(stats.items()): print("{}\t{}".format(key, value), file=handle) print("Done.")
apache-2.0
isyippee/ceilometer
ceilometer/tests/unit/dns/test_notifications.py
8
3721
#
# Copyright (c) 2015 Hewlett Packard Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_utils import timeutils
from oslotest import base

from ceilometer.dns import notifications
from ceilometer import sample

NOW = timeutils.utcnow().isoformat()

TENANT_ID = u'76538754af6548f5b53cf9af2d35d582'
USER_ID = u'b70ece400e4e45c187168c40fa42ff7a'
DOMAIN_STATUS = u'ACTIVE'
RESOURCE_ID = u'a8b55824-e731-40a3-a32d-de81474d74b2'
PUBLISHER_ID = u'central.ubuntu'
POOL_ID = u'794ccc2c-d751-44fe-b57f-8894c9f5c842'


def _dns_notification_for(operation):
    return {
        u'event_type': '%s.domain.%s' % (notifications.SERVICE, operation),
        u'_context_roles': [u'admin'],
        u'timestamp': NOW,
        u'_context_tenant': TENANT_ID,
        u'payload': {
            u'status': DOMAIN_STATUS,
            u'retry': 600,
            u'description': None,
            u'expire': 86400,
            u'deleted': u'0',
            u'tenant_id': TENANT_ID,
            u'created_at': u'2015-07-10T20:05:29.870091Z',
            u'updated_at': None,
            u'refresh': 3600,
            u'pool_id': POOL_ID,
            u'email': u'admin@hpcloud.com',
            u'minimum': 3600,
            u'parent_domain_id': None,
            u'version': 1,
            u'ttl': 3600,
            u'action': operation.upper(),
            u'serial': 1426295326,
            u'deleted_at': None,
            u'id': RESOURCE_ID,
            u'name': u'paas.hpcloud.com.',
            u'audit_period_beginning': u'2015-07-10T20:05:29.870091Z',
            u'audit_period_ending': u'2015-07-10T21:05:29.870091Z'
        },
        u'_context_user': USER_ID,
        u'_context_auth_token': u'b95d4fc3bb2e4a5487cad06af65ffcfc',
        u'priority': u'INFO',
        u'_context_is_admin': False,
        u'publisher_id': PUBLISHER_ID,
        u'message_id': u'67ba0a2a-32bd-4cdf-9bfb-ef9cefcd0f63',
    }


class TestNotification(base.BaseTestCase):
    def _verify_common_sample(self, actual, operation):
        self.assertIsNotNone(actual)
        self.assertEqual('%s.domain.%s' % (notifications.SERVICE, operation),
                         actual.name)
        self.assertEqual(NOW, actual.timestamp)
        self.assertEqual(sample.TYPE_CUMULATIVE, actual.type)
        self.assertEqual(TENANT_ID, actual.project_id)
        self.assertEqual(RESOURCE_ID, actual.resource_id)
        self.assertEqual(USER_ID, actual.user_id)
        metadata = actual.resource_metadata
        self.assertEqual(PUBLISHER_ID, metadata.get('host'))
        self.assertEqual(operation.upper(), metadata.get('action'))
        self.assertEqual(DOMAIN_STATUS, metadata.get('status'))
        self.assertEqual(3600, actual.volume)
        self.assertEqual('s', actual.unit)

    def _test_operation(self, operation):
        notif = _dns_notification_for(operation)
        handler = notifications.DomainExists(mock.Mock())
        data = list(handler.process_notification(notif))
        self.assertEqual(1, len(data))
        self._verify_common_sample(data[0], operation)

    def test_exists(self):
        self._test_operation('exists')
apache-2.0
supermari0/ironic
ironic/common/paths.py
6
2156
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_config import cfg

path_opts = [
    cfg.StrOpt('pybasedir',
               default=os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../')),
               help='Directory where the ironic python module is installed.'),
    cfg.StrOpt('bindir',
               default='$pybasedir/bin',
               help='Directory where ironic binaries are installed.'),
    cfg.StrOpt('state_path',
               default='$pybasedir',
               help="Top-level directory for maintaining ironic's state."),
]

CONF = cfg.CONF
CONF.register_opts(path_opts)


def basedir_def(*args):
    """Return an uninterpolated path relative to $pybasedir."""
    return os.path.join('$pybasedir', *args)


def bindir_def(*args):
    """Return an uninterpolated path relative to $bindir."""
    return os.path.join('$bindir', *args)


def state_path_def(*args):
    """Return an uninterpolated path relative to $state_path."""
    return os.path.join('$state_path', *args)


def basedir_rel(*args):
    """Return a path relative to $pybasedir."""
    return os.path.join(CONF.pybasedir, *args)


def bindir_rel(*args):
    """Return a path relative to $bindir."""
    return os.path.join(CONF.bindir, *args)


def state_path_rel(*args):
    """Return a path relative to $state_path."""
    return os.path.join(CONF.state_path, *args)
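A quick illustration of the split between the two helper families above: the *_def functions return uninterpolated templates that oslo.config expands when the option is read, while the *_rel functions resolve against the live CONF values immediately (the resolved path shown is hypothetical):

basedir_def('drivers', 'modules')  # -> '$pybasedir/drivers/modules'
basedir_rel('drivers', 'modules')  # -> e.g. '/usr/lib/python/ironic/drivers/modules'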
apache-2.0
NeuralEnsemble/neuroConstruct
lib/jython/Lib/test/test_copy.py
14
17931
"""Unit tests for the copy module.""" import copy import copy_reg import unittest from test import test_support class TestCopy(unittest.TestCase): # Attempt full line coverage of copy.py from top to bottom def test_exceptions(self): self.assert_(copy.Error is copy.error) self.assert_(issubclass(copy.Error, Exception)) # The copy() method def test_copy_basic(self): x = 42 y = copy.copy(x) self.assertEqual(x, y) def test_copy_copy(self): class C(object): def __init__(self, foo): self.foo = foo def __copy__(self): return C(self.foo) x = C(42) y = copy.copy(x) self.assertEqual(y.__class__, x.__class__) self.assertEqual(y.foo, x.foo) def test_copy_registry(self): class C(object): def __new__(cls, foo): obj = object.__new__(cls) obj.foo = foo return obj def pickle_C(obj): return (C, (obj.foo,)) x = C(42) self.assertRaises(TypeError, copy.copy, x) copy_reg.pickle(C, pickle_C, C) y = copy.copy(x) def test_copy_reduce_ex(self): class C(object): def __reduce_ex__(self, proto): return "" def __reduce__(self): raise test_support.TestFailed, "shouldn't call this" x = C() y = copy.copy(x) self.assert_(y is x) def test_copy_reduce(self): class C(object): def __reduce__(self): return "" x = C() y = copy.copy(x) self.assert_(y is x) def test_copy_cant(self): class C(object): def __getattribute__(self, name): if name.startswith("__reduce"): raise AttributeError, name return object.__getattribute__(self, name) x = C() self.assertRaises(copy.Error, copy.copy, x) # Type-specific _copy_xxx() methods def test_copy_atomic(self): class Classic: pass class NewStyle(object): pass def f(): pass tests = [None, 42, 2L**100, 3.14, True, False, 1j, "hello", u"hello\u1234", f.func_code, NewStyle, xrange(10), Classic, max] for x in tests: self.assert_(copy.copy(x) is x, repr(x)) def test_copy_list(self): x = [1, 2, 3] self.assertEqual(copy.copy(x), x) def test_copy_tuple(self): x = (1, 2, 3) self.assertEqual(copy.copy(x), x) def test_copy_dict(self): x = {"foo": 1, "bar": 2} self.assertEqual(copy.copy(x), x) def test_copy_inst_vanilla(self): class C: def __init__(self, foo): self.foo = foo def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_copy(self): class C: def __init__(self, foo): self.foo = foo def __copy__(self): return C(self.foo) def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getinitargs(self): class C: def __init__(self, foo): self.foo = foo def __getinitargs__(self): return (self.foo,) def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return {"foo": self.foo} def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_setstate(self): class C: def __init__(self, foo): self.foo = foo def __setstate__(self, state): self.foo = state["foo"] def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getstate_setstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, state): self.foo = state def __cmp__(self, other): return cmp(self.foo, other.foo) x = C(42) self.assertEqual(copy.copy(x), x) # The deepcopy() method def test_deepcopy_basic(self): x = 42 y = copy.deepcopy(x) self.assertEqual(y, x) def test_deepcopy_memo(self): # Tests 
of reflexive objects are under type-specific sections below. # This tests only repetitions of objects. x = [] x = [x, x] y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y[0] is not x[0]) self.assert_(y[0] is y[1]) def test_deepcopy_issubclass(self): # XXX Note: there's no way to test the TypeError coming out of # issubclass() -- this can only happen when an extension # module defines a "type" that doesn't formally inherit from # type. class Meta(type): pass class C: __metaclass__ = Meta self.assertEqual(copy.deepcopy(C), C) def test_deepcopy_deepcopy(self): class C(object): def __init__(self, foo): self.foo = foo def __deepcopy__(self, memo=None): return C(self.foo) x = C(42) y = copy.deepcopy(x) self.assertEqual(y.__class__, x.__class__) self.assertEqual(y.foo, x.foo) def test_deepcopy_registry(self): class C(object): def __new__(cls, foo): obj = object.__new__(cls) obj.foo = foo return obj def pickle_C(obj): return (C, (obj.foo,)) x = C(42) self.assertRaises(TypeError, copy.deepcopy, x) copy_reg.pickle(C, pickle_C, C) y = copy.deepcopy(x) def test_deepcopy_reduce_ex(self): class C(object): def __reduce_ex__(self, proto): return "" def __reduce__(self): raise test_support.TestFailed, "shouldn't call this" x = C() y = copy.deepcopy(x) self.assert_(y is x) def test_deepcopy_reduce(self): class C(object): def __reduce__(self): return "" x = C() y = copy.deepcopy(x) self.assert_(y is x) def test_deepcopy_cant(self): class C(object): def __getattribute__(self, name): if name.startswith("__reduce"): raise AttributeError, name return object.__getattribute__(self, name) x = C() self.assertRaises(copy.Error, copy.deepcopy, x) # Type-specific _deepcopy_xxx() methods def test_deepcopy_atomic(self): class Classic: pass class NewStyle(object): pass def f(): pass tests = [None, 42, 2L**100, 3.14, True, False, 1j, "hello", u"hello\u1234", f.func_code, NewStyle, xrange(10), Classic, max] for x in tests: self.assert_(copy.deepcopy(x) is x, repr(x)) def test_deepcopy_list(self): x = [[1, 2], 3] y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(x is not y) self.assert_(x[0] is not y[0]) # modified for Jython def test_deepcopy_reflexive_list(self): x = [] x.append(x) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y[0] is y) self.assertEqual(len(y), 1) def test_deepcopy_tuple(self): x = ([1, 2], 3) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(x is not y) self.assert_(x[0] is not y[0]) # modified for Jython def test_deepcopy_reflexive_tuple(self): x = ([],) x[0].append(x) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y[0] is not x[0]) self.assert_(y[0][0] is y) def test_deepcopy_dict(self): x = {"foo": [1, 2], "bar": 3} y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(x is not y) self.assert_(x["foo"] is not y["foo"]) # modified for Jython def test_deepcopy_reflexive_dict(self): x = {} x['foo'] = x y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y['foo'] is y) self.assertEqual(len(y), 1) def test_deepcopy_keepalive(self): memo = {} x = 42 y = copy.deepcopy(x, memo) self.assert_(memo[id(x)] is x) def test_deepcopy_inst_vanilla(self): class C: def __init__(self, foo): self.foo = foo def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y.foo is not x.foo) def test_deepcopy_inst_deepcopy(self): class C: def __init__(self, foo): self.foo = foo def __deepcopy__(self, memo): return 
C(copy.deepcopy(self.foo, memo)) def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y.foo is not x.foo) def test_deepcopy_inst_getinitargs(self): class C: def __init__(self, foo): self.foo = foo def __getinitargs__(self): return (self.foo,) def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y.foo is not x.foo) def test_deepcopy_inst_getstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return {"foo": self.foo} def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y.foo is not x.foo) def test_deepcopy_inst_setstate(self): class C: def __init__(self, foo): self.foo = foo def __setstate__(self, state): self.foo = state["foo"] def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y.foo is not x.foo) def test_deepcopy_inst_getstate_setstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, state): self.foo = state def __cmp__(self, other): return cmp(self.foo, other.foo) x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y is not x) self.assert_(y.foo is not x.foo) def test_deepcopy_reflexive_inst(self): class C: pass x = C() x.foo = x y = copy.deepcopy(x) self.assert_(y is not x) self.assert_(y.foo is y) # _reconstruct() def test_reconstruct_string(self): class C(object): def __reduce__(self): return "" x = C() y = copy.copy(x) self.assert_(y is x) y = copy.deepcopy(x) self.assert_(y is x) def test_reconstruct_nostate(self): class C(object): def __reduce__(self): return (C, ()) x = C() x.foo = 42 y = copy.copy(x) self.assert_(y.__class__ is x.__class__) y = copy.deepcopy(x) self.assert_(y.__class__ is x.__class__) def test_reconstruct_state(self): class C(object): def __reduce__(self): return (C, (), self.__dict__) def __cmp__(self, other): return cmp(self.__dict__, other.__dict__) __hash__ = None # Silence Py3k warning x = C() x.foo = [42] y = copy.copy(x) self.assertEqual(y, x) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y.foo is not x.foo) def test_reconstruct_state_setstate(self): class C(object): def __reduce__(self): return (C, (), self.__dict__) def __setstate__(self, state): self.__dict__.update(state) def __cmp__(self, other): return cmp(self.__dict__, other.__dict__) __hash__ = None # Silence Py3k warning x = C() x.foo = [42] y = copy.copy(x) self.assertEqual(y, x) y = copy.deepcopy(x) self.assertEqual(y, x) self.assert_(y.foo is not x.foo) def test_reconstruct_reflexive(self): class C(object): pass x = C() x.foo = x y = copy.deepcopy(x) self.assert_(y is not x) self.assert_(y.foo is y) # Additions for Python 2.3 and pickle protocol 2 def test_reduce_4tuple(self): class C(list): def __reduce__(self): return (C, (), self.__dict__, iter(self)) def __cmp__(self, other): return (cmp(list(self), list(other)) or cmp(self.__dict__, other.__dict__)) __hash__ = None # Silence Py3k warning x = C([[1, 2], 3]) y = copy.copy(x) self.assertEqual(x, y) self.assert_(x is not y) self.assert_(x[0] is y[0]) y = copy.deepcopy(x) self.assertEqual(x, y) self.assert_(x is not y) self.assert_(x[0] is not y[0]) def test_reduce_5tuple(self): class C(dict): def __reduce__(self): return 
(C, (), self.__dict__, None, self.iteritems()) def __cmp__(self, other): return (cmp(dict(self), dict(other)) or cmp(self.__dict__, other.__dict__)) __hash__ = None # Silence Py3k warning x = C([("foo", [1, 2]), ("bar", 3)]) y = copy.copy(x) self.assertEqual(x, y) self.assert_(x is not y) self.assert_(x["foo"] is y["foo"]) y = copy.deepcopy(x) self.assertEqual(x, y) self.assert_(x is not y) self.assert_(x["foo"] is not y["foo"]) def test_copy_slots(self): class C(object): __slots__ = ["foo"] x = C() x.foo = [42] y = copy.copy(x) self.assert_(x.foo is y.foo) def test_deepcopy_slots(self): class C(object): __slots__ = ["foo"] x = C() x.foo = [42] y = copy.deepcopy(x) self.assertEqual(x.foo, y.foo) self.assert_(x.foo is not y.foo) def test_copy_list_subclass(self): class C(list): pass x = C([[1, 2], 3]) x.foo = [4, 5] y = copy.copy(x) self.assertEqual(list(x), list(y)) self.assertEqual(x.foo, y.foo) self.assert_(x[0] is y[0]) self.assert_(x.foo is y.foo) def test_deepcopy_list_subclass(self): class C(list): pass x = C([[1, 2], 3]) x.foo = [4, 5] y = copy.deepcopy(x) self.assertEqual(list(x), list(y)) self.assertEqual(x.foo, y.foo) self.assert_(x[0] is not y[0]) self.assert_(x.foo is not y.foo) def test_copy_tuple_subclass(self): class C(tuple): pass x = C([1, 2, 3]) self.assertEqual(tuple(x), (1, 2, 3)) y = copy.copy(x) self.assertEqual(tuple(y), (1, 2, 3)) def test_deepcopy_tuple_subclass(self): class C(tuple): pass x = C([[1, 2], 3]) self.assertEqual(tuple(x), ([1, 2], 3)) y = copy.deepcopy(x) self.assertEqual(tuple(y), ([1, 2], 3)) self.assert_(x is not y) self.assert_(x[0] is not y[0]) def test_getstate_exc(self): class EvilState(object): def __getstate__(self): raise ValueError, "ain't got no stickin' state" self.assertRaises(ValueError, copy.copy, EvilState()) def test_copy_function(self): self.assertEqual(copy.copy(global_foo), global_foo) def foo(x, y): return x+y self.assertEqual(copy.copy(foo), foo) bar = lambda: None self.assertEqual(copy.copy(bar), bar) def test_deepcopy_function(self): self.assertEqual(copy.deepcopy(global_foo), global_foo) def foo(x, y): return x+y self.assertEqual(copy.deepcopy(foo), foo) bar = lambda: None self.assertEqual(copy.deepcopy(bar), bar) def global_foo(x, y): return x+y def test_main(): test_support.run_unittest(TestCopy) if __name__ == "__main__": test_main()
gpl-2.0
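The deepcopy memo behaviour that test_deepcopy_memo and the reflexive tests above rely on can be shown in isolation. A minimal sketch (runs on both Python 2 and 3; the variable names are illustrative only):

import copy

shared = [1, 2]
outer = [shared, shared]
clone = copy.deepcopy(outer)
# deepcopy records every object it copies in a memo dict keyed by id(),
# so both references to `shared` resolve to one new list, not two.
assert clone[0] is clone[1]
assert clone[0] is not shared

The same memo dict is what lets reflexive structures such as x.append(x) copy without infinite recursion.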
vmax-feihu/hue
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/utils.py
114
1698
from django.conf import settings from django.db import DEFAULT_DB_ALIAS # function that will pass a test. def pass_test(*args): return def no_backend(test_func, backend): "Use this decorator to disable a test on the specified backend." if settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] == backend: return pass_test else: return test_func # Decorators to disable entire test functions for specific # spatial backends. def no_oracle(func): return no_backend(func, 'oracle') def no_postgis(func): return no_backend(func, 'postgis') def no_mysql(func): return no_backend(func, 'mysql') def no_spatialite(func): return no_backend(func, 'spatialite') # Shortcut booleans to omit only portions of tests. _default_db = settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE'].rsplit('.')[-1] oracle = _default_db == 'oracle' postgis = _default_db == 'postgis' mysql = _default_db == 'mysql' spatialite = _default_db == 'spatialite' HAS_SPATIALREFSYS = True if oracle and 'gis' in settings.DATABASES[DEFAULT_DB_ALIAS]['ENGINE']: from django.contrib.gis.db.backends.oracle.models import SpatialRefSys elif postgis: from django.contrib.gis.db.backends.postgis.models import SpatialRefSys elif spatialite: from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys else: HAS_SPATIALREFSYS = False SpatialRefSys = None def has_spatial_db(): # All databases must have spatial backends to run GeoDjango tests. spatial_dbs = [name for name, db_dict in settings.DATABASES.items() if db_dict['ENGINE'].startswith('django.contrib.gis')] return len(spatial_dbs) == len(settings.DATABASES) HAS_SPATIAL_DB = has_spatial_db()
apache-2.0
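The no_backend helper above works by swapping the decorated test for a no-op when the active engine matches. A self-contained sketch of the same idea; CURRENT_BACKEND is a hypothetical stand-in for the settings.DATABASES[...]['ENGINE'] lookup:

CURRENT_BACKEND = 'postgis'  # stand-in for the engine name from settings

def pass_test(*args):
    return  # body that trivially passes

def no_backend(test_func, backend):
    # Replace the test with the no-op when it targets the active backend.
    return pass_test if CURRENT_BACKEND == backend else test_func

def no_postgis(func):
    return no_backend(func, 'postgis')

@no_postgis
def test_postgis_specific():
    raise AssertionError('never reached under the assumed backend')

test_postgis_specific()  # no-op, since CURRENT_BACKEND == 'postgis'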
tcpcloud/openvstorage
ovs/dal/hybrids/t_testemachine.py
1
1117
# Copyright 2014 Open vStorage NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TestEMachine module """ from ovs.dal.structures import Property, Relation from ovs.dal.hybrids.t_testmachine import TestMachine from ovs.dal.hybrids.t_testdisk import TestDisk class TestEMachine(TestMachine): """ This ExtendedMachine object is used for running unittests. WARNING: These properties should not be changed """ __properties = [Property('extended', str, mandatory=False, doc='Extended property')] __relations = [Relation('the_disk', TestDisk, 'the_machines', mandatory=False)] __dynamics = []
apache-2.0
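TestEMachine shows the DAL's declarative style: a hybrid subclass layers extra typed properties and relations on top of its parent. A rough sketch of that pattern using plain classes, not the actual ovs.dal machinery:

class Property(object):
    def __init__(self, name, prop_type, mandatory=True, doc=''):
        self.name, self.type = name, prop_type
        self.mandatory, self.doc = mandatory, doc

class Machine(object):
    _properties = [Property('name', str, doc='Machine name')]

class EMachine(Machine):
    # The subclass extends the parent's schema rather than replacing it.
    _properties = Machine._properties + [
        Property('extended', str, mandatory=False, doc='Extended property')]

print([p.name for p in EMachine._properties])  # ['name', 'extended']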
nongxiaoming/rt-thread
bsp/mm32l3xx/rtconfig.py
11
3674
# BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D) import os import sys # toolchains options CROSS_TOOL = 'gcc' if os.getenv('RTT_CC'): CROSS_TOOL = os.getenv('RTT_CC') # device options ARCH = 'arm' CPU = 'cortex-m3' # cross_tool provides the cross compiler # EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR if CROSS_TOOL == 'gcc': PLATFORM = 'gcc' EXEC_PATH = '/Users/zhangyihong/.env/gcc-arm-none-eabi-5_4-2016q3/bin' elif CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = 'C:/Keil_v5' elif CROSS_TOOL == 'iar': PLATFORM = 'iar' EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.2' if os.getenv('RTT_EXEC_PATH'): EXEC_PATH = os.getenv('RTT_EXEC_PATH') BUILD = 'debug' #BUILD = 'release' if PLATFORM == 'gcc': PREFIX = 'arm-none-eabi-' CC = PREFIX + 'gcc' CXX = PREFIX + 'g++' AS = PREFIX + 'gcc' AR = PREFIX + 'ar' LINK = PREFIX + 'gcc' TARGET_EXT = 'elf' SIZE = PREFIX + 'size' OBJDUMP = PREFIX + 'objdump' OBJCPY = PREFIX + 'objcopy' DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections -Wall' CFLAGS = DEVICE + ' -std=c99' AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb ' LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,Reset_Handler -T drivers/linker_scripts/link.lds' CPATH = '' LPATH = '' if BUILD == 'debug': CFLAGS += ' -O0 -gdwarf-2 -g' AFLAGS += ' -gdwarf-2' else: CFLAGS += ' -O2' POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n' elif PLATFORM == 'armcc': # toolchains CC = 'armcc' AS = 'armasm' AR = 'armar' LINK = 'armlink' TARGET_EXT = 'axf' DEVICE = ' --cpu ' + CPU CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99' AFLAGS = DEVICE + ' --apcs=interwork ' LFLAGS = DEVICE + ' --scatter "drivers/linker_scripts/link.sct" --info sizes --info totals --info unused --info veneers --list rtthread.map --strict' CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC' LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB' CFLAGS += ' -D__MICROLIB ' AFLAGS += ' --pd "__MICROLIB SETA 1" ' LFLAGS += ' --library_type=microlib ' EXEC_PATH += '/arm/armcc/bin/' if BUILD == 'debug': CFLAGS += ' -g -O0' AFLAGS += ' -g' else: CFLAGS += ' -O2' POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET' elif PLATFORM == 'iar': # toolchains CC = 'iccarm' AS = 'iasmarm' AR = 'iarchive' LINK = 'ilinkarm' TARGET_EXT = 'out' DEVICE = '-Dewarm' # + ' -D' + PART_TYPE CFLAGS = DEVICE CFLAGS += ' --diag_suppress Pa050' CFLAGS += ' --no_cse' CFLAGS += ' --no_unroll' CFLAGS += ' --no_inline' CFLAGS += ' --no_code_motion' CFLAGS += ' --no_tbaa' CFLAGS += ' --no_clustering' CFLAGS += ' --no_scheduling' CFLAGS += ' --endian=little' CFLAGS += ' --cpu=Cortex-M3' CFLAGS += ' -e' CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"' CFLAGS += ' --silent' AFLAGS = DEVICE AFLAGS += ' -s+' AFLAGS += ' -w+' AFLAGS += ' -r' AFLAGS += ' --cpu ' + CPU AFLAGS += ' -S' if BUILD == 'debug': CFLAGS += ' --debug' CFLAGS += ' -On' else: CFLAGS += ' -Oh' LFLAGS = ' --config "drivers/linker_scripts/link.icf"' LFLAGS += ' --entry __iar_program_start' #LFLAGS += ' --silent' EXEC_PATH = EXEC_PATH + '/arm/bin/' POST_ACTION = ''
apache-2.0
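rtconfig.py derives its whole toolchain configuration from the RTT_CC and RTT_EXEC_PATH environment variables. A hedged sketch of probing the result before a build; the BSP path is a placeholder:

import os
import sys

os.environ.setdefault('RTT_CC', 'gcc')       # pick the gcc branch, matching rtconfig.py's default
sys.path.insert(0, '/path/to/bsp/mm32l3xx')  # placeholder: directory containing rtconfig.py

import rtconfig
print(rtconfig.PLATFORM)  # 'gcc', 'armcc' or 'iar'
print(rtconfig.CC)        # e.g. 'arm-none-eabi-gcc' under the gcc branch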
AutorestCI/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/effective_network_security_group.py
1
1777
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class EffectiveNetworkSecurityGroup(Model): """Effective network security group. :param network_security_group: The ID of network security group that is applied. :type network_security_group: ~azure.mgmt.network.v2016_12_01.models.SubResource :param association: :type association: ~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityGroupAssociation :param effective_security_rules: A collection of effective security rules. :type effective_security_rules: list[~azure.mgmt.network.v2016_12_01.models.EffectiveNetworkSecurityRule] """ _attribute_map = { 'network_security_group': {'key': 'networkSecurityGroup', 'type': 'SubResource'}, 'association': {'key': 'association', 'type': 'EffectiveNetworkSecurityGroupAssociation'}, 'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'}, } def __init__(self, network_security_group=None, association=None, effective_security_rules=None): super(EffectiveNetworkSecurityGroup, self).__init__() self.network_security_group = network_security_group self.association = association self.effective_security_rules = effective_security_rules
mit
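EffectiveNetworkSecurityGroup is a plain msrest Model, so it can be constructed directly from keyword arguments. A usage sketch assuming the azure-mgmt-network package is installed; the resource ID is a placeholder:

from azure.mgmt.network.v2016_12_01.models import (
    EffectiveNetworkSecurityGroup,
    SubResource,
)

nsg_ref = SubResource(id='/subscriptions/xxx/providers/Microsoft.Network/networkSecurityGroups/nsg1')
group = EffectiveNetworkSecurityGroup(
    network_security_group=nsg_ref,
    effective_security_rules=[],  # normally populated by the service
)
print(group.network_security_group.id)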
neurosnap/modular-file-storage
mfs/__init__.py
1
1644
""" Abstract interface for multiple file storage services""" from tempfile import TemporaryFile def loader(backend, *args, **kwargs): """ Main entry point for all backends :param backend: File storage class that will be used for this instance """ return backend(*args, **kwargs) def detector(): """ Finds all subclasses of mfs.BaseStorage """ return BaseStorage.__subclasses__() def saver(self, fname, filepointer): """ Saves the file from a so called file-like object :param fname: Where the location and name of the file will be saved to :param filepointer: File-like object of the file to be saved """ with open(fname, "wb") as fp: fp.write(filepointer.read()) return fname class BaseStorage(object): """ Base class that all file storage interfaces must inherit """ def __init__(self, key, secret, **kwargs): """ Most file storage services require a key and a secret code :param key: Key supplied by file storage service :param secret: Secret supplied by file storage service """ self._key = key self._secret = secret if self._connect: self._connect(**kwargs) else: raise AttributeError(str(self) + " needs a `_connect` method") if not self.downloader: raise AttributeError(str(self) + " needs a `downloader` method to download files") if not self.put: raise AttributeError(str(self) + " needs a `put` method to upload files") def get(self, *args, **kwargs): """ Wrapper for downloading """ contents = self.downloader(self, *args, **kwargs) fp = TemporaryFile() fp.write(contents) fp.flush() fp.seek(0) return fp def __str__(self): return str(self.__class__)
mit
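BaseStorage above expects a subclass to supply _connect, downloader and put. A hypothetical in-memory backend sketching that contract; everything except those three hook names is invented:

import mfs

class MemoryStorage(mfs.BaseStorage):
    def _connect(self, **kwargs):
        self._blobs = {}          # pretend "connection": an in-process dict

    def downloader(self, name):
        return self._blobs[name]  # raw bytes; get() wraps them in a temp file

    def put(self, name, filepointer):
        self._blobs[name] = filepointer.read()

store = mfs.loader(MemoryStorage, key='k', secret='s')
print(mfs.detector())             # MemoryStorage is now discoverable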
GustavePate/lycheesync
lycheesync/sync.py
1
4223
#!/usr/bin/python # -*- coding: utf-8 -*- from __future__ import print_function # from __future__ import unicode_literals from lycheesync.lycheesyncer import LycheeSyncer from lycheesync.update_scripts import inf_to_lychee_2_6_2 import logging.config import click import os import sys import pwd import grp from lycheesync.utils.boilerplatecode import script_init logger = logging.getLogger(__name__) @click.command() @click.option('-v', '--verbose', is_flag=True, help='Program verbosity.') @click.option('-n', '--normal', 'exclusive_mode', flag_value='normal', default=True, help='normal mode exclusive with replace and delete mode') @click.option('-r', '--replace', 'exclusive_mode', flag_value='replace', default=False, help='replace mode exclusive with normal and delete mode') @click.option('-d', '--dropdb', 'exclusive_mode', flag_value='delete', default=False, help='delete mode exclusive with replace and normal mode') @click.option('-s', '--sort_album_by_name', is_flag=True, help='Sort album by name') @click.option('-c', '--sanitycheck', is_flag=True, help='Perform a sanity check') @click.option('-l', '--link', is_flag=True, help="Don't copy files; create links instead") @click.option('-u26', '--updatedb26', is_flag=True, help="Update lycheesync-added data in the lychee db to the values required by lychee 2.6.2") @click.argument('imagedirpath', metavar='PHOTO_DIRECTORY_ROOT', type=click.Path(exists=True, resolve_path=True)) @click.argument('lycheepath', metavar='PATH_TO_LYCHEE_INSTALL', type=click.Path(exists=True, resolve_path=True)) @click.argument('confpath', metavar='PATH_TO_YOUR_CONFIG_FILE', type=click.Path(exists=True, resolve_path=True)) # checks file existence and attributes # @click.argument('file2', type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=False, readable=True, resolve_path=True)) def main(verbose, exclusive_mode, sort_album_by_name, sanitycheck, link, updatedb26, imagedirpath, lycheepath, confpath): """Lycheesync A script to synchronize any directory containing photos with Lychee. Source directory should be on the same host as Lychee's """ if sys.version_info.major == 2: imagedirpath = imagedirpath.decode('UTF-8') lycheepath = lycheepath.decode('UTF-8') confpath = confpath.decode('UTF-8') conf_data = {} conf_data['verbose'] = verbose conf_data["srcdir"] = imagedirpath conf_data["lycheepath"] = lycheepath conf_data['confpath'] = confpath conf_data["dropdb"] = False conf_data["replace"] = False if exclusive_mode == "delete": conf_data["dropdb"] = True elif exclusive_mode == "replace": conf_data["replace"] = True conf_data["user"] = None conf_data["group"] = None conf_data["uid"] = None conf_data["gid"] = None conf_data["sort"] = sort_album_by_name if sanitycheck: logger.info("!!!!!!!!!!!!!!!! SANITY ON") else: logger.info("!!!!!!!!!!!!!!!! SANITY OFF") conf_data["sanity"] = sanitycheck conf_data["link"] = link # if conf_data["dropdb"]: # conf_data["sort"] = True # read the owner and group of the lychee uploads directory to apply them to the uploaded photos img_path = os.path.join(conf_data["lycheepath"], "uploads", "big") stat_info = os.stat(img_path) uid = stat_info.st_uid gid = stat_info.st_gid user = pwd.getpwuid(uid)[0] group = grp.getgrgid(gid)[0] conf_data["user"] = user conf_data["group"] = group conf_data["uid"] = uid conf_data["gid"] = gid script_init(conf_data) # DB update if updatedb26: inf_to_lychee_2_6_2.updatedb(conf_data) logger.info("=================== start adding to lychee ==================") try: # DELEGATE WORK TO LYCHEESYNCER s = LycheeSyncer() s.sync() except Exception: logger.exception('Failed to run batch') logger.error("=================== script ended with errors ==================") else: logger.info("=================== script successfully ended ==================") if __name__ == '__main__': main()
mit
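Since main is a click command, it can be exercised in-process with click's test runner. A sketch; all three paths are placeholders and must actually exist, because the arguments are declared with click.Path(exists=True):

from click.testing import CliRunner
from lycheesync.sync import main

runner = CliRunner()
result = runner.invoke(main, [
    '-v',                         # verbose
    '/data/photos',               # PHOTO_DIRECTORY_ROOT (placeholder)
    '/var/www/lychee',            # PATH_TO_LYCHEE_INSTALL (placeholder)
    '/etc/lycheesync/conf.json',  # PATH_TO_YOUR_CONFIG_FILE (placeholder)
])
print(result.exit_code)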
seanwestfall/django
tests/file_storage/tests.py
199
31638
# -*- coding: utf-8 -*- from __future__ import unicode_literals import errno import os import shutil import sys import tempfile import threading import time import unittest import warnings from datetime import datetime, timedelta from django.core.cache import cache from django.core.exceptions import SuspiciousFileOperation, SuspiciousOperation from django.core.files.base import ContentFile, File from django.core.files.storage import FileSystemStorage, get_storage_class from django.core.files.uploadedfile import ( InMemoryUploadedFile, SimpleUploadedFile, TemporaryUploadedFile, ) from django.test import ( LiveServerTestCase, SimpleTestCase, TestCase, override_settings, ) from django.utils import six from django.utils._os import upath from django.utils.six.moves.urllib.request import urlopen from .models import Storage, temp_storage, temp_storage_location FILE_SUFFIX_REGEX = '[A-Za-z0-9]{7}' class GetStorageClassTests(SimpleTestCase): def test_get_filesystem_storage(self): """ get_storage_class returns the class for a storage backend name/path. """ self.assertEqual( get_storage_class('django.core.files.storage.FileSystemStorage'), FileSystemStorage) def test_get_invalid_storage_module(self): """ get_storage_class raises an error if the requested import doesn't exist. """ with six.assertRaisesRegex(self, ImportError, "No module named '?storage'?"): get_storage_class('storage.NonExistingStorage') def test_get_nonexisting_storage_class(self): """ get_storage_class raises an error if the requested class doesn't exist. """ self.assertRaises(ImportError, get_storage_class, 'django.core.files.storage.NonExistingStorage') def test_get_nonexisting_storage_module(self): """ get_storage_class raises an error if the requested module doesn't exist. """ # Error message may or may not be the fully qualified path. with six.assertRaisesRegex(self, ImportError, "No module named '?(django.core.files.)?non_existing_storage'?"): get_storage_class( 'django.core.files.non_existing_storage.NonExistingStorage') class FileStorageDeconstructionTests(unittest.TestCase): def test_deconstruction(self): path, args, kwargs = temp_storage.deconstruct() self.assertEqual(path, "django.core.files.storage.FileSystemStorage") self.assertEqual(args, tuple()) self.assertEqual(kwargs, {'location': temp_storage_location}) kwargs_orig = { 'location': temp_storage_location, 'base_url': 'http://myfiles.example.com/' } storage = FileSystemStorage(**kwargs_orig) path, args, kwargs = storage.deconstruct() self.assertEqual(kwargs, kwargs_orig) class FileStorageTests(unittest.TestCase): storage_class = FileSystemStorage def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = self.storage_class(location=self.temp_dir, base_url='/test_media_url/') # Set up a second temporary directory which is ensured to have a mixed # case name. self.temp_dir2 = tempfile.mkdtemp(suffix='aBc') def tearDown(self): shutil.rmtree(self.temp_dir) shutil.rmtree(self.temp_dir2) def test_empty_location(self): """ Makes sure an exception is raised if the location is empty """ storage = self.storage_class(location='') self.assertEqual(storage.base_location, '') self.assertEqual(storage.location, upath(os.getcwd())) def test_file_access_options(self): """ Standard file access options are available, and work as expected.
""" self.assertFalse(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'w') f.write('storage contents') f.close() self.assertTrue(self.storage.exists('storage_test')) f = self.storage.open('storage_test', 'r') self.assertEqual(f.read(), 'storage contents') f.close() self.storage.delete('storage_test') self.assertFalse(self.storage.exists('storage_test')) def test_file_accessed_time(self): """ File storage returns a Datetime object for the last accessed time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) atime = self.storage.accessed_time(f_name) self.assertEqual(atime, datetime.fromtimestamp( os.path.getatime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.accessed_time(f_name), timedelta(seconds=2)) self.storage.delete(f_name) def test_file_created_time(self): """ File storage returns a Datetime object for the creation time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) ctime = self.storage.created_time(f_name) self.assertEqual(ctime, datetime.fromtimestamp( os.path.getctime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.created_time(f_name), timedelta(seconds=2)) self.storage.delete(f_name) def test_file_modified_time(self): """ File storage returns a Datetime object for the last modified time of a file. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) mtime = self.storage.modified_time(f_name) self.assertEqual(mtime, datetime.fromtimestamp( os.path.getmtime(self.storage.path(f_name)))) self.assertLess(datetime.now() - self.storage.modified_time(f_name), timedelta(seconds=2)) self.storage.delete(f_name) def test_file_save_without_name(self): """ File storage extracts the filename from the content object if no name is given explicitly. """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f.name = 'test.file' storage_f_name = self.storage.save(None, f) self.assertEqual(storage_f_name, f.name) self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name))) self.storage.delete(storage_f_name) def test_file_save_with_path(self): """ Saving a pathname should create intermediate directories as necessary. 
""" self.assertFalse(self.storage.exists('path/to')) self.storage.save('path/to/test.file', ContentFile('file saved with path')) self.assertTrue(self.storage.exists('path/to')) with self.storage.open('path/to/test.file') as f: self.assertEqual(f.read(), b'file saved with path') self.assertTrue(os.path.exists( os.path.join(self.temp_dir, 'path', 'to', 'test.file'))) self.storage.delete('path/to/test.file') def test_save_doesnt_close(self): with TemporaryUploadedFile('test', 'text/plain', 1, 'utf8') as file: file.write(b'1') file.seek(0) self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) file = InMemoryUploadedFile(six.StringIO('1'), '', 'test', 'text/plain', 1, 'utf8') with file: self.assertFalse(file.closed) self.storage.save('path/to/test.file', file) self.assertFalse(file.closed) self.assertFalse(file.file.closed) def test_file_path(self): """ File storage returns the full path of a file """ self.assertFalse(self.storage.exists('test.file')) f = ContentFile('custom contents') f_name = self.storage.save('test.file', f) self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name)) self.storage.delete(f_name) def test_file_url(self): """ File storage returns a url to access a given file from the Web. """ self.assertEqual(self.storage.url('test.file'), '%s%s' % (self.storage.base_url, 'test.file')) # should encode special chars except ~!*()' # like encodeURIComponent() JavaScript function do self.assertEqual(self.storage.url(r"""~!*()'@#$%^&*abc`+ =.file"""), """/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file""") # should translate os path separator(s) to the url path separator self.assertEqual(self.storage.url("""a/b\\c.file"""), """/test_media_url/a/b/c.file""") self.storage.base_url = None self.assertRaises(ValueError, self.storage.url, 'test.file') # #22717: missing ending slash in base_url should be auto-corrected storage = self.storage_class(location=self.temp_dir, base_url='/no_ending_slash') self.assertEqual( storage.url('test.file'), '%s%s' % (storage.base_url, 'test.file') ) def test_listdir(self): """ File storage returns a tuple containing directories and files. """ self.assertFalse(self.storage.exists('storage_test_1')) self.assertFalse(self.storage.exists('storage_test_2')) self.assertFalse(self.storage.exists('storage_dir_1')) self.storage.save('storage_test_1', ContentFile('custom content')) self.storage.save('storage_test_2', ContentFile('custom content')) os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1')) dirs, files = self.storage.listdir('') self.assertEqual(set(dirs), {'storage_dir_1'}) self.assertEqual(set(files), {'storage_test_1', 'storage_test_2'}) self.storage.delete('storage_test_1') self.storage.delete('storage_test_2') os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1')) def test_file_storage_prevents_directory_traversal(self): """ File storage prevents directory traversal (files can only be accessed if they're below the storage location). """ self.assertRaises(SuspiciousOperation, self.storage.exists, '..') self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd') def test_file_storage_preserves_filename_case(self): """The storage backend should preserve case of filenames.""" # Create a storage backend associated with the mixed case name # directory. other_temp_storage = self.storage_class(location=self.temp_dir2) # Ask that storage backend to store a file with a mixed case filename. 
mixed_case = 'CaSe_SeNsItIvE' file = other_temp_storage.open(mixed_case, 'w') file.write('storage contents') file.close() self.assertEqual(os.path.join(self.temp_dir2, mixed_case), other_temp_storage.path(mixed_case)) other_temp_storage.delete(mixed_case) def test_makedirs_race_handling(self): """ File storage should be robust against directory creation race conditions. """ real_makedirs = os.makedirs # Monkey-patch os.makedirs, to simulate a normal call, a raced call, # and an error. def fake_makedirs(path): if path == os.path.join(self.temp_dir, 'normal'): real_makedirs(path) elif path == os.path.join(self.temp_dir, 'raced'): real_makedirs(path) raise OSError(errno.EEXIST, 'simulated EEXIST') elif path == os.path.join(self.temp_dir, 'error'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.makedirs = fake_makedirs self.storage.save('normal/test.file', ContentFile('saved normally')) with self.storage.open('normal/test.file') as f: self.assertEqual(f.read(), b'saved normally') self.storage.save('raced/test.file', ContentFile('saved with race')) with self.storage.open('raced/test.file') as f: self.assertEqual(f.read(), b'saved with race') # Check that OSErrors aside from EEXIST are still raised. self.assertRaises(OSError, self.storage.save, 'error/test.file', ContentFile('not saved')) finally: os.makedirs = real_makedirs def test_remove_race_handling(self): """ File storage should be robust against file removal race conditions. """ real_remove = os.remove # Monkey-patch os.remove, to simulate a normal call, a raced call, # and an error. def fake_remove(path): if path == os.path.join(self.temp_dir, 'normal.file'): real_remove(path) elif path == os.path.join(self.temp_dir, 'raced.file'): real_remove(path) raise OSError(errno.ENOENT, 'simulated ENOENT') elif path == os.path.join(self.temp_dir, 'error.file'): raise OSError(errno.EACCES, 'simulated EACCES') else: self.fail('unexpected argument %r' % path) try: os.remove = fake_remove self.storage.save('normal.file', ContentFile('delete normally')) self.storage.delete('normal.file') self.assertFalse(self.storage.exists('normal.file')) self.storage.save('raced.file', ContentFile('delete with race')) self.storage.delete('raced.file') self.assertFalse(self.storage.exists('normal.file')) # Check that OSErrors aside from ENOENT are still raised. self.storage.save('error.file', ContentFile('delete with error')) self.assertRaises(OSError, self.storage.delete, 'error.file') finally: os.remove = real_remove def test_file_chunks_error(self): """ Test behavior when file.chunks() is raising an error """ f1 = ContentFile('chunks fails') def failing_chunks(): raise IOError f1.chunks = failing_chunks with self.assertRaises(IOError): self.storage.save('error.file', f1) def test_delete_no_name(self): """ Calling delete with an empty name should not try to remove the base storage directory, but fail loudly (#20660). """ with self.assertRaises(AssertionError): self.storage.delete('') class CustomStorage(FileSystemStorage): def get_available_name(self, name, max_length=None): """ Append numbers to duplicate files rather than underscores, like Trac. 
""" parts = name.split('.') basename, ext = parts[0], parts[1:] number = 2 while self.exists(name): name = '.'.join([basename, str(number)] + ext) number += 1 return name class CustomStorageTests(FileStorageTests): storage_class = CustomStorage def test_custom_get_available_name(self): first = self.storage.save('custom_storage', ContentFile('custom contents')) self.assertEqual(first, 'custom_storage') second = self.storage.save('custom_storage', ContentFile('more contents')) self.assertEqual(second, 'custom_storage.2') self.storage.delete(first) self.storage.delete(second) class FileFieldStorageTests(TestCase): def tearDown(self): shutil.rmtree(temp_storage_location) def _storage_max_filename_length(self, storage): """ Query filesystem for maximum filename length (e.g. AUFS has 242). """ dir_to_test = storage.location while not os.path.exists(dir_to_test): dir_to_test = os.path.dirname(dir_to_test) try: return os.pathconf(dir_to_test, 'PC_NAME_MAX') except Exception: return 255 # Should be safe on most backends def test_files(self): # Attempting to access a FileField from the class raises a descriptive # error self.assertRaises(AttributeError, lambda: Storage.normal) # An object without a file has limited functionality. obj1 = Storage() self.assertEqual(obj1.normal.name, "") self.assertRaises(ValueError, lambda: obj1.normal.size) # Saving a file enables full functionality. obj1.normal.save("django_test.txt", ContentFile("content")) self.assertEqual(obj1.normal.name, "tests/django_test.txt") self.assertEqual(obj1.normal.size, 7) self.assertEqual(obj1.normal.read(), b"content") obj1.normal.close() # File objects can be assigned to FileField attributes, but shouldn't # get committed until the model it's attached to is saved. obj1.normal = SimpleUploadedFile("assignment.txt", b"content") dirs, files = temp_storage.listdir("tests") self.assertEqual(dirs, []) self.assertNotIn("assignment.txt", files) obj1.save() dirs, files = temp_storage.listdir("tests") self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"]) # Save another file with the same name. obj2 = Storage() obj2.normal.save("django_test.txt", ContentFile("more content")) obj2_name = obj2.normal.name six.assertRegex(self, obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) self.assertEqual(obj2.normal.size, 12) obj2.normal.close() # Deleting an object does not delete the file it uses. obj2.delete() obj2.normal.save("django_test.txt", ContentFile("more content")) self.assertNotEqual(obj2_name, obj2.normal.name) six.assertRegex(self, obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX) obj2.normal.close() def test_filefield_read(self): # Files can be read in a little at a time, if necessary. obj = Storage.objects.create( normal=SimpleUploadedFile("assignment.txt", b"content")) obj.normal.open() self.assertEqual(obj.normal.read(3), b"con") self.assertEqual(obj.normal.read(), b"tent") self.assertEqual(list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"]) obj.normal.close() def test_duplicate_filename(self): # Multiple files with the same name get _(7 random chars) appended to them. 
objs = [Storage() for i in range(2)] for o in objs: o.normal.save("multiple_files.txt", ContentFile("Same Content")) try: names = [o.normal.name for o in objs] self.assertEqual(names[0], "tests/multiple_files.txt") six.assertRegex(self, names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX) finally: for o in objs: o.delete() def test_file_truncation(self): # Given the max_length is limited, when multiple files get uploaded # under the same name, then the filename get truncated in order to fit # in _(7 random chars). When most of the max_length is taken by # dirname + extension and there are not enough characters in the # filename to truncate, an exception should be raised. objs = [Storage() for i in range(2)] filename = 'filename.ext' for o in objs: o.limited_length.save(filename, ContentFile('Same Content')) try: # Testing truncation. names = [o.limited_length.name for o in objs] self.assertEqual(names[0], 'tests/%s' % filename) six.assertRegex(self, names[1], 'tests/fi_%s.ext' % FILE_SUFFIX_REGEX) # Testing exception is raised when filename is too short to truncate. filename = 'short.longext' objs[0].limited_length.save(filename, ContentFile('Same Content')) self.assertRaisesMessage( SuspiciousFileOperation, 'Storage can not find an available filename', objs[1].limited_length.save, *(filename, ContentFile('Same Content')) ) finally: for o in objs: o.delete() @unittest.skipIf( sys.platform.startswith('win'), "Windows supports at most 260 characters in a path.", ) def test_extended_length_storage(self): # Testing FileField with max_length > 255. Most systems have filename # length limitation of 255. Path takes extra chars. filename = (self._storage_max_filename_length(temp_storage) - 4) * 'a' # 4 chars for extension. obj = Storage() obj.extended_length.save('%s.txt' % filename, ContentFile('Same Content')) self.assertEqual(obj.extended_length.name, 'tests/%s.txt' % filename) self.assertEqual(obj.extended_length.read(), b'Same Content') obj.extended_length.close() def test_old_style_storage(self): # Testing backward-compatibility with old-style storage backends that # don't take ``max_length`` parameter in ``get_available_name()`` # and save(). A deprecation warning should be raised. obj = Storage() with warnings.catch_warnings(record=True) as warns: warnings.simplefilter('always') obj.old_style.save('deprecated_storage_test.txt', ContentFile('Same Content')) self.assertEqual(len(warns), 2) self.assertEqual( str(warns[0].message), 'Backwards compatibility for storage backends without support for ' 'the `max_length` argument in Storage.save() will be removed in ' 'Django 1.10.' ) self.assertEqual( str(warns[1].message), 'Backwards compatibility for storage backends without support for ' 'the `max_length` argument in Storage.get_available_name() will ' 'be removed in Django 1.10.' ) self.assertEqual(obj.old_style.name, 'tests/deprecated_storage_test.txt') self.assertEqual(obj.old_style.read(), b'Same Content') obj.old_style.close() def test_filefield_default(self): # Default values allow an object to access a single file. temp_storage.save('tests/default.txt', ContentFile('default content')) obj = Storage.objects.create() self.assertEqual(obj.default.name, "tests/default.txt") self.assertEqual(obj.default.read(), b"default content") obj.default.close() # But it shouldn't be deleted, even if there are no more objects using # it. 
obj.delete() obj = Storage() self.assertEqual(obj.default.read(), b"default content") obj.default.close() def test_empty_upload_to(self): # upload_to can be empty, meaning it does not use subdirectory. obj = Storage() obj.empty.save('django_test.txt', ContentFile('more content')) self.assertEqual(obj.empty.name, "./django_test.txt") self.assertEqual(obj.empty.read(), b"more content") obj.empty.close() def test_random_upload_to(self): # Verify the fix for #5655, making sure the directory is only # determined once. obj = Storage() obj.random.save("random_file", ContentFile("random content")) self.assertTrue(obj.random.name.endswith("/random_file")) obj.random.close() def test_custom_valid_name_callable_upload_to(self): """ Storage.get_valid_name() should be called when upload_to is a callable. """ obj = Storage() obj.custom_valid_name.save("random_file", ContentFile("random content")) # CustomValidNameStorage.get_valid_name() appends '_valid' to the name self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid")) obj.custom_valid_name.close() def test_filefield_pickling(self): # Push an object into the cache to make sure it pickles properly obj = Storage() obj.normal.save("django_test.txt", ContentFile("more content")) obj.normal.close() cache.set("obj", obj) self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt") def test_file_object(self): # Create sample file temp_storage.save('tests/example.txt', ContentFile('some content')) # Load it as python file object with open(temp_storage.path('tests/example.txt')) as file_obj: # Save it using storage and read its content temp_storage.save('tests/file_obj', file_obj) self.assertTrue(temp_storage.exists('tests/file_obj')) with temp_storage.open('tests/file_obj') as f: self.assertEqual(f.read(), b'some content') def test_stringio(self): # Test passing StringIO instance as content argument to save output = six.StringIO() output.write('content') output.seek(0) # Save it and read written file temp_storage.save('tests/stringio', output) self.assertTrue(temp_storage.exists('tests/stringio')) with temp_storage.open('tests/stringio') as f: self.assertEqual(f.read(), b'content') # Tests for a race condition on file saving (#4948). # This is written in such a way that it'll always pass on platforms # without threading. 
class SlowFile(ContentFile): def chunks(self): time.sleep(1) return super(ContentFile, self).chunks() class FileSaveRaceConditionTest(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) self.thread = threading.Thread(target=self.save_file, args=['conflict']) def tearDown(self): shutil.rmtree(self.storage_dir) def save_file(self, name): name = self.storage.save(name, SlowFile(b"Data")) def test_race_condition(self): self.thread.start() self.save_file('conflict') self.thread.join() files = sorted(os.listdir(self.storage_dir)) self.assertEqual(files[0], 'conflict') six.assertRegex(self, files[1], 'conflict_%s' % FILE_SUFFIX_REGEX) @unittest.skipIf(sys.platform.startswith('win'), "Windows only partially supports umasks and chmod.") class FileStoragePermissions(unittest.TestCase): def setUp(self): self.umask = 0o027 self.old_umask = os.umask(self.umask) self.storage_dir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.storage_dir) os.umask(self.old_umask) @override_settings(FILE_UPLOAD_PERMISSIONS=0o654) def test_file_upload_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_file", ContentFile("data")) actual_mode = os.stat(self.storage.path(name))[0] & 0o777 self.assertEqual(actual_mode, 0o654) @override_settings(FILE_UPLOAD_PERMISSIONS=None) def test_file_upload_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) fname = self.storage.save("some_file", ContentFile("data")) mode = os.stat(self.storage.path(fname))[0] & 0o777 self.assertEqual(mode, 0o666 & ~self.umask) @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765) def test_file_upload_directory_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o765) @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None) def test_file_upload_directory_default_permissions(self): self.storage = FileSystemStorage(self.storage_dir) name = self.storage.save("the_directory/the_file", ContentFile("data")) dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777 self.assertEqual(dir_mode, 0o777 & ~self.umask) class FileStoragePathParsing(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_directory_with_dot(self): """Regression test for #9610. If the directory name contains a dot and the file name doesn't, make sure we still mangle the file name instead of the directory name. """ self.storage.save('dotted.path/test', ContentFile("1")) self.storage.save('dotted.path/test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], 'test') six.assertRegex(self, files[1], 'test_%s' % FILE_SUFFIX_REGEX) def test_first_character_dot(self): """ File names with a dot as their first character don't have an extension, and the underscore should get added to the end. 
""" self.storage.save('dotted.path/.test', ContentFile("1")) self.storage.save('dotted.path/.test', ContentFile("2")) files = sorted(os.listdir(os.path.join(self.storage_dir, 'dotted.path'))) self.assertFalse(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path'))) self.assertEqual(files[0], '.test') six.assertRegex(self, files[1], '.test_%s' % FILE_SUFFIX_REGEX) class ContentFileStorageTestCase(unittest.TestCase): def setUp(self): self.storage_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(self.storage_dir) def tearDown(self): shutil.rmtree(self.storage_dir) def test_content_saving(self): """ Test that ContentFile can be saved correctly with the filesystem storage, both if it was initialized with string or unicode content""" self.storage.save('bytes.txt', ContentFile(b"content")) self.storage.save('unicode.txt', ContentFile("español")) @override_settings(ROOT_URLCONF='file_storage.urls') class FileLikeObjectTestCase(LiveServerTestCase): """ Test file-like objects (#15644). """ available_apps = [] def setUp(self): self.temp_dir = tempfile.mkdtemp() self.storage = FileSystemStorage(location=self.temp_dir) def tearDown(self): shutil.rmtree(self.temp_dir) def test_urllib2_urlopen(self): """ Test the File storage API with a file like object coming from urllib2.urlopen() """ file_like_object = urlopen(self.live_server_url + '/') f = File(file_like_object) stored_filename = self.storage.save("remote_file.html", f) remote_file = urlopen(self.live_server_url + '/') with self.storage.open(stored_filename) as stored_file: self.assertEqual(stored_file.read(), remote_file.read())
bsd-3-clause
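Most of the behaviours covered by the tests above reduce to a save/open/delete round trip on FileSystemStorage. A minimal sketch; settings.configure() is only there so the storage defaults resolve outside a project:

import shutil
import tempfile

from django.conf import settings
settings.configure()  # minimal settings so FILE_UPLOAD_* defaults resolve

from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage

tmp = tempfile.mkdtemp()
try:
    storage = FileSystemStorage(location=tmp, base_url='/media/')
    name = storage.save('example.txt', ContentFile(b'payload'))  # name is mangled on collision
    with storage.open(name) as f:
        assert f.read() == b'payload'
    storage.delete(name)
finally:
    shutil.rmtree(tmp)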
40223121/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/configparser.py
692
50025
"""Configuration file parser. A configuration file consists of sections, lead by a "[section]" header, and followed by "name: value" entries, with continuations and such in the style of RFC 822. Intrinsic defaults can be specified by passing them into the ConfigParser constructor as a dictionary. class: ConfigParser -- responsible for parsing a list of configuration files, and managing the parsed database. methods: __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True): Create the parser. When `defaults' is given, it is initialized into the dictionary or intrinsic defaults. The keys must be strings, the values must be appropriate for %()s string interpolation. When `dict_type' is given, it will be used to create the dictionary objects for the list of sections, for the options within a section, and for the default values. When `delimiters' is given, it will be used as the set of substrings that divide keys from values. When `comment_prefixes' is given, it will be used as the set of substrings that prefix comments in empty lines. Comments can be indented. When `inline_comment_prefixes' is given, it will be used as the set of substrings that prefix comments in non-empty lines. When `strict` is True, the parser won't allow for any section or option duplicates while reading from a single source (file, string or dictionary). Default is True. When `empty_lines_in_values' is False (default: True), each empty line marks the end of an option. Otherwise, internal empty lines of a multiline option are kept as part of the value. When `allow_no_value' is True (default: False), options without values are accepted; the value presented for these is None. sections() Return all the configuration section names, sans DEFAULT. has_section(section) Return whether the given section exists. has_option(section, option) Return whether the given option exists in the given section. options(section) Return list of configuration options for the named section. read(filenames, encoding=None) Read and parse the list of named configuration files, given by name. A single filename is also allowed. Non-existing files are ignored. Return list of successfully read files. read_file(f, filename=None) Read and parse one configuration file, given as a file object. The filename defaults to f.name; it is only used in error messages (if f has no `name' attribute, the string `<???>' is used). read_string(string) Read configuration from a given string. read_dict(dictionary) Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. Values are automatically converted to strings. get(section, option, raw=False, vars=None, fallback=_UNSET) Return a string value for the named option. All % interpolations are expanded in the return values, based on the defaults passed into the constructor and the DEFAULT section. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents override any pre-existing defaults. If `option' is a key in `vars', the value from `vars' is used. getint(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to an integer. 
getfloat(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a float. getboolean(section, options, raw=False, vars=None, fallback=_UNSET) Like get(), but convert value to a boolean (currently case insensitively defined as 0, false, no, off for False, and 1, true, yes, on for True). Returns False or True. items(section=_UNSET, raw=False, vars=None) If section is given, return a list of tuples with (name, value) for each option in the section. Otherwise, return a list of tuples with (section_name, section_proxy) for each section, including DEFAULTSECT. remove_section(section) Remove the given file section and all its options. remove_option(section, option) Remove the given option from the given section. set(section, option, value) Set the given option. write(fp, space_around_delimiters=True) Write the configuration state in .ini format. If `space_around_delimiters' is True (the default), delimiters between keys and values are surrounded by spaces. """ from collections.abc import MutableMapping from collections import OrderedDict as _default_dict, ChainMap as _ChainMap import functools import io import itertools import re import sys import warnings __all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", "NoOptionError", "InterpolationError", "InterpolationDepthError", "InterpolationSyntaxError", "ParsingError", "MissingSectionHeaderError", "ConfigParser", "SafeConfigParser", "RawConfigParser", "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] DEFAULTSECT = "DEFAULT" MAX_INTERPOLATION_DEPTH = 10 # exception classes class Error(Exception): """Base class for ConfigParser exceptions.""" def _get_message(self): """Getter for 'message'; needed only to override deprecation in BaseException. """ return self.__message def _set_message(self, value): """Setter for 'message'; needed only to override deprecation in BaseException. """ self.__message = value # BaseException.message has been deprecated since Python 2.6. To prevent # DeprecationWarning from popping up over this pre-existing attribute, use # a new property that takes lookup precedence. message = property(_get_message, _set_message) def __init__(self, msg=''): self.message = msg Exception.__init__(self, msg) def __repr__(self): return self.message __str__ = __repr__ class NoSectionError(Error): """Raised when no section matches a requested option.""" def __init__(self, section): Error.__init__(self, 'No section: %r' % (section,)) self.section = section self.args = (section, ) class DuplicateSectionError(Error): """Raised when a section is repeated in an input source. Possible repetitions that raise this exception are: multiple creation using the API or in strict parsers when a section is found more than once in a single input file, string or dictionary. """ def __init__(self, section, source=None, lineno=None): msg = [repr(section), " already exists"] if source is not None: message = ["While reading from ", source] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": section ") message.extend(msg) msg = message else: msg.insert(0, "Section ") Error.__init__(self, "".join(msg)) self.section = section self.source = source self.lineno = lineno self.args = (section, source, lineno) class DuplicateOptionError(Error): """Raised by strict parsers when an option is repeated in an input source. Current implementation raises this exception only when an option is found more than once in a single file, string or dictionary. 
""" def __init__(self, section, option, source=None, lineno=None): msg = [repr(option), " in section ", repr(section), " already exists"] if source is not None: message = ["While reading from ", source] if lineno is not None: message.append(" [line {0:2d}]".format(lineno)) message.append(": option ") message.extend(msg) msg = message else: msg.insert(0, "Option ") Error.__init__(self, "".join(msg)) self.section = section self.option = option self.source = source self.lineno = lineno self.args = (section, option, source, lineno) class NoOptionError(Error): """A requested option was not found.""" def __init__(self, option, section): Error.__init__(self, "No option %r in section: %r" % (option, section)) self.option = option self.section = section self.args = (option, section) class InterpolationError(Error): """Base class for interpolation-related exceptions.""" def __init__(self, option, section, msg): Error.__init__(self, msg) self.option = option self.section = section self.args = (option, section, msg) class InterpolationMissingOptionError(InterpolationError): """A string substitution required a setting which was not available.""" def __init__(self, option, section, rawval, reference): msg = ("Bad value substitution:\n" "\tsection: [%s]\n" "\toption : %s\n" "\tkey : %s\n" "\trawval : %s\n" % (section, option, reference, rawval)) InterpolationError.__init__(self, option, section, msg) self.reference = reference self.args = (option, section, rawval, reference) class InterpolationSyntaxError(InterpolationError): """Raised when the source text contains invalid syntax. Current implementation raises this exception when the source text into which substitutions are made does not conform to the required syntax. """ class InterpolationDepthError(InterpolationError): """Raised when substitutions are nested too deeply.""" def __init__(self, option, section, rawval): msg = ("Value interpolation too deeply recursive:\n" "\tsection: [%s]\n" "\toption : %s\n" "\trawval : %s\n" % (section, option, rawval)) InterpolationError.__init__(self, option, section, msg) self.args = (option, section, rawval) class ParsingError(Error): """Raised when a configuration file does not follow legal syntax.""" def __init__(self, source=None, filename=None): # Exactly one of `source'/`filename' arguments has to be given. # `filename' kept for compatibility. if filename and source: raise ValueError("Cannot specify both `filename' and `source'. " "Use `source'.") elif not filename and not source: raise ValueError("Required argument `source' not given.") elif filename: source = filename Error.__init__(self, 'Source contains parsing errors: %s' % source) self.source = source self.errors = [] self.args = (source, ) @property def filename(self): """Deprecated, use `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. " "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) return self.source @filename.setter def filename(self, value): """Deprecated, user `source'.""" warnings.warn( "The 'filename' attribute will be removed in future versions. 
" "Use 'source' instead.", DeprecationWarning, stacklevel=2 ) self.source = value def append(self, lineno, line): self.errors.append((lineno, line)) self.message += '\n\t[line %2d]: %s' % (lineno, line) class MissingSectionHeaderError(ParsingError): """Raised when a key-value pair is found before any section header.""" def __init__(self, filename, lineno, line): Error.__init__( self, 'File contains no section headers.\nfile: %s, line: %d\n%r' % (filename, lineno, line)) self.source = filename self.lineno = lineno self.line = line self.args = (filename, lineno, line) # Used in parser getters to indicate the default behaviour when a specific # option is not found it to raise an exception. Created to enable `None' as # a valid fallback value. _UNSET = object() class Interpolation: """Dummy interpolation that passes the value through with no changes.""" def before_get(self, parser, section, option, value, defaults): return value def before_set(self, parser, section, option, value): return value def before_read(self, parser, section, option, value): return value def before_write(self, parser, section, option, value): return value class BasicInterpolation(Interpolation): """Interpolation as implemented in the classic ConfigParser. The option values can contain format strings which refer to other values in the same section, or values in the special default section. For example: something: %(dir)s/whatever would resolve the "%(dir)s" to the value of dir. All reference expansions are done late, on demand. If a user needs to use a bare % in a configuration file, she can escape it by writing %%. Other % usage is considered a user error and raises `InterpolationSyntaxError'.""" _KEYCRE = re.compile(r"%\(([^)]+)\)s") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('%%', '') # escaped percent signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '%' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('%'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rest) while rest: p = rest.find("%") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "%": accum.append("%") rest = rest[2:] elif c == "(": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) var = parser.optionxform(m.group(1)) rest = rest[m.end():] try: v = map[var] except KeyError: raise InterpolationMissingOptionError( option, section, rest, var) if "%" in v: self._interpolate_some(parser, option, accum, v, section, map, depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'%%' must be followed by '%%' or '(', " "found: %r" % (rest,)) class ExtendedInterpolation(Interpolation): """Advanced variant of interpolation, supports the syntax used by `zc.buildout'. 
Enables interpolation between sections.""" _KEYCRE = re.compile(r"\$\{([^}]+)\}") def before_get(self, parser, section, option, value, defaults): L = [] self._interpolate_some(parser, option, L, value, section, defaults, 1) return ''.join(L) def before_set(self, parser, section, option, value): tmp_value = value.replace('$$', '') # escaped dollar signs tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax if '$' in tmp_value: raise ValueError("invalid interpolation syntax in %r at " "position %d" % (value, tmp_value.find('%'))) return value def _interpolate_some(self, parser, option, accum, rest, section, map, depth): if depth > MAX_INTERPOLATION_DEPTH: raise InterpolationDepthError(option, section, rest) while rest: p = rest.find("$") if p < 0: accum.append(rest) return if p > 0: accum.append(rest[:p]) rest = rest[p:] # p is no longer used c = rest[1:2] if c == "$": accum.append("$") rest = rest[2:] elif c == "{": m = self._KEYCRE.match(rest) if m is None: raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest) path = m.group(1).split(':') rest = rest[m.end():] sect = section opt = option try: if len(path) == 1: opt = parser.optionxform(path[0]) v = map[opt] elif len(path) == 2: sect = path[0] opt = parser.optionxform(path[1]) v = parser.get(sect, opt, raw=True) else: raise InterpolationSyntaxError( option, section, "More than one ':' found: %r" % (rest,)) except (KeyError, NoSectionError, NoOptionError): raise InterpolationMissingOptionError( option, section, rest, ":".join(path)) if "$" in v: self._interpolate_some(parser, opt, accum, v, sect, dict(parser.items(sect, raw=True)), depth + 1) else: accum.append(v) else: raise InterpolationSyntaxError( option, section, "'$' must be followed by '$' or '{', " "found: %r" % (rest,)) class LegacyInterpolation(Interpolation): """Deprecated interpolation used in old versions of ConfigParser. Use BasicInterpolation or ExtendedInterpolation instead.""" _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") def before_get(self, parser, section, option, value, vars): rawval = value depth = MAX_INTERPOLATION_DEPTH while depth: # Loop through this until it's done depth -= 1 if value and "%(" in value: replace = functools.partial(self._interpolation_replace, parser=parser) value = self._KEYCRE.sub(replace, value) try: value = value % vars except KeyError as e: raise InterpolationMissingOptionError( option, section, rawval, e.args[0]) else: break if value and "%(" in value: raise InterpolationDepthError(option, section, rawval) return value def before_set(self, parser, section, option, value): return value @staticmethod def _interpolation_replace(match, parser): s = match.group(1) if s is None: return match.group() else: return "%%(%s)s" % parser.optionxform(s) class RawConfigParser(MutableMapping): """ConfigParser that does not do interpolation.""" # Regular expressions for parsing section headers and options _SECT_TMPL = r""" \[ # [ (?P<header>[^]]+) # very permissive! \] # ] """ _OPT_TMPL = r""" (?P<option>.*?) # very permissive! \s*(?P<vi>{delim})\s* # any number of space/tab, # followed by any of the # allowed delimiters, # followed by any space/tab (?P<value>.*)$ # everything up to eol """ _OPT_NV_TMPL = r""" (?P<option>.*?) # very permissive! 
\s*(?: # any number of space/tab, (?P<vi>{delim})\s* # optionally followed by # any of the allowed # delimiters, followed by any # space/tab (?P<value>.*))?$ # everything up to eol """ # Interpolation algorithm to be used if the user does not specify another _DEFAULT_INTERPOLATION = Interpolation() # Compiled regular expression for matching sections SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE) # Compiled regular expression for matching options with typical separators OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE) # Compiled regular expression for matching options with optional values # delimited using typical separators OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE) # Compiled regular expression for matching leading whitespace in a line NONSPACECRE = re.compile(r"\S") # Possible boolean values in the configuration. BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True, '0': False, 'no': False, 'false': False, 'off': False} def __init__(self, defaults=None, dict_type=_default_dict, allow_no_value=False, *, delimiters=('=', ':'), comment_prefixes=('#', ';'), inline_comment_prefixes=None, strict=True, empty_lines_in_values=True, default_section=DEFAULTSECT, interpolation=_UNSET): self._dict = dict_type self._sections = self._dict() self._defaults = self._dict() self._proxies = self._dict() self._proxies[default_section] = SectionProxy(self, default_section) if defaults: for key, value in defaults.items(): self._defaults[self.optionxform(key)] = value self._delimiters = tuple(delimiters) if delimiters == ('=', ':'): self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE else: d = "|".join(re.escape(d) for d in delimiters) if allow_no_value: self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d), re.VERBOSE) else: self._optcre = re.compile(self._OPT_TMPL.format(delim=d), re.VERBOSE) self._comment_prefixes = tuple(comment_prefixes or ()) self._inline_comment_prefixes = tuple(inline_comment_prefixes or ()) self._strict = strict self._allow_no_value = allow_no_value self._empty_lines_in_values = empty_lines_in_values self.default_section=default_section self._interpolation = interpolation if self._interpolation is _UNSET: self._interpolation = self._DEFAULT_INTERPOLATION if self._interpolation is None: self._interpolation = Interpolation() def defaults(self): return self._defaults def sections(self): """Return a list of section names, excluding [DEFAULT]""" # self._sections will never have [DEFAULT] in it return list(self._sections.keys()) def add_section(self, section): """Create a new section in the configuration. Raise DuplicateSectionError if a section by the specified name already exists. Raise ValueError if name is DEFAULT. """ if section == self.default_section: raise ValueError('Invalid section name: %r' % section) if section in self._sections: raise DuplicateSectionError(section) self._sections[section] = self._dict() self._proxies[section] = SectionProxy(self, section) def has_section(self, section): """Indicate whether the named section is present in the configuration. The DEFAULT section is not acknowledged. """ return section in self._sections def options(self, section): """Return a list of option names for the given section name.""" try: opts = self._sections[section].copy() except KeyError: raise NoSectionError(section) opts.update(self._defaults) return list(opts.keys()) def read(self, filenames, encoding=None): """Read and parse a filename or a list of filenames. 
Files that cannot be opened are silently ignored; this is designed so that you can specify a list of potential configuration file locations (e.g. current directory, user's home directory, systemwide directory), and all existing configuration files in the list will be read. A single filename may also be given. Return list of successfully read files. """ if isinstance(filenames, str): filenames = [filenames] read_ok = [] for filename in filenames: try: with open(filename, encoding=encoding) as fp: self._read(fp, filename) except IOError: continue read_ok.append(filename) return read_ok def read_file(self, f, source=None): """Like read() but the argument must be a file-like object. The `f' argument must be iterable, returning one line at a time. Optional second argument is the `source' specifying the name of the file being read. If not given, it is taken from f.name. If `f' has no `name' attribute, `<???>' is used. """ if source is None: try: source = f.name except AttributeError: source = '<???>' self._read(f, source) def read_string(self, string, source='<string>'): """Read configuration from a given string.""" sfile = io.StringIO(string) self.read_file(sfile, source) def read_dict(self, dictionary, source='<dict>'): """Read configuration from a dictionary. Keys are section names, values are dictionaries with keys and values that should be present in the section. If the used dictionary type preserves order, sections and their keys will be added in order. All types held in the dictionary are converted to strings during reading, including section names, option names and keys. Optional second argument is the `source' specifying the name of the dictionary being read. """ elements_added = set() for section, keys in dictionary.items(): section = str(section) try: self.add_section(section) except (DuplicateSectionError, ValueError): if self._strict and section in elements_added: raise elements_added.add(section) for key, value in keys.items(): key = self.optionxform(str(key)) if value is not None: value = str(value) if self._strict and (section, key) in elements_added: raise DuplicateOptionError(section, key, source) elements_added.add((section, key)) self.set(section, key, value) def readfp(self, fp, filename=None): """Deprecated, use read_file instead.""" warnings.warn( "This method will be removed in future versions. " "Use 'parser.read_file()' instead.", DeprecationWarning, stacklevel=2 ) self.read_file(fp, source=filename) def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET): """Get an option value for a given section. If `vars' is provided, it must be a dictionary. The option is looked up in `vars' (if provided), `section', and in `DEFAULTSECT' in that order. If the key is not found and `fallback' is provided, it is used as a fallback value. `None' can be provided as a `fallback' value. If interpolation is enabled and the optional argument `raw' is False, all interpolations are expanded in the return values. Arguments `raw', `vars', and `fallback' are keyword only. The section DEFAULT is special. 
""" try: d = self._unify_values(section, vars) except NoSectionError: if fallback is _UNSET: raise else: return fallback option = self.optionxform(option) try: value = d[option] except KeyError: if fallback is _UNSET: raise NoOptionError(option, section) else: return fallback if raw or value is None: return value else: return self._interpolation.before_get(self, section, option, value, d) def _get(self, section, conv, option, **kwargs): return conv(self.get(section, option, **kwargs)) def getint(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, int, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def getfloat(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, float, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def getboolean(self, section, option, *, raw=False, vars=None, fallback=_UNSET): try: return self._get(section, self._convert_to_boolean, option, raw=raw, vars=vars) except (NoSectionError, NoOptionError): if fallback is _UNSET: raise else: return fallback def items(self, section=_UNSET, raw=False, vars=None): """Return a list of (name, value) tuples for each option in a section. All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument `raw' is true. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special. """ if section is _UNSET: return super().items() d = self._defaults.copy() try: d.update(self._sections[section]) except KeyError: if section != self.default_section: raise NoSectionError(section) # Update with the entry specific variables if vars: for key, value in vars.items(): d[self.optionxform(key)] = value value_getter = lambda option: self._interpolation.before_get(self, section, option, d[option], d) if raw: value_getter = lambda option: d[option] return [(option, value_getter(option)) for option in d.keys()] def popitem(self): """Remove a section from the parser and return it as a (section_name, section_proxy) tuple. If no section is present, raise KeyError. The section DEFAULT is never returned because it cannot be removed. """ for key in self.sections(): value = self[key] del self[key] return key, value raise KeyError def optionxform(self, optionstr): return optionstr.lower() def has_option(self, section, option): """Check for the existence of a given option in a given section. If the specified `section' is None or an empty string, DEFAULT is assumed. 
If the specified `section' does not exist, returns False.""" if not section or section == self.default_section: option = self.optionxform(option) return option in self._defaults elif section not in self._sections: return False else: option = self.optionxform(option) return (option in self._sections[section] or option in self._defaults) def set(self, section, option, value=None): """Set an option.""" if value: value = self._interpolation.before_set(self, section, option, value) if not section or section == self.default_section: sectdict = self._defaults else: try: sectdict = self._sections[section] except KeyError: raise NoSectionError(section) sectdict[self.optionxform(option)] = value def write(self, fp, space_around_delimiters=True): """Write an .ini-format representation of the configuration state. If `space_around_delimiters' is True (the default), delimiters between keys and values are surrounded by spaces. """ if space_around_delimiters: d = " {} ".format(self._delimiters[0]) else: d = self._delimiters[0] if self._defaults: self._write_section(fp, self.default_section, self._defaults.items(), d) for section in self._sections: self._write_section(fp, section, self._sections[section].items(), d) def _write_section(self, fp, section_name, section_items, delimiter): """Write a single section to the specified `fp'.""" fp.write("[{}]\n".format(section_name)) for key, value in section_items: value = self._interpolation.before_write(self, section_name, key, value) if value is not None or not self._allow_no_value: value = delimiter + str(value).replace('\n', '\n\t') else: value = "" fp.write("{}{}\n".format(key, value)) fp.write("\n") def remove_option(self, section, option): """Remove an option.""" if not section or section == self.default_section: sectdict = self._defaults else: try: sectdict = self._sections[section] except KeyError: raise NoSectionError(section) option = self.optionxform(option) existed = option in sectdict if existed: del sectdict[option] return existed def remove_section(self, section): """Remove a file section.""" existed = section in self._sections if existed: del self._sections[section] del self._proxies[section] return existed def __getitem__(self, key): if key != self.default_section and not self.has_section(key): raise KeyError(key) return self._proxies[key] def __setitem__(self, key, value): # To conform with the mapping protocol, overwrites existing values in # the section. # XXX this is not atomic if read_dict fails at any point. Then again, # no update method in configparser is atomic in this implementation. if key == self.default_section: self._defaults.clear() elif key in self._sections: self._sections[key].clear() self.read_dict({key: value}) def __delitem__(self, key): if key == self.default_section: raise ValueError("Cannot remove the default section.") if not self.has_section(key): raise KeyError(key) self.remove_section(key) def __contains__(self, key): return key == self.default_section or self.has_section(key) def __len__(self): return len(self._sections) + 1 # the default section def __iter__(self): # XXX does it break when underlying container state changed? return itertools.chain((self.default_section,), self._sections.keys()) def _read(self, fp, fpname): """Parse a sectioned configuration file. Each section in a configuration file contains a header, indicated by a name in square brackets (`[]'), plus key/value options, indicated by `name' and `value' delimited with a specific substring (`=' or `:' by default). 
Values can span multiple lines, as long as they are indented deeper than the first line of the value. Depending on the parser's mode, blank lines may be treated as parts of multiline values or ignored. Configuration files may include comments, prefixed by specific characters (`#' and `;' by default). Comments may appear on their own in an otherwise empty line or may be entered in lines holding values or section names. """ elements_added = set() cursect = None # None, or a dictionary sectname = None optname = None lineno = 0 indent_level = 0 e = None # None, or an exception for lineno, line in enumerate(fp, start=1): comment_start = sys.maxsize # strip inline comments inline_prefixes = {p: -1 for p in self._inline_comment_prefixes} while comment_start == sys.maxsize and inline_prefixes: next_prefixes = {} for prefix, index in inline_prefixes.items(): index = line.find(prefix, index+1) if index == -1: continue next_prefixes[prefix] = index if index == 0 or (index > 0 and line[index-1].isspace()): comment_start = min(comment_start, index) inline_prefixes = next_prefixes # strip full line comments for prefix in self._comment_prefixes: if line.strip().startswith(prefix): comment_start = 0 break if comment_start == sys.maxsize: comment_start = None value = line[:comment_start].strip() if not value: if self._empty_lines_in_values: # add empty line to the value, but only if there was no # comment on the line if (comment_start is None and cursect is not None and optname and cursect[optname] is not None): cursect[optname].append('') # newlines added at join else: # empty line marks end of value indent_level = sys.maxsize continue # continuation line? first_nonspace = self.NONSPACECRE.search(line) cur_indent_level = first_nonspace.start() if first_nonspace else 0 if (cursect is not None and optname and cur_indent_level > indent_level): cursect[optname].append(value) # a section header or option header? else: indent_level = cur_indent_level # is it a section header? mo = self.SECTCRE.match(value) if mo: sectname = mo.group('header') if sectname in self._sections: if self._strict and sectname in elements_added: raise DuplicateSectionError(sectname, fpname, lineno) cursect = self._sections[sectname] elements_added.add(sectname) elif sectname == self.default_section: cursect = self._defaults else: cursect = self._dict() self._sections[sectname] = cursect self._proxies[sectname] = SectionProxy(self, sectname) elements_added.add(sectname) # So sections can't start with a continuation line optname = None # no section header in the file? elif cursect is None: raise MissingSectionHeaderError(fpname, lineno, line) # an option line? else: mo = self._optcre.match(value) if mo: optname, vi, optval = mo.group('option', 'vi', 'value') if not optname: e = self._handle_error(e, fpname, lineno, line) optname = self.optionxform(optname.rstrip()) if (self._strict and (sectname, optname) in elements_added): raise DuplicateOptionError(sectname, optname, fpname, lineno) elements_added.add((sectname, optname)) # This check is fine because the OPTCRE cannot # match if it would set optval to None if optval is not None: optval = optval.strip() cursect[optname] = [optval] else: # valueless option handling cursect[optname] = None else: # a non-fatal parsing error occurred. set up the # exception but keep going. 
the exception will be # raised at the end of the file and will contain a # list of all bogus lines e = self._handle_error(e, fpname, lineno, line) # if any parsing errors occurred, raise an exception if e: raise e self._join_multiline_values() def _join_multiline_values(self): defaults = self.default_section, self._defaults all_sections = itertools.chain((defaults,), self._sections.items()) for section, options in all_sections: for name, val in options.items(): if isinstance(val, list): val = '\n'.join(val).rstrip() options[name] = self._interpolation.before_read(self, section, name, val) def _handle_error(self, exc, fpname, lineno, line): if not exc: exc = ParsingError(fpname) exc.append(lineno, repr(line)) return exc def _unify_values(self, section, vars): """Create a sequence of lookups with 'vars' taking priority over the 'section' which takes priority over the DEFAULTSECT. """ sectiondict = {} try: sectiondict = self._sections[section] except KeyError: if section != self.default_section: raise NoSectionError(section) # Update with the entry specific variables vardict = {} if vars: for key, value in vars.items(): if value is not None: value = str(value) vardict[self.optionxform(key)] = value return _ChainMap(vardict, sectiondict, self._defaults) def _convert_to_boolean(self, value): """Return a boolean value translating from other types if necessary. """ if value.lower() not in self.BOOLEAN_STATES: raise ValueError('Not a boolean: %s' % value) return self.BOOLEAN_STATES[value.lower()] def _validate_value_types(self, *, section="", option="", value=""): """Raises a TypeError for non-string values. The only legal non-string value if we allow valueless options is None, so we need to check if the value is a string if: - we do not allow valueless options, or - we allow valueless options but the value is not None For compatibility reasons this method is not used in classic set() for RawConfigParsers. It is invoked in every case for mapping protocol access and in ConfigParser.set(). """ if not isinstance(section, str): raise TypeError("section names must be strings") if not isinstance(option, str): raise TypeError("option keys must be strings") if not self._allow_no_value or value: if not isinstance(value, str): raise TypeError("option values must be strings") class ConfigParser(RawConfigParser): """ConfigParser implementing interpolation.""" _DEFAULT_INTERPOLATION = BasicInterpolation() def set(self, section, option, value=None): """Set an option. Extends RawConfigParser.set by validating type and interpolation syntax on the value.""" self._validate_value_types(option=option, value=value) super().set(section, option, value) def add_section(self, section): """Create a new section in the configuration. Extends RawConfigParser.add_section by validating if the section name is a string.""" self._validate_value_types(section=section) super().add_section(section) class SafeConfigParser(ConfigParser): """ConfigParser alias for backwards compatibility purposes.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn( "The SafeConfigParser class has been renamed to ConfigParser " "in Python 3.2. This alias will be removed in future versions." 
" Use ConfigParser directly instead.", DeprecationWarning, stacklevel=2 ) class SectionProxy(MutableMapping): """A proxy for a single section from a parser.""" def __init__(self, parser, name): """Creates a view on a section of the specified `name` in `parser`.""" self._parser = parser self._name = name def __repr__(self): return '<Section: {}>'.format(self._name) def __getitem__(self, key): if not self._parser.has_option(self._name, key): raise KeyError(key) return self._parser.get(self._name, key) def __setitem__(self, key, value): self._parser._validate_value_types(option=key, value=value) return self._parser.set(self._name, key, value) def __delitem__(self, key): if not (self._parser.has_option(self._name, key) and self._parser.remove_option(self._name, key)): raise KeyError(key) def __contains__(self, key): return self._parser.has_option(self._name, key) def __len__(self): return len(self._options()) def __iter__(self): return self._options().__iter__() def _options(self): if self._name != self._parser.default_section: return self._parser.options(self._name) else: return self._parser.defaults() def get(self, option, fallback=None, *, raw=False, vars=None): return self._parser.get(self._name, option, raw=raw, vars=vars, fallback=fallback) def getint(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getint(self._name, option, raw=raw, vars=vars, fallback=fallback) def getfloat(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getfloat(self._name, option, raw=raw, vars=vars, fallback=fallback) def getboolean(self, option, fallback=None, *, raw=False, vars=None): return self._parser.getboolean(self._name, option, raw=raw, vars=vars, fallback=fallback) @property def parser(self): # The parser object of the proxy is read-only. return self._parser @property def name(self): # The name of the section on a proxy is read-only. return self._name
gpl-3.0
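The configparser source above documents get(), the typed getters, items() and the interpolation classes at length. As a quick illustration of that API, a minimal sketch using the standard-library configparser that this file mirrors; the section and option names are invented for the example.

# Minimal sketch of the API documented above; names are illustrative only.
from configparser import ConfigParser, ExtendedInterpolation

parser = ConfigParser(interpolation=ExtendedInterpolation())
parser.read_string("""
[paths]
home = /opt/app
logs = ${home}/logs

[server]
debug = yes
""")

print(parser.get('paths', 'logs'))                     # /opt/app/logs
print(parser.getboolean('server', 'debug'))            # True
print(parser.getint('server', 'port', fallback=8080))  # 8080, via fallback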
relictMarauder/kodi
plugin.video.relict.hdout.tv/default.py
1
5939
#!/usr/bin/python # -*- coding: utf-8 -*- import xbmcaddon, xbmc, xbmcgui, xbmcplugin import sys import os import traceback from resources.lib.debug import RemoteDebug import resources.lib.series as hdout_series import resources.lib.episodes as hdout_episodes import resources.lib.common as hdout_common debug = RemoteDebug(False) debug.start() # Plugin config config = xbmcaddon.Addon(id='plugin.video.relict.hdout.tv') lang = config.getLocalizedString login = config.getSetting('login') password = config.getSetting('password') handle = int(sys.argv[1]) # thumb = os.path.join(os.getcwd().replace(';', ''), "icon.png") thumb = os.path.join(config.getAddonInfo('path'), "icon.png") plugin = 'HDOut.TV' rootURL = {'hd': 'http://hdout.tv/', 'uaj': 'http://uaj.cc/'} def show_series(_pv): return hdout_series.show_all_series_list(True, False, _pv['tp']) def show_new_series(_pv): return hdout_series.show_new_all_series_list(True, False, _pv['tp']) def show_new_my_series(_pv): return hdout_series.show_new_my_series_list(True, False, _pv['tp']) def show_my_series(_pv): return hdout_series.show_my_series_list(False, True, _pv['tp']) def show_episodes(_pv): return hdout_episodes.show_episodes(_pv) def show_episode(_pv): return hdout_episodes.show_episode(_pv) def get_item(lang_id, url): return ((sys.argv[0] + url, xbmcgui.ListItem(lang(lang_id), iconImage=thumb, thumbnailImage=thumb), True,)) def mark_viewed(_pv): hdout_common.get('?usecase=MarkEpisode&id=%s' % _pv['id'], _pv['tp']) xbmc.sleep(12) hdout_series.clear_cache() xbmc.executebuiltin('Container.Refresh') def unmark_viewed(_pv): hdout_common.get('?usecase=UnmarkEpisode&id=%s' % _pv['id'], _pv['tp']) xbmc.sleep(12) hdout_series.clear_cache() xbmc.executebuiltin('Container.Refresh') def addToFav(pv): s = hdout_common.get('AddToFavorites/' + pv['id'] + '/', pv['tp']) if s is None: hdout_common.showMessage(lang(30003), lang(30004)) return False xbmc.sleep(12) hdout_series.clear_cache() xbmc.executebuiltin('Container.Refresh') def rmFromFav(pv): s = hdout_common.get('RemoveFromFavorites/' + pv['id'] + '/', pv['tp']) if s is None: hdout_common.showMessage(lang(30003), lang(30004)) return False xbmc.sleep(12) hdout_series.clear_cache() xbmc.executebuiltin('Container.Refresh') def default(pv): global handle list_items = [get_item(30010, '?f=show_series&tp=hd'), get_item(30011, '?f=show_my_series&tp=hd'), get_item(30012, '?f=show_new_series&tp=hd'), get_item(30013, '?f=show_new_my_series&tp=hd'), get_item(30020, '?f=show_series&tp=uaj'), get_item(30021, '?f=show_my_series&tp=uaj'), get_item(30022, '?f=show_new_series&tp=uaj'), get_item(30023, '?f=show_new_my_series&tp=uaj'), get_item(30050, '?f=openSettings') ] # hdout_common.add_menu(30010, sys.argv[0] + '?f=show_series&tp=hd') # hdout_common.add_menu(30011, sys.argv[0] + '?f=show_my_series&tp=hd') # # addMenu(30012, sys.argv[0] + '?f=showRSS&tp=hd') # hdout_common.add_menu(30012, sys.argv[0] + '?f=show_new_series&tp=hd') # hdout_common.add_menu(30013, sys.argv[0] + '?f=showMyRSS&tp=hd') # # hdout_common.add_menu(30020, sys.argv[0] + '?f=show_series&tp=uaj') # hdout_common.add_menu(30021, sys.argv[0] + '?f=showMySeries&tp=uaj') # hdout_common.add_menu(30022, sys.argv[0] + '?f=showRSS&tp=uaj') # hdout_common.add_menu(30023, sys.argv[0] + '?f=showMyRSS&tp=uaj') # # hdout_common.add_menu(30050, sys.argv[0] + '?f=openSettings') xbmcplugin.addDirectoryItems(handle, list_items, len(list_items)) xbmcplugin.endOfDirectory(handle) def init(): global config, login, password, lang while not hdout_common.auth('hd'): 
        user_keyboard = xbmc.Keyboard()
        user_keyboard.setHeading(lang(30001))
        user_keyboard.doModal()
        if user_keyboard.isConfirmed():
            login = user_keyboard.getText()
            pass_keyboard = xbmc.Keyboard()
            pass_keyboard.setHeading(lang(30002))
            pass_keyboard.setHiddenInput(True)
            pass_keyboard.doModal()
            if pass_keyboard.isConfirmed():
                password = pass_keyboard.getText()
                config.setSetting('login', login)
                config.setSetting('password', password)
            else:
                return False
        else:
            return False
    return True


def ping():
    hdout_common.get("PingUser/", "hd")


# XBMC misc
def get_params(dv):
    """Parse the plugin query string from sys.argv[2] into the dict dv."""
    param = dv
    param_string = sys.argv[2]
    if len(param_string) >= 2:
        # Strip the leading '?' and any trailing '/' before splitting.
        cleaned_params = param_string.lstrip('?').rstrip('/')
        for pair in cleaned_params.split('&'):
            split_params = pair.split('=')
            if len(split_params) == 2:
                param[split_params[0]] = split_params[1]
    return param


# Main processing
try:
    if init():
        pv = {'f': None, 'id': 0, 'tp': 'hd'}
        funs = ['show_series', 'show_new_series', 'show_my_series',
                'show_episodes', 'show_episode', 'showRSS', 'show_new_my_series',
                'openSettings', 'addToFav', 'rmFromFav', 'mark_viewed', 'unmark_viewed']
        pvm = get_params(pv)
        ping()
        if pvm['f'] in funs:
            eval(pvm['f'] + "(pv)")
        else:
            default(pv)
    debug.stop()
except Exception as e:
    xbmc.log('-----------------', 4)
    # traceback.format_exc() takes no exc_info argument; it formats the
    # exception currently being handled.
    xbmc.log(traceback.format_exc(), 4)
    debug.stop()
    raise e
apache-2.0
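The plugin's get_params() above parses Kodi's query string by hand. For reference, a sketch of an equivalent parser built on the Python 2 standard library; the function name get_params_stdlib is invented, and it assumes the same sys.argv[2] convention Kodi passes to plugins.

# Hedged alternative to the hand-rolled splitting above, Python 2 stdlib only.
import sys
import urlparse

def get_params_stdlib(defaults):
    params = dict(defaults)
    query = sys.argv[2].lstrip('?').rstrip('/')
    for key, values in urlparse.parse_qs(query).items():
        params[key] = values[0]  # parse_qs returns a list of values per key
    return params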
bobwalker99/Pydev
plugins/org.python.pydev.jython/Lib/compiler/symbols.py
134
14489
"""Module symbol-table generator""" from compiler import ast from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICIT, \ SC_FREE, SC_CELL, SC_UNKNOWN from compiler.misc import mangle import types import sys MANGLE_LEN = 256 class Scope: # XXX how much information do I need about each name? def __init__(self, name, module, klass=None): self.name = name self.module = module self.defs = {} self.uses = {} self.globals = {} self.params = {} self.frees = {} self.cells = {} self.children = [] # nested is true if the class could contain free variables, # i.e. if it is nested within another function. self.nested = None self.generator = None self.klass = None if klass is not None: for i in range(len(klass)): if klass[i] != '_': self.klass = klass[i:] break def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.name) def mangle(self, name): if self.klass is None: return name return mangle(name, self.klass) def add_def(self, name): self.defs[self.mangle(name)] = 1 def add_use(self, name): self.uses[self.mangle(name)] = 1 def add_global(self, name): name = self.mangle(name) if name in self.uses or name in self.defs: pass # XXX warn about global following def/use if name in self.params: raise SyntaxError, "%s in %s is global and parameter" % \ (name, self.name) self.globals[name] = 1 self.module.add_def(name) def add_param(self, name): name = self.mangle(name) self.defs[name] = 1 self.params[name] = 1 def get_names(self): d = {} d.update(self.defs) d.update(self.uses) d.update(self.globals) return d.keys() def add_child(self, child): self.children.append(child) def get_children(self): return self.children def DEBUG(self): print >> sys.stderr, self.name, self.nested and "nested" or "" print >> sys.stderr, "\tglobals: ", self.globals print >> sys.stderr, "\tcells: ", self.cells print >> sys.stderr, "\tdefs: ", self.defs print >> sys.stderr, "\tuses: ", self.uses print >> sys.stderr, "\tfrees:", self.frees def check_name(self, name): """Return scope of name. The scope of a name could be LOCAL, GLOBAL, FREE, or CELL. """ if name in self.globals: return SC_GLOBAL_EXPLICIT if name in self.cells: return SC_CELL if name in self.defs: return SC_LOCAL if self.nested and (name in self.frees or name in self.uses): return SC_FREE if self.nested: return SC_UNKNOWN else: return SC_GLOBAL_IMPLICIT def get_free_vars(self): if not self.nested: return () free = {} free.update(self.frees) for name in self.uses.keys(): if name not in self.defs and name not in self.globals: free[name] = 1 return free.keys() def handle_children(self): for child in self.children: frees = child.get_free_vars() globals = self.add_frees(frees) for name in globals: child.force_global(name) def force_global(self, name): """Force name to be global in scope. Some child of the current node had a free reference to name. When the child was processed, it was labelled a free variable. Now that all its enclosing scope have been processed, the name is known to be a global or builtin. So walk back down the child chain and set the name to be global rather than free. Be careful to stop if a child does not think the name is free. """ self.globals[name] = 1 if name in self.frees: del self.frees[name] for child in self.children: if child.check_name(name) == SC_FREE: child.force_global(name) def add_frees(self, names): """Process list of free vars from nested scope. Returns a list of names that are either 1) declared global in the parent or 2) undefined in a top-level parent. 
In either case, the nested scope should treat them as globals. """ child_globals = [] for name in names: sc = self.check_name(name) if self.nested: if sc == SC_UNKNOWN or sc == SC_FREE \ or isinstance(self, ClassScope): self.frees[name] = 1 elif sc == SC_GLOBAL_IMPLICIT: child_globals.append(name) elif isinstance(self, FunctionScope) and sc == SC_LOCAL: self.cells[name] = 1 elif sc != SC_CELL: child_globals.append(name) else: if sc == SC_LOCAL: self.cells[name] = 1 elif sc != SC_CELL: child_globals.append(name) return child_globals def get_cell_vars(self): return self.cells.keys() class ModuleScope(Scope): __super_init = Scope.__init__ def __init__(self): self.__super_init("global", self) class FunctionScope(Scope): pass class GenExprScope(Scope): __super_init = Scope.__init__ __counter = 1 def __init__(self, module, klass=None): i = self.__counter self.__counter += 1 self.__super_init("generator expression<%d>"%i, module, klass) self.add_param('.0') def get_names(self): keys = Scope.get_names(self) return keys class LambdaScope(FunctionScope): __super_init = Scope.__init__ __counter = 1 def __init__(self, module, klass=None): i = self.__counter self.__counter += 1 self.__super_init("lambda.%d" % i, module, klass) class ClassScope(Scope): __super_init = Scope.__init__ def __init__(self, name, module): self.__super_init(name, module, name) class SymbolVisitor: def __init__(self): self.scopes = {} self.klass = None # node that define new scopes def visitModule(self, node): scope = self.module = self.scopes[node] = ModuleScope() self.visit(node.node, scope) visitExpression = visitModule def visitFunction(self, node, parent): if node.decorators: self.visit(node.decorators, parent) parent.add_def(node.name) for n in node.defaults: self.visit(n, parent) scope = FunctionScope(node.name, self.module, self.klass) if parent.nested or isinstance(parent, FunctionScope): scope.nested = 1 self.scopes[node] = scope self._do_args(scope, node.argnames) self.visit(node.code, scope) self.handle_free_vars(scope, parent) def visitGenExpr(self, node, parent): scope = GenExprScope(self.module, self.klass); if parent.nested or isinstance(parent, FunctionScope) \ or isinstance(parent, GenExprScope): scope.nested = 1 self.scopes[node] = scope self.visit(node.code, scope) self.handle_free_vars(scope, parent) def visitGenExprInner(self, node, scope): for genfor in node.quals: self.visit(genfor, scope) self.visit(node.expr, scope) def visitGenExprFor(self, node, scope): self.visit(node.assign, scope, 1) self.visit(node.iter, scope) for if_ in node.ifs: self.visit(if_, scope) def visitGenExprIf(self, node, scope): self.visit(node.test, scope) def visitLambda(self, node, parent, assign=0): # Lambda is an expression, so it could appear in an expression # context where assign is passed. The transformer should catch # any code that has a lambda on the left-hand side. 
assert not assign for n in node.defaults: self.visit(n, parent) scope = LambdaScope(self.module, self.klass) if parent.nested or isinstance(parent, FunctionScope): scope.nested = 1 self.scopes[node] = scope self._do_args(scope, node.argnames) self.visit(node.code, scope) self.handle_free_vars(scope, parent) def _do_args(self, scope, args): for name in args: if type(name) == types.TupleType: self._do_args(scope, name) else: scope.add_param(name) def handle_free_vars(self, scope, parent): parent.add_child(scope) scope.handle_children() def visitClass(self, node, parent): parent.add_def(node.name) for n in node.bases: self.visit(n, parent) scope = ClassScope(node.name, self.module) if parent.nested or isinstance(parent, FunctionScope): scope.nested = 1 if node.doc is not None: scope.add_def('__doc__') scope.add_def('__module__') self.scopes[node] = scope prev = self.klass self.klass = node.name self.visit(node.code, scope) self.klass = prev self.handle_free_vars(scope, parent) # name can be a def or a use # XXX a few calls and nodes expect a third "assign" arg that is # true if the name is being used as an assignment. only # expressions contained within statements may have the assign arg. def visitName(self, node, scope, assign=0): if assign: scope.add_def(node.name) else: scope.add_use(node.name) # operations that bind new names def visitFor(self, node, scope): self.visit(node.assign, scope, 1) self.visit(node.list, scope) self.visit(node.body, scope) if node.else_: self.visit(node.else_, scope) def visitFrom(self, node, scope): for name, asname in node.names: if name == "*": continue scope.add_def(asname or name) def visitImport(self, node, scope): for name, asname in node.names: i = name.find(".") if i > -1: name = name[:i] scope.add_def(asname or name) def visitGlobal(self, node, scope): for name in node.names: scope.add_global(name) def visitAssign(self, node, scope): """Propagate assignment flag down to child nodes. The Assign node doesn't itself contains the variables being assigned to. Instead, the children in node.nodes are visited with the assign flag set to true. When the names occur in those nodes, they are marked as defs. Some names that occur in an assignment target are not bound by the assignment, e.g. a name occurring inside a slice. The visitor handles these nodes specially; they do not propagate the assign flag to their children. """ for n in node.nodes: self.visit(n, scope, 1) self.visit(node.expr, scope) def visitAssName(self, node, scope, assign=1): scope.add_def(node.name) def visitAssAttr(self, node, scope, assign=0): self.visit(node.expr, scope, 0) def visitSubscript(self, node, scope, assign=0): self.visit(node.expr, scope, 0) for n in node.subs: self.visit(n, scope, 0) def visitSlice(self, node, scope, assign=0): self.visit(node.expr, scope, 0) if node.lower: self.visit(node.lower, scope, 0) if node.upper: self.visit(node.upper, scope, 0) def visitAugAssign(self, node, scope): # If the LHS is a name, then this counts as assignment. # Otherwise, it's just use. 
self.visit(node.node, scope) if isinstance(node.node, ast.Name): self.visit(node.node, scope, 1) # XXX worry about this self.visit(node.expr, scope) # prune if statements if tests are false _const_types = types.StringType, types.IntType, types.FloatType def visitIf(self, node, scope): for test, body in node.tests: if isinstance(test, ast.Const): if type(test.value) in self._const_types: if not test.value: continue self.visit(test, scope) self.visit(body, scope) if node.else_: self.visit(node.else_, scope) # a yield statement signals a generator def visitYield(self, node, scope): scope.generator = 1 self.visit(node.value, scope) def list_eq(l1, l2): return sorted(l1) == sorted(l2) if __name__ == "__main__": import sys from compiler import parseFile, walk import symtable def get_names(syms): return [s for s in [s.get_name() for s in syms.get_symbols()] if not (s.startswith('_[') or s.startswith('.'))] for file in sys.argv[1:]: print file f = open(file) buf = f.read() f.close() syms = symtable.symtable(buf, file, "exec") mod_names = get_names(syms) tree = parseFile(file) s = SymbolVisitor() walk(tree, s) # compare module-level symbols names2 = s.scopes[tree].get_names() if not list_eq(mod_names, names2): print print "oops", file print sorted(mod_names) print sorted(names2) sys.exit(-1) d = {} d.update(s.scopes) del d[tree] scopes = d.values() del d for s in syms.get_symbols(): if s.is_namespace(): l = [sc for sc in scopes if sc.name == s.get_name()] if len(l) > 1: print "skipping", s.get_name() else: if not list_eq(get_names(s.get_namespace()), l[0].get_names()): print s.get_name() print sorted(get_names(s.get_namespace())) print sorted(l[0].get_names()) sys.exit(-1)
epl-1.0
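The SymbolVisitor above reimplements scope analysis for the legacy compiler package, and its __main__ block cross-checks the results against the standard-library symtable module. A short symtable illustration of the same local/global/free classification; the source snippet is invented.

# Stdlib symtable classifying scopes the way the visitor above does.
import symtable

src = """
g = 1
def outer():
    x = 2
    def inner():
        return x + g
    return inner
"""
table = symtable.symtable(src, '<example>', 'exec')
outer = table.lookup('outer').get_namespace()
inner = outer.lookup('inner').get_namespace()
for sym in inner.get_symbols():
    kind = 'free' if sym.is_free() else 'global' if sym.is_global() else 'local'
    print(sym.get_name(), kind)  # x is free, g is global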
Dino0631/RedRain-Bot
lib/youtube_dl/extractor/mailru.py
42
5086
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import ( int_or_none, remove_end, ) class MailRuIE(InfoExtractor): IE_NAME = 'mailru' IE_DESC = 'Видео@Mail.Ru' _VALID_URL = r'https?://(?:(?:www|m)\.)?my\.mail\.ru/(?:video/.*#video=/?(?P<idv1>(?:[^/]+/){3}\d+)|(?:(?P<idv2prefix>(?:[^/]+/){2})video/(?P<idv2suffix>[^/]+/\d+))\.html)' _TESTS = [ { 'url': 'http://my.mail.ru/video/top#video=/mail/sonypicturesrus/75/76', 'md5': 'dea205f03120046894db4ebb6159879a', 'info_dict': { 'id': '46301138_76', 'ext': 'mp4', 'title': 'Новый Человек-Паук. Высокое напряжение. Восстание Электро', 'timestamp': 1393232740, 'upload_date': '20140224', 'uploader': 'sonypicturesrus', 'uploader_id': 'sonypicturesrus@mail.ru', 'duration': 184, }, 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://my.mail.ru/corp/hitech/video/news_hi-tech_mail_ru/1263.html', 'md5': '00a91a58c3402204dcced523777b475f', 'info_dict': { 'id': '46843144_1263', 'ext': 'mp4', 'title': 'Samsung Galaxy S5 Hammer Smash Fail Battery Explosion', 'timestamp': 1397039888, 'upload_date': '20140409', 'uploader': 'hitech@corp.mail.ru', 'uploader_id': 'hitech@corp.mail.ru', 'duration': 245, }, 'skip': 'Not accessible from Travis CI server', }, { # only available via metaUrl API 'url': 'http://my.mail.ru/mail/720pizle/video/_myvideo/502.html', 'md5': '3b26d2491c6949d031a32b96bd97c096', 'info_dict': { 'id': '56664382_502', 'ext': 'mp4', 'title': ':8336', 'timestamp': 1449094163, 'upload_date': '20151202', 'uploader': '720pizle@mail.ru', 'uploader_id': '720pizle@mail.ru', 'duration': 6001, }, 'skip': 'Not accessible from Travis CI server', }, { 'url': 'http://m.my.mail.ru/mail/3sktvtr/video/_myvideo/138.html', 'only_matching': True, } ] def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group('idv1') if not video_id: video_id = mobj.group('idv2prefix') + mobj.group('idv2suffix') webpage = self._download_webpage(url, video_id) video_data = None page_config = self._parse_json(self._search_regex( r'(?s)<script[^>]+class="sp-video__page-config"[^>]*>(.+?)</script>', webpage, 'page config', default='{}'), video_id, fatal=False) if page_config: meta_url = page_config.get('metaUrl') or page_config.get('video', {}).get('metaUrl') if meta_url: video_data = self._download_json( meta_url, video_id, 'Downloading video meta JSON', fatal=False) # Fallback old approach if not video_data: video_data = self._download_json( 'http://api.video.mail.ru/videos/%s.json?new=1' % video_id, video_id, 'Downloading video JSON') formats = [] for f in video_data['videos']: video_url = f.get('url') if not video_url: continue format_id = f.get('key') height = int_or_none(self._search_regex( r'^(\d+)[pP]$', format_id, 'height', default=None)) if format_id else None formats.append({ 'url': video_url, 'format_id': format_id, 'height': height, }) self._sort_formats(formats) meta_data = video_data['meta'] title = remove_end(meta_data['title'], '.mp4') author = video_data.get('author') uploader = author.get('name') uploader_id = author.get('id') or author.get('email') view_count = int_or_none(video_data.get('viewsCount') or video_data.get('views_count')) acc_id = meta_data.get('accId') item_id = meta_data.get('itemId') content_id = '%s_%s' % (acc_id, item_id) if acc_id and item_id else video_id thumbnail = meta_data.get('poster') duration = int_or_none(meta_data.get('duration')) timestamp = int_or_none(meta_data.get('timestamp')) return { 'id': content_id, 'title': title, 
'thumbnail': thumbnail, 'timestamp': timestamp, 'uploader': uploader, 'uploader_id': uploader_id, 'duration': duration, 'view_count': view_count, 'formats': formats, }
gpl-3.0
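The extractor above derives a numeric height from format ids like '720p' with a small regex. The same pattern in isolation:

# The height-from-format_id pattern used by the extractor above.
import re

def height_from_format_id(format_id):
    m = re.search(r'^(\d+)[pP]$', format_id or '')
    return int(m.group(1)) if m else None

print(height_from_format_id('720p'))  # 720
print(height_from_format_id('hls'))   # None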
Dekken/tick
tick/base/threadpool.py
2
1129
# License: BSD 3 clause

import threading
import queue


class ThreadPool(object):
    def __init__(self, with_lock=False, max_threads=8):
        object.__init__(self)
        self._Qin = queue.Queue()
        self._max_threads = max_threads
        self._n_works = 0
        self._Qerr = queue.Queue()
        if with_lock:
            self.lock = threading.Lock()
        else:
            self.lock = None

    def add_work(self, callable, *args, **kwargs):
        self._n_works += 1
        self._Qin.put(
            dict(callable=callable, args=args, kwargs=kwargs, me=self))

    def _worker(self, n):
        while True:
            try:
                work = self._Qin.get(False)
            except queue.Empty:
                break
            try:
                work['callable'](*work['args'], **work['kwargs'])
            except Exception as e:
                # Record the error instead of killing the worker thread.
                self._Qerr.put(e)
            finally:
                # Always mark the item done so _Qin.join() cannot hang.
                self._Qin.task_done()

    def start(self):
        for n in range(min(self._n_works, self._max_threads)):
            t = threading.Thread(target=self._worker, args=(n,))
            t.start()
        self._Qin.join()
bsd-3-clause
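The ThreadPool above is driven by queuing work items and then calling start(), which blocks until the input queue drains. A usage sketch; the fetch() task is an invented placeholder.

# Driving the ThreadPool defined above.
def fetch(url):
    print('fetching', url)

pool = ThreadPool(max_threads=4)
for u in ('a', 'b', 'c'):
    pool.add_work(fetch, u)
pool.start()  # returns once all queued work is done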
0x0mar/king-phisher
tests/client/export.py
9
2721
#!/usr/bin/env python # -*- coding: utf-8 -*- # # tests/client/export.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import unittest from king_phisher import testing from king_phisher.client.export import * from king_phisher.client.export import message_template_from_kpm from king_phisher.client.export import message_template_to_kpm class ClientExportTests(testing.KingPhisherTestCase): def test_value_conversions(self): self.assertEqual(convert_value('campaigns', 'reject_after_credentials', False), 'False') self.assertEqual(convert_value('campaigns', 'reject_after_credentials', True), 'True') self.assertIsNone(convert_value('messages', 'opened', None)) def test_message_template_kpm(self): # test to_kpm first template, files = message_template_to_kpm(testing.TEST_MESSAGE_TEMPLATE) self.assertIn("""{{ inline_image(\'image.png\') }}""", template) msg = 'The inline image path was not returned in the list of files' self.assertEqual(len(files), 1, msg=msg) self.assertIn(testing.TEST_MESSAGE_TEMPLATE_INLINE_IMAGE, files, msg=msg) # then feed the results into from_kpm template = message_template_from_kpm(template, files) self.assertEqual(template, testing.TEST_MESSAGE_TEMPLATE) if __name__ == '__main__': unittest.main()
bsd-3-clause
quantumlib/Cirq
cirq-core/cirq/contrib/graph_device/graph_device_test.py
1
7734
# Copyright 2018 The Cirq Developers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest import cirq import cirq.contrib.graph_device as ccgd import cirq.contrib.graph_device.graph_device as ccgdgd def test_fixed_duration_undirected_graph_device_edge_eq(): e = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=4)) f = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=4)) g = ccgd.FixedDurationUndirectedGraphDeviceEdge(cirq.Duration(picos=5)) assert e == f assert e != g assert e != 4 def test_unconstrained_undirected_graph_device_edge_eq(): e = ccgdgd._UnconstrainedUndirectedGraphDeviceEdge() f = ccgd.UnconstrainedUndirectedGraphDeviceEdge assert e == f assert e != 3 def test_is_undirected_device_graph(): assert not ccgd.is_undirected_device_graph('abc') graph = ccgd.UndirectedHypergraph() assert ccgd.is_undirected_device_graph(graph) a, b, c, d, e = cirq.LineQubit.range(5) graph.add_edge((a, b)) assert ccgd.is_undirected_device_graph(graph) graph.add_edge((b, c), ccgd.UnconstrainedUndirectedGraphDeviceEdge) assert ccgd.is_undirected_device_graph(graph) graph.add_edge((d, e), 'abc') assert not ccgd.is_undirected_device_graph(graph) graph = ccgd.UndirectedHypergraph(vertices=(0, 1)) assert not ccgd.is_undirected_device_graph(graph) def test_is_crosstalk_graph(): a, b, c, d, e, f = cirq.LineQubit.range(6) assert not ccgd.is_crosstalk_graph('abc') graph = ccgd.UndirectedHypergraph() graph.add_vertex('abc') assert not ccgd.is_crosstalk_graph(graph) graph = ccgd.UndirectedHypergraph() graph.add_edge((frozenset((a, b)), frozenset((c, d))), 'abc') assert not ccgd.is_crosstalk_graph(graph) graph = ccgd.UndirectedHypergraph() graph.add_edge((frozenset((a, b)), frozenset((c, d))), None) graph.add_edge((frozenset((e, f)), frozenset((c, d))), lambda _: None) assert ccgd.is_crosstalk_graph(graph) graph = ccgd.UndirectedHypergraph() graph.add_edge((frozenset((a, b)), frozenset((c, d))), 'abc') assert not ccgd.is_crosstalk_graph(graph) graph = ccgd.UndirectedHypergraph() graph.add_edge((frozenset((a, b)),), None) assert not ccgd.is_crosstalk_graph(graph) graph = ccgd.UndirectedHypergraph() graph.add_edge((frozenset((0, 1)), frozenset((2, 3))), None) assert not ccgd.is_crosstalk_graph(graph) def test_unconstrained_undirected_graph_device_edge(): edge = ccgd.UnconstrainedUndirectedGraphDeviceEdge qubits = cirq.LineQubit.range(2) assert edge.duration_of(cirq.X(qubits[0])) == cirq.Duration(picos=0) assert edge.duration_of(cirq.CZ(*qubits[:2])) == cirq.Duration(picos=0) def test_graph_device(): one_qubit_duration = cirq.Duration(picos=10) two_qubit_duration = cirq.Duration(picos=1) one_qubit_edge = ccgd.FixedDurationUndirectedGraphDeviceEdge(one_qubit_duration) two_qubit_edge = ccgd.FixedDurationUndirectedGraphDeviceEdge(two_qubit_duration) empty_device = ccgd.UndirectedGraphDevice() assert not empty_device.qubits assert not empty_device.edges n_qubits = 4 qubits = cirq.LineQubit.range(n_qubits) edges = { (cirq.LineQubit(i), cirq.LineQubit((i + 1) % n_qubits)): two_qubit_edge for i in 
range(n_qubits) } edges.update({(cirq.LineQubit(i),): one_qubit_edge for i in range(n_qubits)}) device_graph = ccgd.UndirectedHypergraph(labelled_edges=edges) def not_cnots(first_op, second_op): if all( isinstance(op, cirq.GateOperation) and op.gate == cirq.CNOT for op in (first_op, second_op) ): raise ValueError('Simultaneous CNOTs') assert ccgd.is_undirected_device_graph(device_graph) with pytest.raises(TypeError): ccgd.UndirectedGraphDevice('abc') constraint_edges = { (frozenset(cirq.LineQubit.range(2)), frozenset(cirq.LineQubit.range(2, 4))): None, ( frozenset(cirq.LineQubit.range(1, 3)), frozenset((cirq.LineQubit(0), cirq.LineQubit(3))), ): not_cnots, } crosstalk_graph = ccgd.UndirectedHypergraph(labelled_edges=constraint_edges) assert ccgd.is_crosstalk_graph(crosstalk_graph) with pytest.raises(TypeError): ccgd.UndirectedGraphDevice(device_graph, crosstalk_graph='abc') graph_device = ccgd.UndirectedGraphDevice(device_graph) assert graph_device.crosstalk_graph == ccgd.UndirectedHypergraph() graph_device = ccgd.UndirectedGraphDevice(device_graph, crosstalk_graph=crosstalk_graph) assert sorted(graph_device.edges) == sorted(device_graph.edges) assert graph_device.qubits == tuple(qubits) assert graph_device.device_graph == device_graph assert graph_device.labelled_edges == device_graph.labelled_edges assert graph_device.duration_of(cirq.X(qubits[2])) == one_qubit_duration assert graph_device.duration_of(cirq.CNOT(*qubits[:2])) == two_qubit_duration with pytest.raises(KeyError): graph_device.duration_of(cirq.CNOT(qubits[0], qubits[2])) with pytest.raises(ValueError): graph_device.validate_operation(cirq.CNOT(qubits[0], qubits[2])) with pytest.raises(AttributeError): graph_device.validate_operation(list((2, 3))) moment = cirq.Moment([cirq.CNOT(*qubits[:2]), cirq.CNOT(*qubits[2:])]) with pytest.raises(ValueError): graph_device.validate_moment(moment) moment = cirq.Moment([cirq.CNOT(qubits[0], qubits[3]), cirq.CZ(qubits[1], qubits[2])]) graph_device.validate_moment(moment) moment = cirq.Moment([cirq.CNOT(qubits[0], qubits[3]), cirq.CNOT(qubits[1], qubits[2])]) with pytest.raises(ValueError): graph_device.validate_moment(moment) def test_graph_device_copy_and_add(): a, b, c, d, e, f = cirq.LineQubit.range(6) device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None}) crosstalk_graph = ccgd.UndirectedHypergraph( labelled_edges={(frozenset((a, b)), frozenset((c, d))): None} ) device = ccgd.UndirectedGraphDevice(device_graph=device_graph, crosstalk_graph=crosstalk_graph) device_graph_addend = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (e, f): None}) crosstalk_graph_addend = ccgd.UndirectedHypergraph( labelled_edges={(frozenset((a, b)), frozenset((e, f))): None} ) device_addend = ccgd.UndirectedGraphDevice( device_graph=device_graph_addend, crosstalk_graph=crosstalk_graph_addend ) device_sum = device + device_addend device_copy = device.__copy__() device_copy += device_addend assert device != device_copy assert device_copy == device_sum def test_qubit_set(): a, b, c, d = cirq.LineQubit.range(4) device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None}) device = ccgd.UndirectedGraphDevice(device_graph=device_graph) assert device.qubit_set() == {a, b, c, d} def test_qid_pairs(): a, b, c, d = cirq.LineQubit.range(4) device_graph = ccgd.UndirectedHypergraph(labelled_edges={(a, b): None, (c, d): None}) device = ccgd.UndirectedGraphDevice(device_graph=device_graph) assert len(device.qid_pairs()) == 2
apache-2.0
AudioHumLab/FIRtro
bin_custom.example/monitors_restore.py
2
2340
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom script to restore the connection from the FIRtro audio source
to the signal monitors, for example:
- external spdif monitor (e.g. the Behringer Ultracurve 2496 RTA/VU-Meter)
- (jack) loudness monitor, Fons Adriaensen's Ebumeter
"""
import jack
from sys import path as sys_path
firtroHome = "/home/firtro/"
sys_path.append(firtroHome + "bin/")
from getconfig import *

displays = jack_internal_monitors.split() + jack_external_monitors.split()

if load_ecasound:
    firtro_ports = ecasound_ports
else:
    firtro_ports = brutefir_ports
firtro_ports = firtro_ports.split()  # workaround for the string variables from getconfig

def getDisplaysPorts():
    puertos = jack.get_ports()
    displaysPorts = []
    for display in displays:
        for puerto in puertos:
            if display in puerto:
                displaysPorts.append(puerto)
    return displaysPorts

def desconectaDISPLAYS():
    for portDisplay in getDisplaysPorts():
        clientes = jack.get_connections(portDisplay)
        for cliente in clientes:
            print "(i) disconnecting " + cliente + " " + portDisplay
            jack.disconnect(cliente, portDisplay)
    if not getDisplaysPorts():
        print "(?) nothing to disconnect"

def conectaDISPLAYS():
    info = []
    # First disconnect whatever was left on the displays, for hygiene ;-)
    desconectaDISPLAYS()
    # Take note of the source ports:
    source_ports = []
    for p in firtro_ports:
        source_ports += jack.get_connections(p)
    # Connect the odd-numbered channels (left):
    for o in source_ports[0::2]:
        for d in getDisplaysPorts()[0::2]:
            info.append("(i) connecting " + o + " " + d)
            jack.connect(o, d)
    # Connect the even-numbered channels (right):
    for o in source_ports[1::2]:
        for d in getDisplaysPorts()[1::2]:
            info.append("(i) connecting " + o + " " + d)
            jack.connect(o, d)
    if info:
        for cosa in info:
            print cosa
    else:
        print "(?) nothing to connect"

if __name__ == "__main__":
    try:
        # Attach to the jack server under the name "tmp", just to use something
        jack.attach("tmp")
        conectaDISPLAYS()
        jack.detach()
    except:
        print "problems connecting python to JACK, just so you know"
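A minimal sketch of the odd/even channel pairing used above, with hypothetical port names (the real lists come from getconfig and the live jack graph):

# Hypothetical L/R-interleaved port lists, as the slicing above assumes.
source_ports = ["brutefir:input-0", "brutefir:input-1"]
display_ports = ["ebumeter:in.L", "ebumeter:in.R"]
# [0::2] selects the left channels, [1::2] the right channels,
# so left sources are only ever wired to left monitor inputs.
for o in source_ports[0::2]:
    for d in display_ports[0::2]:
        print "(i) connecting " + o + " " + d
for o in source_ports[1::2]:
    for d in display_ports[1::2]:
        print "(i) connecting " + o + " " + d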
gpl-3.0
broferek/ansible
lib/ansible/modules/storage/purestorage/purefb_ds.py
19
14384
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018, Simon Dodsley (simon@purestorage.com) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- module: purefb_ds version_added: '2.8' short_description: Configure FlashBlade Directory Service description: - Create or erase directory services configurations. There is no facility to configure SSL certificates at this time. Use the FlashBlade GUI for this additional configuration work. - To modify an existing directory service configuration you must first delete the existing configuration and then recreate it with new settings. author: - Pure Storage Ansible Team (@sdodsley) <pure-ansible-team@purestorage.com> options: state: description: - Create or delete directory service configuration default: present type: str choices: [ absent, present ] dstype: description: - The type of directory service to work on choices: [ management, nfs, smb ] type: str enable: description: - Whether to enable or disable directory service support. default: false type: bool uri: description: - A list of up to 30 URIs of the directory servers. Each URI must include the scheme ldap:// or ldaps:// (for LDAP over SSL), a hostname, and a domain name or IP address. For example, ldap://ad.company.com configures the directory service with the hostname "ad" in the domain "company.com" while specifying the unencrypted LDAP protocol. type: list base_dn: description: - Sets the base of the Distinguished Name (DN) of the directory service groups. The base should consist of only Domain Components (DCs). The base_dn will populate with a default value when a URI is entered by parsing domain components from the URI. The base DN should specify DC= for each domain component and multiple DCs should be separated by commas. required: true type: str bind_password: description: - Sets the password of the bind_user user name account. type: str bind_user: description: - Sets the user name that can be used to bind to and query the directory. - For Active Directory, enter the username - often referred to as sAMAccountName or User Logon Name - of the account that is used to perform directory lookups. - For OpenLDAP, enter the full DN of the user. type: str nis_servers: description: - A list of up to 30 IP addresses or FQDNs for NIS servers. - This cannot be used in conjunction with LDAP configurations. type: list version_added: 2.9 nis_domain: description: - The NIS domain to search - This cannot be used in conjunction with LDAP configurations. type: str version_added: 2.9 join_ou: description: - The optional organizational unit (OU) where the machine account for the directory service will be created.
type: str version_added: 2.9 extends_documentation_fragment: - purestorage.fb ''' EXAMPLES = r''' - name: Delete existing management directory service purefb_ds: dstype: management state: absent fb_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: Create NFS directory service (disabled) purefb_ds: dstype: nfs uri: "ldaps://lab.purestorage.com" base_dn: "DC=lab,DC=purestorage,DC=com" bind_user: Administrator bind_password: password fb_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: Enable existing SMB directory service purefb_ds: dstype: smb enable: true fb_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: Disable existing management directory service purefb_ds: dstype: management enable: false fb_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 - name: Create NFS directory service (enabled) purefb_ds: dstype: nfs enable: true uri: "ldaps://lab.purestorage.com" base_dn: "DC=lab,DC=purestorage,DC=com" bind_user: Administrator bind_password: password fb_url: 10.10.10.2 api_token: e31060a7-21fc-e277-6240-25983c6c4592 ''' RETURN = r''' ''' NIS_API_VERSION = '1.7' HAS_PURITY_FB = True try: from purity_fb import DirectoryService except ImportError: HAS_PURITY_FB = False from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.pure import get_blade, purefb_argument_spec def update_ds(module, blade): """Update Directory Service""" # This module is a place-holder until we figure out a way to # update the config on the fly rather than deleting and resetting changed = False module.exit_json(changed=changed) def enable_ds(module, blade): """Enable Directory Service""" changed = True if not module.check_mode: try: blade.directory_services.update_directory_services(names=[module.params['dstype']], directory_service=DirectoryService(enabled=True)) changed = True except Exception: module.fail_json(msg='Enable {0} Directory Service failed: Check Configuration'.format(module.params['dstype'])) module.exit_json(changed=changed) def disable_ds(module, blade): """Disable Directory Service""" changed = True if not module.check_mode: try: blade.directory_services.update_directory_services(names=[module.params['dstype']], directory_service=DirectoryService(enabled=False)) except Exception: module.fail_json(msg='Disable {0} Directory Service failed'.format(module.params['dstype'])) module.exit_json(changed=changed) def delete_ds(module, blade): """Delete Directory Service""" changed = True if not module.check_mode: dirserv = blade.directory_services.list_directory_services(names=[module.params['dstype']]) try: if module.params['dstype'] == 'management': if dirserv.items[0].uris: dir_service = DirectoryService(uris=[''], base_dn="", bind_user="", bind_password="", enabled=False) else: changed = False elif module.params['dstype'] == 'smb': if dirserv.items[0].uris: smb_attrs = {'join_ou': ''} dir_service = DirectoryService(uris=[''], base_dn='', bind_user='', bind_password='', smb=smb_attrs, enabled=False) else: changed = False elif module.params['dstype'] == 'nfs': if dirserv.items[0].uris: dir_service = DirectoryService(uris=[''], base_dn='', bind_user='', bind_password='', enabled=False) elif dirserv.items[0].nfs.nis_domains: nfs_attrs = {'nis_domains': [], 'nis_servers': []} dir_service = DirectoryService(nfs=nfs_attrs, enabled=False) else: changed = False if changed: blade.directory_services.update_directory_services(names=[module.params['dstype']], directory_service=dir_service) except Exception:
module.fail_json(msg='Delete {0} Directory Service failed'.format(module.params['dstype'])) module.exit_json(changed=changed) def create_ds(module, blade): """Create Directory Service""" changed = True if not module.check_mode: try: if module.params['dstype'] == 'management': if module.params['uri']: dir_service = DirectoryService(uris=module.params['uri'][0:30], base_dn=module.params['base_dn'], bind_user=module.params['bind_user'], bind_password=module.params['bind_password'], enabled=module.params['enable']) else: module.fail_json(msg="URI and associated params must be specified to create dstype {0}".format(module.params['dstype'])) elif module.params['dstype'] == 'smb': if module.params['uri']: smb_attrs = {'join_ou': module.params['join_ou']} dir_service = DirectoryService(uris=module.params['uri'][0:30], base_dn=module.params['base_dn'], bind_user=module.params['bind_user'], bind_password=module.params['bind_password'], smb=smb_attrs, enabled=module.params['enable']) else: module.fail_json(msg="URI and associated params must be specified to create dstype {0}".format(module.params['dstype'])) elif module.params['dstype'] == 'nfs': if module.params['nis_domain']: nfs_attrs = {'nis_domains': [module.params['nis_domain']], 'nis_servers': module.params['nis_servers'][0:30]} dir_service = DirectoryService(nfs=nfs_attrs, enabled=module.params['enable']) else: dir_service = DirectoryService(uris=module.params['uri'][0:30], base_dn=module.params['base_dn'], bind_user=module.params['bind_user'], bind_password=module.params['bind_password'], enabled=module.params['enable']) blade.directory_services.update_directory_services(names=[module.params['dstype']], directory_service=dir_service) except Exception: module.fail_json(msg='Create {0} Directory Service failed: Check configuration'.format(module.params['dstype'])) module.exit_json(changed=changed) def main(): argument_spec = purefb_argument_spec() argument_spec.update(dict( uri=dict(type='list'), dstype=dict(required=True, type='str', choices=['management', 'nfs', 'smb']), state=dict(type='str', default='present', choices=['absent', 'present']), enable=dict(type='bool', default=False), bind_password=dict(type='str', no_log=True), bind_user=dict(type='str'), base_dn=dict(type='str'), join_ou=dict(type='str'), nis_domain=dict(type='str'), nis_servers=dict(type='list'), )) required_together = [['uri', 'bind_password', 'bind_user', 'base_dn'], ['nis_servers', 'nis_domain'], ['join_ou', 'uri']] mutually_exclusive = [['uri', 'nis_domain']] module = AnsibleModule(argument_spec, required_together=required_together, mutually_exclusive=mutually_exclusive, supports_check_mode=True) if not HAS_PURITY_FB: module.fail_json(msg='purity_fb sdk is required for this module') state = module.params['state'] blade = get_blade(module) api_version = blade.api_version.list_versions().versions ds_configured = False dirserv = blade.directory_services.list_directory_services(names=[module.params['dstype']]) ds_enabled = dirserv.items[0].enabled if dirserv.items[0].base_dn is not None: ds_configured = True if (module.params['nis_domain'] or module.params['join_ou']) and (NIS_API_VERSION not in api_version): module.fail_json(msg="NFS or SMB directory service attributes are not supported in your FlashBlade Purity version.") ldap_uri = False set_ldap = False for uri in range(0, len(dirserv.items[0].uris)): if "ldap" in dirserv.items[0].uris[uri].lower(): ldap_uri = True if module.params['uri']: for uri in range(0, len(module.params['uri'])): if "ldap" in 
module.params['uri'][uri].lower(): set_ldap = True if not module.params['uri'] and ldap_uri or \ module.params['uri'] and set_ldap: if module.params['nis_servers'] or module.params['nis_domain']: module.fail_json(msg="NIS configuration not supported in an LDAP environment") if state == 'absent': delete_ds(module, blade) elif ds_configured and module.params['enable'] and ds_enabled: update_ds(module, blade) elif ds_configured and not module.params['enable'] and ds_enabled: disable_ds(module, blade) elif ds_configured and module.params['enable'] and not ds_enabled: enable_ds(module, blade) # Now we have enabled the DS lets make sure there aren't any new updates... update_ds(module, blade) elif not ds_configured and state == 'present': create_ds(module, blade) else: module.exit_json(changed=False) if __name__ == '__main__': main()
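The EXAMPLES block above only demonstrates the LDAP paths; for the NIS options added in 2.9 (nis_domain and nis_servers, which are required together and mutually exclusive with uri), a hedged task sketch in the same playbook format, with placeholder addresses and token:

- name: Create NFS directory service using NIS (all values are placeholders)
  purefb_ds:
    dstype: nfs
    enable: true
    nis_domain: nis.lab.purestorage.com
    nis_servers:
      - 10.10.10.10
      - 10.10.10.11
    fb_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592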
gpl-3.0
tiborsimko/invenio-webhooks
docs/conf.py
2
10729
# -*- coding: utf-8 -*- # # This file is part of Invenio. # Copyright (C) 2015 CERN. # # Invenio is free software; you can redistribute it # and/or modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # Invenio is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Invenio; if not, write to the # Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, # MA 02111-1307, USA. # # In applying this license, CERN does not # waive the privileges and immunities granted to it by virtue of its status # as an Intergovernmental Organization or submit itself to any jurisdiction. from __future__ import print_function import os import sphinx.environment # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Do not warn on external images. suppress_warnings = ['image.nonlocal_uri'] # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Invenio-Webhooks' copyright = u'2015, CERN' author = u'CERN' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # Get the version string. Cannot be done with import! g = {} with open(os.path.join('..', 'invenio_webhooks', 'version.py'), 'rt') as fp: exec(fp.read(), g) version = g['__version__'] # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- html_theme = 'alabaster' html_theme_options = { 'description': 'Invenio module for processing webhook events.', 'github_user': 'inveniosoftware', 'github_repo': 'invenio-webhooks', 'github_button': False, 'github_banner': True, 'show_powered_by': False, 'extra_nav_links': { 'invenio-webhooks@GitHub': 'https://github.com/inveniosoftware/invenio-webhooks', 'invenio-webhooks@PyPI': 'https://pypi.python.org/pypi/invenio-webhooks/', } } # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', 'donate.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
#html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'invenio-webhooks_namedoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'invenio-webhooks.tex', u'invenio-webhooks Documentation', u'CERN', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'invenio-webhooks', u'invenio-webhooks Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'invenio-webhooks', u'Invenio-Webhooks Documentation', author, 'invenio-webhooks', 'Invenio module for processing webhook events.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} # Autodoc configuration. autoclass_content = 'both'
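The exec-based version lookup earlier in this configuration only assumes that invenio_webhooks/version.py defines __version__; a minimal sketch of such a file (the version string is a placeholder):

# invenio_webhooks/version.py (sketch)
# conf.py executes this file in a fresh namespace and reads __version__ out of it,
# which avoids importing the package (and its dependencies) at docs-build time.
__version__ = '1.0.0'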
gpl-2.0
jakevdp/scipy
scipy/optimize/_minimize.py
2
27478
""" Unified interfaces to minimization algorithms. Functions --------- - minimize : minimization of a function of several variables. - minimize_scalar : minimization of a function of one variable. """ from __future__ import division, print_function, absolute_import __all__ = ['minimize', 'minimize_scalar'] from warnings import warn import numpy as np from scipy._lib.six import callable # unconstrained minimization from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, _minimize_bfgs, _minimize_newtoncg, _minimize_scalar_brent, _minimize_scalar_bounded, _minimize_scalar_golden, MemoizeJac) from ._trustregion_dogleg import _minimize_dogleg from ._trustregion_ncg import _minimize_trust_ncg from ._trustregion_exact import _minimize_trustregion_exact # constrained minimization from .lbfgsb import _minimize_lbfgsb from .tnc import _minimize_tnc from .cobyla import _minimize_cobyla from .slsqp import _minimize_slsqp def minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None): """Minimization of scalar function of one or more variables. In general, the optimization problems are of the form:: minimize f(x) subject to g_i(x) >= 0, i = 1,...,m h_j(x) = 0, j = 1,...,p where x is a vector of one or more variables. ``g_i(x)`` are the inequality constraints. ``h_j(x)`` are the equality constrains. Optionally, the lower and upper bounds for each element in x can also be specified using the `bounds` argument. Parameters ---------- fun : callable Objective function. x0 : ndarray Initial guess. args : tuple, optional Extra arguments passed to the objective function and its derivatives (Jacobian, Hessian). method : str or callable, optional Type of solver. Should be one of - 'Nelder-Mead' :ref:`(see here) <optimize.minimize-neldermead>` - 'Powell' :ref:`(see here) <optimize.minimize-powell>` - 'CG' :ref:`(see here) <optimize.minimize-cg>` - 'BFGS' :ref:`(see here) <optimize.minimize-bfgs>` - 'Newton-CG' :ref:`(see here) <optimize.minimize-newtoncg>` - 'L-BFGS-B' :ref:`(see here) <optimize.minimize-lbfgsb>` - 'TNC' :ref:`(see here) <optimize.minimize-tnc>` - 'COBYLA' :ref:`(see here) <optimize.minimize-cobyla>` - 'SLSQP' :ref:`(see here) <optimize.minimize-slsqp>` - 'dogleg' :ref:`(see here) <optimize.minimize-dogleg>` - 'trust-ncg' :ref:`(see here) <optimize.minimize-trustncg>` - 'trust-region-exact' :ref:`(see here) <optimize.minimize-trustexact>` - custom - a callable object (added in version 0.14.0), see below for description. If not given, chosen to be one of ``BFGS``, ``L-BFGS-B``, ``SLSQP``, depending if the problem has constraints or bounds. jac : bool or callable, optional Jacobian (gradient) of objective function. Only for CG, BFGS, Newton-CG, L-BFGS-B, TNC, SLSQP, dogleg, trust-ncg. If `jac` is a Boolean and is True, `fun` is assumed to return the gradient along with the objective function. If False, the gradient will be estimated numerically. `jac` can also be a callable returning the gradient of the objective. In this case, it must accept the same arguments as `fun`. hess, hessp : callable, optional Hessian (matrix of second-order derivatives) of objective function or Hessian of objective function times an arbitrary vector p. Only for Newton-CG, dogleg, trust-ncg. Only one of `hessp` or `hess` needs to be given. If `hess` is provided, then `hessp` will be ignored. If neither `hess` nor `hessp` is provided, then the Hessian product will be approximated using finite differences on `jac`. 
`hessp` must compute the Hessian times an arbitrary vector. bounds : sequence, optional Bounds for variables (only for L-BFGS-B, TNC and SLSQP). ``(min, max)`` pairs for each element in ``x``, defining the bounds on that parameter. Use None for one of ``min`` or ``max`` when there is no bound in that direction. constraints : dict or sequence of dict, optional Constraints definition (only for COBYLA and SLSQP). Each constraint is defined in a dictionary with fields: type : str Constraint type: 'eq' for equality, 'ineq' for inequality. fun : callable The function defining the constraint. jac : callable, optional The Jacobian of `fun` (only for SLSQP). args : sequence, optional Extra arguments to be passed to the function and Jacobian. Equality constraint means that the constraint function result is to be zero whereas inequality means that it is to be non-negative. Note that COBYLA only supports inequality constraints. tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. All methods accept the following generic options: maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. For method-specific options, see :func:`show_options()`. callback : callable, optional Called after each iteration, as ``callback(xk)``, where ``xk`` is the current parameter vector. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize_scalar : Interface to minimization algorithms for scalar univariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *BFGS*. **Unconstrained minimization** Method :ref:`Nelder-Mead <optimize.minimize-neldermead>` uses the Simplex algorithm [1]_, [2]_. This algorithm is robust in many applications. However, if numerical computation of derivative can be trusted, other algorithms using the first and/or second derivatives information might be preferred for their better performance in general. Method :ref:`Powell <optimize.minimize-powell>` is a modification of Powell's method [3]_, [4]_ which is a conjugate direction method. It performs sequential one-dimensional minimizations along each vector of the directions set (`direc` field in `options` and `info`), which is updated at each iteration of the main minimization loop. The function need not be differentiable, and no derivatives are taken. Method :ref:`CG <optimize.minimize-cg>` uses a nonlinear conjugate gradient algorithm by Polak and Ribiere, a variant of the Fletcher-Reeves method described in [5]_ pp. 120-122. Only the first derivatives are used. Method :ref:`BFGS <optimize.minimize-bfgs>` uses the quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS) [5]_ pp. 136. It uses the first derivatives only. BFGS has proven good performance even for non-smooth optimizations. This method also returns an approximation of the Hessian inverse, stored as `hess_inv` in the OptimizeResult object. Method :ref:`Newton-CG <optimize.minimize-newtoncg>` uses a Newton-CG algorithm [5]_ pp. 
168 (also known as the truncated Newton method). It uses a CG method to compute the search direction. See also the *TNC* method for a box-constrained minimization with a similar algorithm. Suitable for large-scale problems. Method :ref:`dogleg <optimize.minimize-dogleg>` uses the dog-leg trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and Hessian; furthermore the Hessian is required to be positive definite. Method :ref:`trust-ncg <optimize.minimize-trustncg>` uses the Newton conjugate gradient trust-region algorithm [5]_ for unconstrained minimization. This algorithm requires the gradient and either the Hessian or a function that computes the product of the Hessian with a given vector. Suitable for large-scale problems. Method :ref:`trust-region-exact <optimize.minimize-trustexact>` is a trust-region method for unconstrained minimization in which quadratic subproblems are solved almost exactly [13]_. This algorithm requires the gradient and the Hessian (which is *not* required to be positive definite). In many situations it is the Newton method that converges in the fewest iterations, and it is the most recommended for small and medium-size problems. **Constrained minimization** Method :ref:`L-BFGS-B <optimize.minimize-lbfgsb>` uses the L-BFGS-B algorithm [6]_, [7]_ for bound constrained minimization. Method :ref:`TNC <optimize.minimize-tnc>` uses a truncated Newton algorithm [5]_, [8]_ to minimize a function with variables subject to bounds. This algorithm uses gradient information; it is also called Newton Conjugate-Gradient. It differs from the *Newton-CG* method described above as it wraps a C implementation and allows each variable to be given upper and lower bounds. Method :ref:`COBYLA <optimize.minimize-cobyla>` uses the Constrained Optimization BY Linear Approximation (COBYLA) method [9]_, [10]_, [11]_. The algorithm is based on linear approximations to the objective function and each constraint. The method wraps a FORTRAN implementation of the algorithm. The constraint functions 'fun' may return either a single number or an array or list of numbers. Method :ref:`SLSQP <optimize.minimize-slsqp>` uses Sequential Least SQuares Programming to minimize a function of several variables with any combination of bounds, equality and inequality constraints. The method wraps the SLSQP Optimization subroutine originally implemented by Dieter Kraft [12]_. Note that the wrapper handles infinite values in bounds by converting them into large floating values. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using a frontend to this method such as `scipy.optimize.basinhopping` or a different library. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, x0, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `callback`, `hess`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. Also, if `jac` has been passed as a bool type, `jac` and `fun` are mangled so that `fun` returns just the function values and `jac` is converted to a function returning the Jacobian. The method shall return an ``OptimizeResult`` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method.
You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 References ---------- .. [1] Nelder, J A, and R Mead. 1965. A Simplex Method for Function Minimization. The Computer Journal 7: 308-13. .. [2] Wright M H. 1996. Direct search methods: Once scorned, now respectable, in Numerical Analysis 1995: Proceedings of the 1995 Dundee Biennial Conference in Numerical Analysis (Eds. D F Griffiths and G A Watson). Addison Wesley Longman, Harlow, UK. 191-208. .. [3] Powell, M J D. 1964. An efficient method for finding the minimum of a function of several variables without calculating derivatives. The Computer Journal 7: 155-162. .. [4] Press W, S A Teukolsky, W T Vetterling and B P Flannery. Numerical Recipes (any edition), Cambridge University Press. .. [5] Nocedal, J, and S J Wright. 2006. Numerical Optimization. Springer New York. .. [6] Byrd, R H and P Lu and J. Nocedal. 1995. A Limited Memory Algorithm for Bound Constrained Optimization. SIAM Journal on Scientific and Statistical Computing 16 (5): 1190-1208. .. [7] Zhu, C and R H Byrd and J Nocedal. 1997. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization. ACM Transactions on Mathematical Software 23 (4): 550-560. .. [8] Nash, S G. Newton-Type Minimization Via the Lanczos Method. 1984. SIAM Journal of Numerical Analysis 21: 770-778. .. [9] Powell, M J D. A direct search optimization method that models the objective and constraint functions by linear interpolation. 1994. Advances in Optimization and Numerical Analysis, eds. S. Gomez and J-P Hennart, Kluwer Academic (Dordrecht), 51-67. .. [10] Powell M J D. Direct search algorithms for optimization calculations. 1998. Acta Numerica 7: 287-336. .. [11] Powell M J D. A view of algorithms for optimization without derivatives. 2007.Cambridge University Technical Report DAMTP 2007/NA03 .. [12] Kraft, D. A software package for sequential quadratic programming. 1988. Tech. Rep. DFVLR-FB 88-28, DLR German Aerospace Center -- Institute for Flight Mechanics, Koln, Germany. .. [13] Conn, A. R., Gould, N. I., and Toint, P. L. Trust region methods. 2000. Siam. pp. 169-200. Examples -------- Let us consider the problem of minimizing the Rosenbrock function. This function (and its respective derivatives) is implemented in `rosen` (resp. `rosen_der`, `rosen_hess`) in the `scipy.optimize`. >>> from scipy.optimize import minimize, rosen, rosen_der A simple application of the *Nelder-Mead* method is: >>> x0 = [1.3, 0.7, 0.8, 1.9, 1.2] >>> res = minimize(rosen, x0, method='Nelder-Mead', tol=1e-6) >>> res.x array([ 1., 1., 1., 1., 1.]) Now using the *BFGS* algorithm, using the first derivative and a few options: >>> res = minimize(rosen, x0, method='BFGS', jac=rosen_der, ... options={'gtol': 1e-6, 'disp': True}) Optimization terminated successfully. Current function value: 0.000000 Iterations: 33 Function evaluations: 35 Gradient evaluations: 35 >>> res.x array([ 1., 1., 1., 1., 1.]) >>> print(res.message) Optimization terminated successfully. >>> res.hess_inv array([[ 0.00749589, 0.01255155, 0.02396251, 0.04750988, 0.09495377], # may vary [ 0.01255155, 0.02510441, 0.04794055, 0.09502834, 0.18996269], [ 0.02396251, 0.04794055, 0.09631614, 0.19092151, 0.38165151], [ 0.04750988, 0.09502834, 0.19092151, 0.38341252, 0.7664427 ], [ 0.09495377, 0.18996269, 0.38165151, 0.7664427, 1.53713523]]) Next, consider a minimization problem with several constraints (namely Example 16.4 from [5]_). 
The objective function is: >>> fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 There are three constraints defined as: >>> cons = ({'type': 'ineq', 'fun': lambda x: x[0] - 2 * x[1] + 2}, ... {'type': 'ineq', 'fun': lambda x: -x[0] - 2 * x[1] + 6}, ... {'type': 'ineq', 'fun': lambda x: -x[0] + 2 * x[1] + 2}) And variables must be positive, hence the following bounds: >>> bnds = ((0, None), (0, None)) The optimization problem is solved using the SLSQP method as: >>> res = minimize(fun, (2, 0), method='SLSQP', bounds=bnds, ... constraints=cons) It should converge to the theoretical solution (1.4 ,1.7). """ x0 = np.asarray(x0) if x0.dtype.kind in np.typecodes["AllInteger"]: x0 = np.asarray(x0, dtype=float) if not isinstance(args, tuple): args = (args,) if method is None: # Select automatically if constraints: method = 'SLSQP' elif bounds is not None: method = 'L-BFGS-B' else: method = 'BFGS' if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} # check if optional parameters are supported by the selected method # - jac if meth in ['nelder-mead', 'powell', 'cobyla'] and bool(jac): warn('Method %s does not use gradient information (jac).' % method, RuntimeWarning) # - hess if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-region-exact', '_custom') and hess is not None: warn('Method %s does not use Hessian information (hess).' % method, RuntimeWarning) # - hessp if meth not in ('newton-cg', 'dogleg', 'trust-ncg', '_custom') and hessp is not None: warn('Method %s does not use Hessian-vector product ' 'information (hessp).' % method, RuntimeWarning) # - constraints or bounds if (meth in ['nelder-mead', 'powell', 'cg', 'bfgs', 'newton-cg', 'dogleg', 'trust-ncg'] and (bounds is not None or np.any(constraints))): warn('Method %s cannot handle constraints nor bounds.' % method, RuntimeWarning) if meth in ['l-bfgs-b', 'tnc'] and np.any(constraints): warn('Method %s cannot handle constraints.' % method, RuntimeWarning) if meth == 'cobyla' and bounds is not None: warn('Method %s cannot handle bounds.' % method, RuntimeWarning) # - callback if (meth in ['cobyla'] and callback is not None): warn('Method %s does not support callback.' % method, RuntimeWarning) # - return_all if (meth in ['l-bfgs-b', 'tnc', 'cobyla', 'slsqp'] and options.get('return_all', False)): warn('Method %s does not support the return_all option.' 
% method, RuntimeWarning) # fun also returns the jacobian if not callable(jac): if bool(jac): fun = MemoizeJac(fun) jac = fun.derivative else: jac = None # set default tolerances if tol is not None: options = dict(options) if meth == 'nelder-mead': options.setdefault('xatol', tol) options.setdefault('fatol', tol) if meth in ['newton-cg', 'powell', 'tnc']: options.setdefault('xtol', tol) if meth in ['powell', 'l-bfgs-b', 'tnc', 'slsqp']: options.setdefault('ftol', tol) if meth in ['bfgs', 'cg', 'l-bfgs-b', 'tnc', 'dogleg', 'trust-ncg', 'trust-region-exact']: options.setdefault('gtol', tol) if meth in ['cobyla', '_custom']: options.setdefault('tol', tol) if meth == '_custom': return method(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, bounds=bounds, constraints=constraints, callback=callback, **options) elif meth == 'nelder-mead': return _minimize_neldermead(fun, x0, args, callback, **options) elif meth == 'powell': return _minimize_powell(fun, x0, args, callback, **options) elif meth == 'cg': return _minimize_cg(fun, x0, args, jac, callback, **options) elif meth == 'bfgs': return _minimize_bfgs(fun, x0, args, jac, callback, **options) elif meth == 'newton-cg': return _minimize_newtoncg(fun, x0, args, jac, hess, hessp, callback, **options) elif meth == 'l-bfgs-b': return _minimize_lbfgsb(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'tnc': return _minimize_tnc(fun, x0, args, jac, bounds, callback=callback, **options) elif meth == 'cobyla': return _minimize_cobyla(fun, x0, args, constraints, **options) elif meth == 'slsqp': return _minimize_slsqp(fun, x0, args, jac, bounds, constraints, callback=callback, **options) elif meth == 'dogleg': return _minimize_dogleg(fun, x0, args, jac, hess, callback=callback, **options) elif meth == 'trust-ncg': return _minimize_trust_ncg(fun, x0, args, jac, hess, hessp, callback=callback, **options) elif meth == 'trust-region-exact': return _minimize_trustregion_exact(fun, x0, args, jac, hess, callback=callback, **options) else: raise ValueError('Unknown solver %s' % method) def minimize_scalar(fun, bracket=None, bounds=None, args=(), method='brent', tol=None, options=None): """Minimization of scalar function of one variable. Parameters ---------- fun : callable Objective function. Scalar function, must return a scalar. bracket : sequence, optional For methods 'brent' and 'golden', `bracket` defines the bracketing interval and can either have three items ``(a, b, c)`` so that ``a < b < c`` and ``fun(b) < fun(a), fun(c)`` or two items ``a`` and ``c`` which are assumed to be a starting interval for a downhill bracket search (see `bracket`); it doesn't always mean that the obtained solution will satisfy ``a <= x <= c``. bounds : sequence, optional For method 'bounded', `bounds` is mandatory and must have two items corresponding to the optimization bounds. args : tuple, optional Extra arguments passed to the objective function. method : str or callable, optional Type of solver. Should be one of: - 'Brent' :ref:`(see here) <optimize.minimize_scalar-brent>` - 'Bounded' :ref:`(see here) <optimize.minimize_scalar-bounded>` - 'Golden' :ref:`(see here) <optimize.minimize_scalar-golden>` - custom - a callable object (added in version 0.14.0), see below tol : float, optional Tolerance for termination. For detailed control, use solver-specific options. options : dict, optional A dictionary of solver options. maxiter : int Maximum number of iterations to perform. disp : bool Set to True to print convergence messages. 
See :func:`show_options()` for solver-specific options. Returns ------- res : OptimizeResult The optimization result represented as a ``OptimizeResult`` object. Important attributes are: ``x`` the solution array, ``success`` a Boolean flag indicating if the optimizer exited successfully and ``message`` which describes the cause of the termination. See `OptimizeResult` for a description of other attributes. See also -------- minimize : Interface to minimization algorithms for scalar multivariate functions show_options : Additional options accepted by the solvers Notes ----- This section describes the available solvers that can be selected by the 'method' parameter. The default method is *Brent*. Method :ref:`Brent <optimize.minimize_scalar-brent>` uses Brent's algorithm to find a local minimum. The algorithm uses inverse parabolic interpolation when possible to speed up convergence of the golden section method. Method :ref:`Golden <optimize.minimize_scalar-golden>` uses the golden section search technique. It uses an analog of the bisection method to decrease the bracketed interval. It is usually preferable to use the *Brent* method. Method :ref:`Bounded <optimize.minimize_scalar-bounded>` can perform bounded minimization. It uses the Brent method to find a local minimum in the interval x1 < xopt < x2. **Custom minimizers** It may be useful to pass a custom minimization method, for example when using some library frontend to minimize_scalar. You can simply pass a callable as the ``method`` parameter. The callable is called as ``method(fun, args, **kwargs, **options)`` where ``kwargs`` corresponds to any other parameters passed to `minimize` (such as `bracket`, `tol`, etc.), except the `options` dict, which has its contents also passed as `method` parameters pair by pair. The method shall return an ``OptimizeResult`` object. The provided `method` callable must be able to accept (and possibly ignore) arbitrary parameters; the set of parameters accepted by `minimize` may expand in future versions and then these parameters will be passed to the method. You can find an example in the scipy.optimize tutorial. .. versionadded:: 0.11.0 Examples -------- Consider the problem of minimizing the following function. >>> def f(x): ...
return (x - 2) * x * (x + 2)**2 Using the *Brent* method, we find the local minimum as: >>> from scipy.optimize import minimize_scalar >>> res = minimize_scalar(f) >>> res.x 1.28077640403 Using the *Bounded* method, we find a local minimum with specified bounds as: >>> res = minimize_scalar(f, bounds=(-3, -1), method='bounded') >>> res.x -2.0000002026 """ if not isinstance(args, tuple): args = (args,) if callable(method): meth = "_custom" else: meth = method.lower() if options is None: options = {} if tol is not None: options = dict(options) if meth == 'bounded' and 'xatol' not in options: warn("Method 'bounded' does not support relative tolerance in x; " "defaulting to absolute tolerance.", RuntimeWarning) options['xatol'] = tol elif meth == '_custom': options.setdefault('tol', tol) else: options.setdefault('xtol', tol) if meth == '_custom': return method(fun, args=args, bracket=bracket, bounds=bounds, **options) elif meth == 'brent': return _minimize_scalar_brent(fun, bracket, args, **options) elif meth == 'bounded': if bounds is None: raise ValueError('The `bounds` parameter is mandatory for ' 'method `bounded`.') return _minimize_scalar_bounded(fun, bounds, args, **options) elif meth == 'golden': return _minimize_scalar_golden(fun, bracket, args, **options) else: raise ValueError('Unknown solver %s' % method)
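Neither docstring above carries an inline example of the custom-minimizer protocol it describes; a minimal sketch of a callable usable as ``method=``, assuming a gradient `jac` is supplied (the step size and iteration count are arbitrary choices, not part of the source):

import numpy as np
from scipy.optimize import OptimizeResult, minimize, rosen, rosen_der

def gradient_descent(fun, x0, args=(), jac=None, callback=None,
                     maxiter=1000, step=1e-3, **unknown_options):
    # Per the protocol, accept (and possibly ignore) arbitrary parameters
    # such as hess, hessp, bounds and constraints via **unknown_options.
    x = np.asarray(x0, dtype=float)
    for i in range(maxiter):
        x = x - step * jac(x, *args)  # assumes jac was passed as a callable
        if callback is not None:
            callback(x)
    return OptimizeResult(x=x, fun=fun(x, *args), nit=maxiter, success=True)

# Usage, reusing the Rosenbrock helpers from the examples above:
res = minimize(rosen, [1.3, 0.7], jac=rosen_der, method=gradient_descent)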
bsd-3-clause
Ashstreet/BugScoreKeeper
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
304
41695
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """cmake output module This module is under development and should be considered experimental. This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is created for each configuration. This module's original purpose was to support editing in IDEs like KDevelop which use CMake for project management. It is also possible to use CMake to generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator will convert the CMakeLists.txt to a code::blocks cbp for the editor to read, but build using CMake. As a result QtCreator editor is unaware of compiler defines. The generated CMakeLists.txt can also be used to build on Linux. There is currently no support for building on platforms other than Linux. The generated CMakeLists.txt should properly compile all projects. However, there is a mismatch between gyp and cmake with regard to linking. All attempts are made to work around this, but CMake sometimes sees -Wl,--start-group as a library and incorrectly repeats it. As a result the output of this generator should not be relied on for building. When using with kdevelop, use version 4.4+. Previous versions of kdevelop will not be able to find the header file directories described in the generated CMakeLists.txt file. """ import multiprocessing import os import signal import string import subprocess import gyp.common generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_PREFIX': 'lib', 'SHARED_LIB_SUFFIX': '.so', 'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}', 'LIB_DIR': '${obj}.${TOOLSET}', 'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni', 'SHARED_INTERMEDIATE_DIR': '${obj}/gen', 'PRODUCT_DIR': '${builddir}', 'RULE_INPUT_PATH': '${RULE_INPUT_PATH}', 'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}', 'RULE_INPUT_NAME': '${RULE_INPUT_NAME}', 'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}', 'RULE_INPUT_EXT': '${RULE_INPUT_EXT}', 'CONFIGURATION_NAME': '${configuration}', } FULL_PATH_VARS = ('${CMAKE_SOURCE_DIR}', '${builddir}', '${obj}') generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = True COMPILABLE_EXTENSIONS = { '.c': 'cc', '.cc': 'cxx', '.cpp': 'cxx', '.cxx': 'cxx', '.s': 's', # cc '.S': 's', # cc } def RemovePrefix(a, prefix): """Returns 'a' without 'prefix' if it starts with 'prefix'.""" return a[len(prefix):] if a.startswith(prefix) else a def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" default_variables.setdefault('OS', gyp.common.GetFlavor(params)) def Compilable(filename): """Return true if the file is compilable (should be in OBJS).""" return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS) def Linkable(filename): """Return true if the file is linkable (should be on the link line).""" return filename.endswith('.o') def NormjoinPathForceCMakeSource(base_path, rel_path): """Resolves rel_path against base_path and returns the result. If rel_path is an absolute path it is returned unchanged. Otherwise it is resolved against base_path and normalized. If the result is a relative path, it is forced to be relative to the CMakeLists.txt. 
""" if os.path.isabs(rel_path): return rel_path if any([rel_path.startswith(var) for var in FULL_PATH_VARS]): return rel_path # TODO: do we need to check base_path for absolute variables as well? return os.path.join('${CMAKE_SOURCE_DIR}', os.path.normpath(os.path.join(base_path, rel_path))) def NormjoinPath(base_path, rel_path): """Resolves rel_path against base_path and returns the result. TODO: what is this really used for? If rel_path begins with '$' it is returned unchanged. Otherwise it is resolved against base_path if relative, then normalized. """ if rel_path.startswith('$') and not rel_path.startswith('${configuration}'): return rel_path return os.path.normpath(os.path.join(base_path, rel_path)) def CMakeStringEscape(a): """Escapes the string 'a' for use inside a CMake string. This means escaping '\' otherwise it may be seen as modifying the next character '"' otherwise it will end the string ';' otherwise the string becomes a list The following do not need to be escaped '#' when the lexer is in string state, this does not start a comment The following are yet unknown '$' generator variables (like ${obj}) must not be escaped, but text $ should be escaped what is wanted is to know which $ come from generator variables """ return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"') def SetFileProperty(output, source_name, property_name, values, sep): """Given a set of source file, sets the given property on them.""" output.write('set_source_files_properties(') output.write(source_name) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetFilesProperty(output, source_names, property_name, values, sep): """Given a set of source files, sets the given property on them.""" output.write('set_source_files_properties(\n') for source_name in source_names: output.write(' ') output.write(source_name) output.write('\n') output.write(' PROPERTIES\n ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('"\n)\n') def SetTargetProperty(output, target_name, property_name, values, sep=''): """Given a target, sets the given property.""" output.write('set_target_properties(') output.write(target_name) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetVariable(output, variable_name, value): """Sets a CMake variable.""" output.write('set(') output.write(variable_name) output.write(' "') output.write(CMakeStringEscape(value)) output.write('")\n') def SetVariableList(output, variable_name, values): """Sets a CMake variable to a list.""" if not values: return SetVariable(output, variable_name, "") if len(values) == 1: return SetVariable(output, variable_name, values[0]) output.write('list(APPEND ') output.write(variable_name) output.write('\n "') output.write('"\n "'.join([CMakeStringEscape(value) for value in values])) output.write('")\n') def UnsetVariable(output, variable_name): """Unsets a CMake variable.""" output.write('unset(') output.write(variable_name) output.write(')\n') def WriteVariable(output, variable_name, prepend=None): if prepend: output.write(prepend) output.write('${') output.write(variable_name) output.write('}') class CMakeTargetType(object): def __init__(self, command, modifier, property_modifier): self.command = command 
self.modifier = modifier self.property_modifier = property_modifier cmake_target_type_from_gyp_target_type = { 'executable': CMakeTargetType('add_executable', None, 'RUNTIME'), 'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'), 'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'), 'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'), 'none': CMakeTargetType('add_custom_target', 'SOURCES', None), } def StringToCMakeTargetName(a): """Converts the given string 'a' to a valid CMake target name. All invalid characters are replaced by '_'. Invalid for cmake: ' ', '/', '(', ')' Invalid for make: ':' Invalid for unknown reasons but cause failures: '.' """ return a.translate(string.maketrans(' /():.', '______')) def WriteActions(target_name, actions, extra_sources, extra_deps, path_to_gyp, output): """Write CMake for the 'actions' in the target. Args: target_name: the name of the CMake target being generated. actions: the Gyp 'actions' dict for this target. extra_sources: [(<cmake_src>, <src>)] to append with generated source files. extra_deps: [<cmake_taget>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ for action in actions: action_name = StringToCMakeTargetName(action['action_name']) action_target_name = '%s__%s' % (target_name, action_name) inputs = action['inputs'] inputs_name = action_target_name + '__input' SetVariableList(output, inputs_name, [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) outputs = action['outputs'] cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out) for out in outputs] outputs_name = action_target_name + '__output' SetVariableList(output, outputs_name, cmake_outputs) # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) if int(action.get('process_outputs_as_sources', False)): extra_sources.extend(zip(cmake_outputs, outputs)) # add_custom_command output.write('add_custom_command(OUTPUT ') WriteVariable(output, outputs_name) output.write('\n') if len(dirs) > 0: for directory in dirs: output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') output.write(directory) output.write('\n') output.write(' COMMAND ') output.write(gyp.common.EncodePOSIXShellList(action['action'])) output.write('\n') output.write(' DEPENDS ') WriteVariable(output, inputs_name) output.write('\n') output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/') output.write(path_to_gyp) output.write('\n') output.write(' COMMENT ') if 'message' in action: output.write(action['message']) else: output.write(action_target_name) output.write('\n') output.write(' VERBATIM\n') output.write(')\n') # add_custom_target output.write('add_custom_target(') output.write(action_target_name) output.write('\n DEPENDS ') WriteVariable(output, outputs_name) output.write('\n SOURCES ') WriteVariable(output, inputs_name) output.write('\n)\n') extra_deps.append(action_target_name) def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source): if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")): if any([rule_source.startswith(var) for var in FULL_PATH_VARS]): return rel_path return NormjoinPathForceCMakeSource(base_path, rel_path) def WriteRules(target_name, rules, extra_sources, extra_deps, path_to_gyp, output): """Write CMake for the 'rules' in the target. 
Args: target_name: the name of the CMake target being generated. rules: the Gyp 'rules' dict for this target. extra_sources: [(<cmake_src>, <src>)] to append with generated source files. extra_deps: [<cmake_target>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ for rule in rules: rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name']) inputs = rule.get('inputs', []) inputs_name = rule_name + '__input' SetVariableList(output, inputs_name, [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) outputs = rule['outputs'] var_outputs = [] for count, rule_source in enumerate(rule.get('rule_sources', [])): action_name = rule_name + '_' + str(count) rule_source_dirname, rule_source_basename = os.path.split(rule_source) rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename) SetVariable(output, 'RULE_INPUT_PATH', rule_source) SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname) SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename) SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root) SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext) # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) # Create variables for the output, as 'local' variable will be unset. these_outputs = [] for output_index, out in enumerate(outputs): output_name = action_name + '_' + str(output_index) SetVariable(output, output_name, NormjoinRulePathForceCMakeSource(path_to_gyp, out, rule_source)) if int(rule.get('process_outputs_as_sources', False)): extra_sources.append(('${' + output_name + '}', out)) these_outputs.append('${' + output_name + '}') var_outputs.append('${' + output_name + '}') # add_custom_command output.write('add_custom_command(OUTPUT\n') for out in these_outputs: output.write(' ') output.write(out) output.write('\n') for directory in dirs: output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') output.write(directory) output.write('\n') output.write(' COMMAND ') output.write(gyp.common.EncodePOSIXShellList(rule['action'])) output.write('\n') output.write(' DEPENDS ') WriteVariable(output, inputs_name) output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') # CMAKE_SOURCE_DIR is where the CMakeLists.txt lives. # The cwd is the current build directory. output.write(' WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/') output.write(path_to_gyp) output.write('\n') output.write(' COMMENT ') if 'message' in rule: output.write(rule['message']) else: output.write(action_name) output.write('\n') output.write(' VERBATIM\n') output.write(')\n') UnsetVariable(output, 'RULE_INPUT_PATH') UnsetVariable(output, 'RULE_INPUT_DIRNAME') UnsetVariable(output, 'RULE_INPUT_NAME') UnsetVariable(output, 'RULE_INPUT_ROOT') UnsetVariable(output, 'RULE_INPUT_EXT') # add_custom_target output.write('add_custom_target(') output.write(rule_name) output.write(' DEPENDS\n') for out in var_outputs: output.write(' ') output.write(out) output.write('\n') output.write('SOURCES ') WriteVariable(output, inputs_name) output.write('\n') for rule_source in rule.get('rule_sources', []): output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') output.write(')\n') extra_deps.append(rule_name) def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output): """Write CMake for the 'copies' in the target.
Args: target_name: the name of the CMake target being generated. copies: the Gyp 'copies' dict for this target. extra_deps: [<cmake_target>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ copy_name = target_name + '__copies' # CMake gets upset with custom targets with OUTPUT which specify no output. have_copies = any(copy['files'] for copy in copies) if not have_copies: output.write('add_custom_target(') output.write(copy_name) output.write(')\n') extra_deps.append(copy_name) return class Copy(object): def __init__(self, ext, command): self.cmake_inputs = [] self.cmake_outputs = [] self.gyp_inputs = [] self.gyp_outputs = [] self.ext = ext self.inputs_name = None self.outputs_name = None self.command = command file_copy = Copy('', 'copy') dir_copy = Copy('_dirs', 'copy_directory') for copy in copies: files = copy['files'] destination = copy['destination'] for src in files: path = os.path.normpath(src) basename = os.path.split(path)[1] dst = os.path.join(destination, basename) copy = file_copy if os.path.basename(src) else dir_copy copy.cmake_inputs.append(NormjoinPath(path_to_gyp, src)) copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst)) copy.gyp_inputs.append(src) copy.gyp_outputs.append(dst) for copy in (file_copy, dir_copy): if copy.cmake_inputs: copy.inputs_name = copy_name + '__input' + copy.ext SetVariableList(output, copy.inputs_name, copy.cmake_inputs) copy.outputs_name = copy_name + '__output' + copy.ext SetVariableList(output, copy.outputs_name, copy.cmake_outputs) # add_custom_command output.write('add_custom_command(\n') output.write('OUTPUT') for copy in (file_copy, dir_copy): if copy.outputs_name: WriteVariable(output, copy.outputs_name, ' ') output.write('\n') for copy in (file_copy, dir_copy): for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs): # 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command) output.write(src) output.write(' ') output.write(dst) output.write("\n") output.write('DEPENDS') for copy in (file_copy, dir_copy): if copy.inputs_name: WriteVariable(output, copy.inputs_name, ' ') output.write('\n') output.write('WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/') output.write(path_to_gyp) output.write('\n') output.write('COMMENT Copying for ') output.write(target_name) output.write('\n') output.write('VERBATIM\n') output.write(')\n') # add_custom_target output.write('add_custom_target(') output.write(copy_name) output.write('\n DEPENDS') for copy in (file_copy, dir_copy): if copy.outputs_name: WriteVariable(output, copy.outputs_name, ' ') output.write('\n SOURCES') if file_copy.inputs_name: WriteVariable(output, file_copy.inputs_name, ' ') output.write('\n)\n') extra_deps.append(copy_name) def CreateCMakeTargetBaseName(qualified_target): """This is the name we would like the target to have.""" _, gyp_target_name, gyp_target_toolset = ( gyp.common.ParseQualifiedTarget(qualified_target)) cmake_target_base_name = gyp_target_name if gyp_target_toolset and gyp_target_toolset != 'target': cmake_target_base_name += '_' + gyp_target_toolset return StringToCMakeTargetName(cmake_target_base_name) def CreateCMakeTargetFullName(qualified_target): """An unambiguous name for the target.""" gyp_file, gyp_target_name, gyp_target_toolset = ( gyp.common.ParseQualifiedTarget(qualified_target)) cmake_target_full_name = gyp_file + ':' + gyp_target_name if gyp_target_toolset and gyp_target_toolset != 'target': cmake_target_full_name += '_' + gyp_target_toolset return StringToCMakeTargetName(cmake_target_full_name) class CMakeNamer(object): """Converts Gyp target names into CMake target names. CMake requires that target names be globally unique. One way to ensure this is to fully qualify the names of the targets. Unfortunately, this ends up with all targets looking like "chrome_chrome_gyp_chrome" instead of just "chrome". If this generator were only interested in building, it would be possible to fully qualify all target names, then create unqualified target names which depend on all qualified targets which should have had that name. This is more or less what the 'make' generator does with aliases. However, one goal of this generator is to create CMake files for use with IDEs, and fully qualified names are not as user-friendly. Since target name collision is rare, we do the above only when required. Toolset variants are always qualified from the base, as this is required for building. However, it also makes sense for an IDE, as it is possible for defines to be different. """ def __init__(self, target_list): self.cmake_target_base_names_conflicting = set() cmake_target_base_names_seen = set() for qualified_target in target_list: cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target) if cmake_target_base_name not in cmake_target_base_names_seen: cmake_target_base_names_seen.add(cmake_target_base_name) else: self.cmake_target_base_names_conflicting.add(cmake_target_base_name) def CreateCMakeTargetName(self, qualified_target): base_name = CreateCMakeTargetBaseName(qualified_target) if base_name in self.cmake_target_base_names_conflicting: return CreateCMakeTargetFullName(qualified_target) return base_name def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output): # The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies. circular_libs = generator_flags.get('circular', True) if not generator_flags.get('standalone', False): output.write('\n#') output.write(qualified_target) output.write('\n') gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir) rel_gyp_dir = os.path.dirname(rel_gyp_file) # Relative path from build dir to top dir. build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) # Relative path from build dir to gyp dir. build_to_gyp = os.path.join(build_to_top, rel_gyp_dir) path_from_cmakelists_to_gyp = build_to_gyp spec = target_dicts.get(qualified_target, {}) config = spec.get('configurations', {}).get(config_to_use, {}) target_name = spec.get('target_name', '<missing target name>') target_type = spec.get('type', '<missing target type>') target_toolset = spec.get('toolset') SetVariable(output, 'TARGET', target_name) SetVariable(output, 'TOOLSET', target_toolset) cmake_target_name = namer.CreateCMakeTargetName(qualified_target) extra_sources = [] extra_deps = [] # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps, path_from_cmakelists_to_gyp, output) # Rules must be early like actions. if 'rules' in spec: WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps, path_from_cmakelists_to_gyp, output) # Copies if 'copies' in spec: WriteCopies(cmake_target_name, spec['copies'], extra_deps, path_from_cmakelists_to_gyp, output) # Target and sources srcs = spec.get('sources', []) # Gyp separates the sheep from the goats based on file extensions. def partition(l, p): return reduce(lambda x, e: x[not p(e)].append(e) or x, l, ([], [])) compilable_srcs, other_srcs = partition(srcs, Compilable) # CMake gets upset when executable targets provide no sources. if target_type == 'executable' and not compilable_srcs and not extra_sources: print ('Executable %s has no compilable sources, treating as "none".' % target_name ) target_type = 'none' cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type) if cmake_target_type is None: print ('Target %s has unknown target type %s, skipping.' % ( target_name, target_type ) ) return other_srcs_name = None if other_srcs: other_srcs_name = cmake_target_name + '__other_srcs' SetVariableList(output, other_srcs_name, [NormjoinPath(path_from_cmakelists_to_gyp, src) for src in other_srcs]) # CMake is opposed to setting linker directories and considers the practice # of setting linker directories dangerous. Instead, it favors the use of # find_library and passing absolute paths to target_link_libraries. # However, CMake does provide the command link_directories, which adds # link directories to targets defined after it is called. # As a result, link_directories must come before the target definition. # CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs') if library_dirs is not None: output.write('link_directories(') for library_dir in library_dirs: output.write(' ') output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir)) output.write('\n') output.write(')\n') output.write(cmake_target_type.command) output.write('(') output.write(cmake_target_name) if cmake_target_type.modifier is not None: output.write(' ') output.write(cmake_target_type.modifier) if other_srcs_name: WriteVariable(output, other_srcs_name, ' ') output.write('\n') for src in compilable_srcs: output.write(' ') output.write(NormjoinPath(path_from_cmakelists_to_gyp, src)) output.write('\n') for extra_source in extra_sources: output.write(' ') src, _ = extra_source output.write(NormjoinPath(path_from_cmakelists_to_gyp, src)) output.write('\n') output.write(')\n') # Output name and location. if target_type != 'none': # Mark uncompiled sources as uncompiled. if other_srcs_name: output.write('set_source_files_properties(') WriteVariable(output, other_srcs_name, '') output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n') # Output directory target_output_directory = spec.get('product_dir') if target_output_directory is None: if target_type in ('executable', 'loadable_module'): target_output_directory = generator_default_variables['PRODUCT_DIR'] elif target_type == 'shared_library': target_output_directory = '${builddir}/lib.${TOOLSET}' elif spec.get('standalone_static_library', False): target_output_directory = generator_default_variables['PRODUCT_DIR'] else: base_path = gyp.common.RelativePath(os.path.dirname(gyp_file), options.toplevel_dir) target_output_directory = '${obj}.${TOOLSET}' target_output_directory = ( os.path.join(target_output_directory, base_path)) cmake_target_output_directory = NormjoinPathForceCMakeSource( path_from_cmakelists_to_gyp, target_output_directory) SetTargetProperty(output, cmake_target_name, cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY', cmake_target_output_directory) # Output name default_product_prefix = '' default_product_name = target_name default_product_ext = '' if target_type == 'static_library': static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, static_library_prefix) default_product_prefix = static_library_prefix default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX'] elif target_type in ('loadable_module', 'shared_library'): shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, shared_library_prefix) default_product_prefix = shared_library_prefix default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX'] elif target_type != 'executable': print ('ERROR: What output file should be generated?', 'type', target_type, 'target', target_name) product_prefix = spec.get('product_prefix', default_product_prefix) product_name = spec.get('product_name', default_product_name) product_ext = spec.get('product_extension') if product_ext: product_ext = '.' + product_ext else: product_ext = default_product_ext SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix) SetTargetProperty(output, cmake_target_name, cmake_target_type.property_modifier + '_OUTPUT_NAME', product_name) SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext) # Make the output of this target referenceable as a source. 
cmake_target_output_basename = product_prefix + product_name + product_ext cmake_target_output = os.path.join(cmake_target_output_directory, cmake_target_output_basename) SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '') # Let CMake know if the 'all' target should depend on this target. exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets else 'FALSE') SetTargetProperty(output, cmake_target_name, 'EXCLUDE_FROM_ALL', exclude_from_all) for extra_target_name in extra_deps: SetTargetProperty(output, extra_target_name, 'EXCLUDE_FROM_ALL', exclude_from_all) # Includes includes = config.get('include_dirs') if includes: # This (target include directories) is what requires CMake 2.8.8 includes_name = cmake_target_name + '__include_dirs' SetVariableList(output, includes_name, [NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include) for include in includes]) output.write('set_property(TARGET ') output.write(cmake_target_name) output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ') WriteVariable(output, includes_name, '') output.write(')\n') # Defines defines = config.get('defines') if defines is not None: SetTargetProperty(output, cmake_target_name, 'COMPILE_DEFINITIONS', defines, ';') # Compile Flags - http://www.cmake.org/Bug/view.php?id=6493 # CMake currently does not have target C and CXX flags. # So, instead of doing... # cflags_c = config.get('cflags_c') # if cflags_c is not None: # SetTargetProperty(output, cmake_target_name, # 'C_COMPILE_FLAGS', cflags_c, ' ') # cflags_cc = config.get('cflags_cc') # if cflags_cc is not None: # SetTargetProperty(output, cmake_target_name, # 'CXX_COMPILE_FLAGS', cflags_cc, ' ') # Instead we must... s_sources = [] c_sources = [] cxx_sources = [] for src in srcs: _, ext = os.path.splitext(src) src_type = COMPILABLE_EXTENSIONS.get(ext, None) if src_type == 's': s_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) if src_type == 'cc': c_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) if src_type == 'cxx': cxx_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) for extra_source in extra_sources: src, real_source = extra_source _, ext = os.path.splitext(real_source) src_type = COMPILABLE_EXTENSIONS.get(ext, None) if src_type == 's': s_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) if src_type == 'cc': c_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) if src_type == 'cxx': cxx_sources.append(NormjoinPath(path_from_cmakelists_to_gyp, src)) cflags = config.get('cflags', []) cflags_c = config.get('cflags_c', []) cflags_cxx = config.get('cflags_cc', []) if c_sources and not (s_sources or cxx_sources): flags = [] flags.extend(cflags) flags.extend(cflags_c) SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') elif cxx_sources and not (s_sources or c_sources): flags = [] flags.extend(cflags) flags.extend(cflags_cxx) SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') else: if s_sources and cflags: SetFilesProperty(output, s_sources, 'COMPILE_FLAGS', cflags, ' ') if c_sources and (cflags or cflags_c): flags = [] flags.extend(cflags) flags.extend(cflags_c) SetFilesProperty(output, c_sources, 'COMPILE_FLAGS', flags, ' ') if cxx_sources and (cflags or cflags_cxx): flags = [] flags.extend(cflags) flags.extend(cflags_cxx) SetFilesProperty(output, cxx_sources, 'COMPILE_FLAGS', flags, ' ') # Have assembly link as c if there are no other files if not c_sources and not cxx_sources and s_sources: SetTargetProperty(output, 
cmake_target_name, 'LINKER_LANGUAGE', ['C']) # Linker flags ldflags = config.get('ldflags') if ldflags is not None: SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ') # Note on Dependencies and Libraries: # CMake wants to handle link order, resolving the link line up front. # Gyp does not retain or enforce specifying enough information to do so. # So do as other gyp generators and use --start-group and --end-group. # Give CMake as little information as possible so that it doesn't mess it up. # Dependencies rawDeps = spec.get('dependencies', []) static_deps = [] shared_deps = [] other_deps = [] for rawDep in rawDeps: dep_cmake_name = namer.CreateCMakeTargetName(rawDep) dep_spec = target_dicts.get(rawDep, {}) dep_target_type = dep_spec.get('type', None) if dep_target_type == 'static_library': static_deps.append(dep_cmake_name) elif dep_target_type == 'shared_library': shared_deps.append(dep_cmake_name) else: other_deps.append(dep_cmake_name) # ensure all external dependencies are complete before internal dependencies # extra_deps currently only depend on their own deps, so otherwise run early if static_deps or shared_deps or other_deps: for extra_dep in extra_deps: output.write('add_dependencies(') output.write(extra_dep) output.write('\n') for deps in (static_deps, shared_deps, other_deps): for dep in gyp.common.uniquer(deps): output.write(' ') output.write(dep) output.write('\n') output.write(')\n') linkable = target_type in ('executable', 'loadable_module', 'shared_library') other_deps.extend(extra_deps) if other_deps or (not linkable and (static_deps or shared_deps)): output.write('add_dependencies(') output.write(cmake_target_name) output.write('\n') for dep in gyp.common.uniquer(other_deps): output.write(' ') output.write(dep) output.write('\n') if not linkable: for deps in (static_deps, shared_deps): for lib_dep in gyp.common.uniquer(deps): output.write(' ') output.write(lib_dep) output.write('\n') output.write(')\n') # Libraries if linkable: external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0] if external_libs or static_deps or shared_deps: output.write('target_link_libraries(') output.write(cmake_target_name) output.write('\n') if static_deps: write_group = circular_libs and len(static_deps) > 1 if write_group: output.write('-Wl,--start-group\n') for dep in gyp.common.uniquer(static_deps): output.write(' ') output.write(dep) output.write('\n') if write_group: output.write('-Wl,--end-group\n') if shared_deps: for dep in gyp.common.uniquer(shared_deps): output.write(' ') output.write(dep) output.write('\n') if external_libs: for lib in gyp.common.uniquer(external_libs): output.write(' ') output.write(lib) output.write('\n') output.write(')\n') UnsetVariable(output, 'TOOLSET') UnsetVariable(output, 'TARGET') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_to_use): options = params['options'] generator_flags = params['generator_flags'] # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to cmake easier, cmake doesn't put anything here. # Each Gyp configuration creates a different CMakeLists.txt file # to avoid incompatibilities between Gyp and CMake configurations. generator_dir = os.path.relpath(options.generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') # build_dir: relative path from source root to our output files. # e.g. 
"out/Debug" build_dir = os.path.normpath(os.path.join(generator_dir, output_dir, config_to_use)) toplevel_build = os.path.join(options.toplevel_dir, build_dir) output_file = os.path.join(toplevel_build, 'CMakeLists.txt') gyp.common.EnsureDirExists(output_file) output = open(output_file, 'w') output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n') output.write('cmake_policy(VERSION 2.8.8)\n') _, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1]) output.write('project(') output.write(project_target) output.write(')\n') SetVariable(output, 'configuration', config_to_use) # The following appears to be as-yet undocumented. # http://public.kitware.com/Bug/view.php?id=8392 output.write('enable_language(ASM)\n') # ASM-ATT does not support .S files. # output.write('enable_language(ASM-ATT)\n') SetVariable(output, 'builddir', '${CMAKE_BINARY_DIR}') SetVariable(output, 'obj', '${builddir}/obj') output.write('\n') # TODO: Undocumented/unsupported (the CMake Java generator depends on it). # CMake by default names the object resulting from foo.c to be foo.c.o. # Gyp traditionally names the object resulting from foo.c foo.o. # This should be irrelevant, but some targets extract .o files from .a # and depend on the name of the extracted .o files. output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n') output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n') output.write('\n') namer = CMakeNamer(target_list) # The list of targets upon which the 'all' target should depend. # CMake has it's own implicit 'all' target, one is not created explicitly. all_qualified_targets = set() for build_file in params['build_files']: for qualified_target in gyp.common.AllTargets(target_list, target_dicts, os.path.normpath(build_file)): all_qualified_targets.add(qualified_target) for qualified_target in target_list: WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output) output.close() def PerformBuild(data, configurations, params): options = params['options'] generator_flags = params['generator_flags'] # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to cmake easier, cmake doesn't put anything here. generator_dir = os.path.relpath(options.generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') for config_name in configurations: # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.normpath(os.path.join(generator_dir, output_dir, config_name)) arguments = ['cmake', '-G', 'Ninja'] print 'Generating [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments, cwd=build_dir) arguments = ['ninja', '-C', build_dir] print 'Building [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments) def CallGenerateOutputForConfig(arglist): # Ignore the interrupt signal so that the parent process catches it and # kills all multiprocessing children. 
signal.signal(signal.SIGINT, signal.SIG_IGN) target_list, target_dicts, data, params, config_name = arglist GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) def GenerateOutput(target_list, target_dicts, data, params): user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() if params['parallel']: try: pool = multiprocessing.Pool(len(config_names)) arglists = [] for config_name in config_names: arglists.append((target_list, target_dicts, data, params, config_name)) pool.map(CallGenerateOutputForConfig, arglists) except KeyboardInterrupt, e: pool.terminate() raise e else: for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
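# --- Illustrative sketch, not part of the original generator ---
# A minimal, hypothetical demo of how the low-level emit helpers above
# compose into CMake output; an in-memory buffer stands in for the
# CMakeLists.txt file handle that GenerateOutputForConfig opens.
def _demo_emit_helpers():
  try:
    from StringIO import StringIO  # Python 2, matching this module
  except ImportError:
    from io import StringIO  # fallback so the sketch also runs on Python 3
  buf = StringIO()
  # SetVariable escapes via CMakeStringEscape, so quotes, semicolons and
  # backslashes in the value cannot terminate or split the CMake string.
  SetVariable(buf, 'greeting', 'say "hi"; ok')
  # A multi-element list becomes list(APPEND ...), one quoted item per line.
  SetVariableList(buf, 'demo_srcs', ['a.cc', 'b.cc', 'c.cc'])
  SetTargetProperty(buf, 'demo', 'COMPILE_FLAGS', ['-O2', '-g'], ' ')
  return buf.getvalue()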
mit
Karkus476/pygame-tanks
entity.py
1
5935
import pygame, math, geom from vector import Vector mp = [0,0] class Entity: entities = [] bulletIds = [] bombIds = [] agentIds = [] trackIds = [] nextId = 0 def __init__(self, type, xywh = None, xyr = None): if xywh != None: self.rect = pygame.Rect(xywh[0],xywh[1],xywh[2],xywh[3]) self.isRect = True elif xyr != None: self.circle = [xyr[0], xyr[1], xyr[2]] self.isRect = False else: print("No location specified for entity") self.type = type self.ID = Entity.nextId Entity.nextId += 1 Entity.addEntity(self) @staticmethod def tickAll(keys, mousePos, player, level, delta): global mp mp = mousePos for e in Entity.entities: if e == None: continue if e.type == Entity.PLAYER: e.tick(keys, mousePos,level, delta) elif e.type == Entity.BULLET or e.type == Entity.BOMB: e.tick(level,delta) elif e.type == Entity.XPL: e.tick(delta) elif e.type >= 5: e.tick(player, level, delta) @staticmethod def drawAll(ts, surface, delta): Entity.drawBombs(surface) for e in Entity.entities: if e == None or e.type == Entity.BOMB or e.type == Entity.TRACK: continue if e.type == 0 or e.type >= 5: e.draw(ts, surface, delta) continue e.draw(surface) @staticmethod def drawBombs(surface): for bId in Entity.bombIds: b = Entity.entities[bId] b.draw(surface) @staticmethod def drawTracks(surface): for tId in Entity.trackIds: t = Entity.entities[tId] t.draw(surface) def tick(self, delta): pass def draw(self, surface): pass def destroy(self): Entity.removeEntity(self) def collideEntity(self, e): if self.isRect != e.isRect: if self.isRect: return geom.rectCircCollision(self.rect, e.circle) return geom.rectCircCollision(e.rect, self.circle) elif self.isRect: return self.rect.colliderect(e.rect) return geom.circCircCollision(self.circle, e.circle) @staticmethod def addEntity(e): if e.type == Entity.BULLET: Entity.bulletIds.append(e.ID) elif e.type == Entity.BOMB: Entity.bombIds.append(e.ID) elif e.type == Entity.TRACK: Entity.trackIds.append(e.ID) elif e.type >= 5: Entity.agentIds.append(e.ID) Entity.entities.append(e) @staticmethod def removeEntity(e): try: if e.type == Entity.BULLET: Entity.bulletIds.remove(e.ID) elif e.type == Entity.BOMB: Entity.bombIds.remove(e.ID) elif e.type == Entity.TRACK: Entity.trackIds.remove(e.ID) elif e.type >= 5: Entity.agentIds.remove(e.ID) except ValueError: pass Entity.entities[e.ID] = None def getW(self): if self.isRect: return self.rect.w else: return None def getH(self): if self.isRect: return self.rect.h else: return None def setW(self, w): if self.isRect: self.rect.w = w def setH(self, h): if self.isRect: self.rect.h = h def getR(self): if self.isRect: return None else: return self.circle[2] def setR(self, r): if not self.isRect: self.circle[2] = r def getX(self): if self.isRect: return self.rect.x else: return self.circle[0] def getY(self): if self.isRect: return self.rect.y else: return self.circle[1] def setX(self, x): if self.isRect: self.rect.x = x else: self.circle[0] = x def setY(self, y): if self.isRect: self.rect.y = y else: self.circle[1] = y def getLoc(self): return (self.getX(), self.getY()) def getLocVect(self): return Vector(self.getX(), self.getY()) def getSquare(self): loc = self.getLoc() return (math.floor(loc[0]/30), math.floor(loc[1]/30)) @staticmethod def clear(l): for e in Entity.entities: if not e: continue if e.type == 1 or e.type == 2: e.destroy() elif e.type == 3: e.safeDestroy() @staticmethod def typeToColour(type): if type == 0: return "Blue" elif type == 5: return "Brown" elif type == 6: return "Grey" elif type == 7: return "Teal" elif type == 8: return "Yellow" elif 
type == 9: return "Red" elif type == 10: return "Green" elif type == 12: return "Red" PLAYER = 0 BULLET = 1 TRACK = 2 BOMB = 3 XPL = 4 TURRET = 5 SOLDIER = 6 HUNTER = 7 BOMBER = 8 ROVER = 9#Slightly lighter than player 2 SNIPER = 10 WARRIOR = 11 SENTRY = 12 SCOUT = 13 SPY = 14 ASSASIN = 15 ELITE = 16 SPAWNER = 17 #Slightly lighter than player 1 TACTICIAN = 18 GENERAL = 19 DESTROYER = 20
gpl-3.0
SkRobo/Eurobot-2017
old year/RESET-master/FSM/packetParser.py
4
4413
import typeConvertor import packetBuilder import crc import binascii class ParsePacket(object): synchronization = 'FA' adress = 'FA' typeConvertor = typeConvertor.TypeConvertor() crcCalculator = crc.CrcCalculator() listOfComands = packetBuilder.CommandsList() def __init__(self, byteString): self.string = byteString self.byteArray = self.GetByteArrayFromRecievedString() self.recievedSynchronizationByte = self.byteArray[0] self.recievedAdress = self.byteArray[1] self.packetLenght = self.byteArray[2] self.command = self.byteArray[3] self.reply = self.GetReply() self.recievedcrc = self.typeConvertor.IntToHex(self.byteArray[len(self.byteArray) - 2]) + self.typeConvertor.IntToHex(self.byteArray[len(self.byteArray) - 1]) self.CheckCrc() def GetReply(self): if self.listOfComands.getCurentCoordinates == self.command or self.listOfComands.getCurrentSpeed == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) coordinatesList = [] coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[16:24])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[8:16])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[0:8])) return coordinatesList if self.listOfComands.getDataIKSensors == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) coordinatesList = [] coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[24:32])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[16:24])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[8:16])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[0:8])) return coordinatesList if self.listOfComands.getDataUSSensors == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) coordinatesList = [] coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[32:40])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[24:32])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[16:24])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[8:16])) coordinatesList.append(self.typeConvertor.HexToFloat(invertedParametersString[0:8])) return coordinatesList if self.listOfComands.closeCubeCollector == self.command or self.listOfComands.getADCPinState == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) # check int 16 or int 8 return int(invertedParametersString, 16) if self.listOfComands.isPointWasReached == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) return int(invertedParametersString, 16) if self.listOfComands.getManipulatorAngle == self.command: recievedPametersString = binascii.hexlify(self.byteArray[4:len(self.byteArray) - 2]) invertedParametersString = self.typeConvertor.InvertStringArray(recievedPametersString) return 
self.typeConvertor.HexToFloat(invertedParametersString) return self.byteArray[4:len(self.byteArray) - 2][:-1] def CheckCrc(self): calculatedCrc = self.crcCalculator.GetTwoBytesFromCrc32(self.byteArray[0:len(self.byteArray) - 2]) if calculatedCrc != self.recievedcrc: raise Exception('Expected CRC: ' + self.recievedcrc + ', but calculated: ' + calculatedCrc) def GetByteArrayFromRecievedString(self): byteString = "" for index in range(0, len(self.string)): hexSymbol = self.typeConvertor.IntToHex(ord(str(self.string[index]))) #print str(index) + ': ' + hexSymbol byteString = byteString + hexSymbol byteString = bytearray.fromhex(byteString) #print 'end' return byteString
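# --- Illustrative sketch, not part of the parser ---
# The coordinate replies above are decoded by hex-dumping the payload,
# reversing it with InvertStringArray and slicing 8 hex digits per value.
# Assuming InvertStringArray reverses the payload bytewise (which is what
# the slice order implies), the same decode can be written directly with
# the standard struct module: consecutive little-endian float32 values.
import struct

def decode_coordinates_le(payload):
    """payload: 12 raw bytes -> [x, y, theta] as little-endian float32s."""
    return list(struct.unpack('<3f', payload))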
mit
thfield/sf-base-election-data
venv/lib/python3.4/site-packages/pip/_vendor/distlib/index.py
571
20976
# -*- coding: utf-8 -*- # # Copyright (C) 2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import hashlib import logging import os import shutil import subprocess import tempfile try: from threading import Thread except ImportError: from dummy_threading import Thread from . import DistlibException from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, urlparse, build_opener, string_types) from .util import cached_property, zip_dir, ServerProxy logger = logging.getLogger(__name__) DEFAULT_INDEX = 'https://pypi.python.org/pypi' DEFAULT_REALM = 'pypi' class PackageIndex(object): """ This class represents a package index compatible with PyPI, the Python Package Index. """ boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$' def __init__(self, url=None): """ Initialise an instance. :param url: The URL of the index. If not specified, the URL for PyPI is used. """ self.url = url or DEFAULT_INDEX self.read_configuration() scheme, netloc, path, params, query, frag = urlparse(self.url) if params or query or frag or scheme not in ('http', 'https'): raise DistlibException('invalid repository: %s' % self.url) self.password_handler = None self.ssl_verifier = None self.gpg = None self.gpg_home = None self.rpc_proxy = None with open(os.devnull, 'w') as sink: for s in ('gpg2', 'gpg'): try: rc = subprocess.check_call([s, '--version'], stdout=sink, stderr=sink) if rc == 0: self.gpg = s break except OSError: pass def _get_pypirc_command(self): """ Get the distutils command for interacting with PyPI configurations. :return: the command. """ from distutils.core import Distribution from distutils.config import PyPIRCCommand d = Distribution() return PyPIRCCommand(d) def read_configuration(self): """ Read the PyPI access configuration as supported by distutils, getting PyPI to do the actual work. This populates ``username``, ``password``, ``realm`` and ``url`` attributes from the configuration. """ # get distutils to do the work c = self._get_pypirc_command() c.repository = self.url cfg = c._read_pypirc() self.username = cfg.get('username') self.password = cfg.get('password') self.realm = cfg.get('realm', 'pypi') self.url = cfg.get('repository', self.url) def save_configuration(self): """ Save the PyPI access configuration. You must have set ``username`` and ``password`` attributes before calling this method. Again, distutils is used to do the actual work. """ self.check_credentials() # get distutils to do the work c = self._get_pypirc_command() c._store_pypirc(self.username, self.password) def check_credentials(self): """ Check that ``username`` and ``password`` have been set, and raise an exception if not. """ if self.username is None or self.password is None: raise DistlibException('username and password must be set') pm = HTTPPasswordMgr() _, netloc, _, _, _, _ = urlparse(self.url) pm.add_password(self.realm, netloc, self.username, self.password) self.password_handler = HTTPBasicAuthHandler(pm) def register(self, metadata): """ Register a distribution on PyPI, using the provided metadata. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the distribution to be registered. :return: The HTTP response received from PyPI upon submission of the request.
""" self.check_credentials() metadata.validate() d = metadata.todict() d[':action'] = 'verify' request = self.encode_request(d.items(), []) response = self.send_request(request) d[':action'] = 'submit' request = self.encode_request(d.items(), []) return self.send_request(request) def _reader(self, name, stream, outbuf): """ Thread runner for reading lines of from a subprocess into a buffer. :param name: The logical name of the stream (used for logging only). :param stream: The stream to read from. This will typically a pipe connected to the output stream of a subprocess. :param outbuf: The list to append the read lines to. """ while True: s = stream.readline() if not s: break s = s.decode('utf-8').rstrip() outbuf.append(s) logger.debug('%s: %s' % (name, s)) stream.close() def get_sign_command(self, filename, signer, sign_password, keystore=None): """ Return a suitable command for signing a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The signing command as a list suitable to be passed to :class:`subprocess.Popen`. """ cmd = [self.gpg, '--status-fd', '2', '--no-tty'] if keystore is None: keystore = self.gpg_home if keystore: cmd.extend(['--homedir', keystore]) if sign_password is not None: cmd.extend(['--batch', '--passphrase-fd', '0']) td = tempfile.mkdtemp() sf = os.path.join(td, os.path.basename(filename) + '.asc') cmd.extend(['--detach-sign', '--armor', '--local-user', signer, '--output', sf, filename]) logger.debug('invoking: %s', ' '.join(cmd)) return cmd, sf def run_command(self, cmd, input_data=None): """ Run a command in a child process , passing it any input data specified. :param cmd: The command to run. :param input_data: If specified, this must be a byte string containing data to be sent to the child process. :return: A tuple consisting of the subprocess' exit code, a list of lines read from the subprocess' ``stdout``, and a list of lines read from the subprocess' ``stderr``. """ kwargs = { 'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, } if input_data is not None: kwargs['stdin'] = subprocess.PIPE stdout = [] stderr = [] p = subprocess.Popen(cmd, **kwargs) # We don't use communicate() here because we may need to # get clever with interacting with the command t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout)) t1.start() t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr)) t2.start() if input_data is not None: p.stdin.write(input_data) p.stdin.close() p.wait() t1.join() t2.join() return p.returncode, stdout, stderr def sign_file(self, filename, signer, sign_password, keystore=None): """ Sign a file. :param filename: The pathname to the file to be signed. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param keystore: The path to a directory which contains the keys used in signing. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The absolute pathname of the file where the signature is stored. 
""" cmd, sig_file = self.get_sign_command(filename, signer, sign_password, keystore) rc, stdout, stderr = self.run_command(cmd, sign_password.encode('utf-8')) if rc != 0: raise DistlibException('sign command failed with error ' 'code %s' % rc) return sig_file def upload_file(self, metadata, filename, signer=None, sign_password=None, filetype='sdist', pyversion='source', keystore=None): """ Upload a release file to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the file to be uploaded. :param filename: The pathname of the file to be uploaded. :param signer: The identifier of the signer of the file. :param sign_password: The passphrase for the signer's private key used for signing. :param filetype: The type of the file being uploaded. This is the distutils command which produced that file, e.g. ``sdist`` or ``bdist_wheel``. :param pyversion: The version of Python which the release relates to. For code compatible with any Python, this would be ``source``, otherwise it would be e.g. ``3.2``. :param keystore: The path to a directory which contains the keys used in signing. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The HTTP response received from PyPI upon submission of the request. """ self.check_credentials() if not os.path.exists(filename): raise DistlibException('not found: %s' % filename) metadata.validate() d = metadata.todict() sig_file = None if signer: if not self.gpg: logger.warning('no signing program available - not signed') else: sig_file = self.sign_file(filename, signer, sign_password, keystore) with open(filename, 'rb') as f: file_data = f.read() md5_digest = hashlib.md5(file_data).hexdigest() sha256_digest = hashlib.sha256(file_data).hexdigest() d.update({ ':action': 'file_upload', 'protcol_version': '1', 'filetype': filetype, 'pyversion': pyversion, 'md5_digest': md5_digest, 'sha256_digest': sha256_digest, }) files = [('content', os.path.basename(filename), file_data)] if sig_file: with open(sig_file, 'rb') as f: sig_data = f.read() files.append(('gpg_signature', os.path.basename(sig_file), sig_data)) shutil.rmtree(os.path.dirname(sig_file)) request = self.encode_request(d.items(), files) return self.send_request(request) def upload_documentation(self, metadata, doc_dir): """ Upload documentation to the index. :param metadata: A :class:`Metadata` instance defining at least a name and version number for the documentation to be uploaded. :param doc_dir: The pathname of the directory which contains the documentation. This should be the directory that contains the ``index.html`` for the documentation. :return: The HTTP response received from PyPI upon submission of the request. """ self.check_credentials() if not os.path.isdir(doc_dir): raise DistlibException('not a directory: %r' % doc_dir) fn = os.path.join(doc_dir, 'index.html') if not os.path.exists(fn): raise DistlibException('not found: %r' % fn) metadata.validate() name, version = metadata.name, metadata.version zip_data = zip_dir(doc_dir).getvalue() fields = [(':action', 'doc_upload'), ('name', name), ('version', version)] files = [('content', name, zip_data)] request = self.encode_request(fields, files) return self.send_request(request) def get_verify_command(self, signature_filename, data_filename, keystore=None): """ Return a suitable command for verifying a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. 
:param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: The verifying command as a list suitable to be passed to :class:`subprocess.Popen`. """ cmd = [self.gpg, '--status-fd', '2', '--no-tty'] if keystore is None: keystore = self.gpg_home if keystore: cmd.extend(['--homedir', keystore]) cmd.extend(['--verify', signature_filename, data_filename]) logger.debug('invoking: %s', ' '.join(cmd)) return cmd def verify_signature(self, signature_filename, data_filename, keystore=None): """ Verify a signature for a file. :param signature_filename: The pathname to the file containing the signature. :param data_filename: The pathname to the file containing the signed data. :param keystore: The path to a directory which contains the keys used in verification. If not specified, the instance's ``gpg_home`` attribute is used instead. :return: True if the signature was verified, else False. """ if not self.gpg: raise DistlibException('verification unavailable because gpg ' 'unavailable') cmd = self.get_verify_command(signature_filename, data_filename, keystore) rc, stdout, stderr = self.run_command(cmd) if rc not in (0, 1): raise DistlibException('verify command failed with error ' 'code %s' % rc) return rc == 0 def download_file(self, url, destfile, digest=None, reporthook=None): """ This is a convenience method for downloading a file from an URL. Normally, this will be a file from the index, though currently no check is made for this (i.e. a file can be downloaded from anywhere). The method is just like the :func:`urlretrieve` function in the standard library, except that it allows digest computation to be done during download and checking that the downloaded data matched any expected value. :param url: The URL of the file to be downloaded (assumed to be available via an HTTP GET request). :param destfile: The pathname where the downloaded file is to be saved. :param digest: If specified, this must be a (hasher, value) tuple, where hasher is the algorithm used (e.g. ``'md5'``) and ``value`` is the expected value. :param reporthook: The same as for :func:`urlretrieve` in the standard library. """ if digest is None: digester = None logger.debug('No digest specified') else: if isinstance(digest, (list, tuple)): hasher, digest = digest else: hasher = 'md5' digester = getattr(hashlib, hasher)() logger.debug('Digest specified: %s' % digest) # The following code is equivalent to urlretrieve. # We need to do it this way so that we can compute the # digest of the file as we go. with open(destfile, 'wb') as dfp: # addinfourl is not a context manager on 2.x # so we have to use try/finally sfp = self.send_request(Request(url)) try: headers = sfp.info() blocksize = 8192 size = -1 read = 0 blocknum = 0 if "content-length" in headers: size = int(headers["Content-Length"]) if reporthook: reporthook(blocknum, blocksize, size) while True: block = sfp.read(blocksize) if not block: break read += len(block) dfp.write(block) if digester: digester.update(block) blocknum += 1 if reporthook: reporthook(blocknum, blocksize, size) finally: sfp.close() # check that we got the whole file, if we can if size >= 0 and read < size: raise DistlibException( 'retrieval incomplete: got only %d out of %d bytes' % (read, size)) # if we have a digest, it must match. 
if digester: actual = digester.hexdigest() if digest != actual: raise DistlibException('%s digest mismatch for %s: expected ' '%s, got %s' % (hasher, destfile, digest, actual)) logger.debug('Digest verified: %s', digest) def send_request(self, req): """ Send a standard library :class:`Request` to PyPI and return its response. :param req: The request to send. :return: The HTTP response from PyPI (a standard library HTTPResponse). """ handlers = [] if self.password_handler: handlers.append(self.password_handler) if self.ssl_verifier: handlers.append(self.ssl_verifier) opener = build_opener(*handlers) return opener.open(req) def encode_request(self, fields, files): """ Encode fields and files for posting to an HTTP server. :param fields: The fields to send as a list of (fieldname, value) tuples. :param files: The files to send as a list of (fieldname, filename, file_bytes) tuple. """ # Adapted from packaging, which in turn was adapted from # http://code.activestate.com/recipes/146306 parts = [] boundary = self.boundary for k, values in fields: if not isinstance(values, (list, tuple)): values = [values] for v in values: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"' % k).encode('utf-8'), b'', v.encode('utf-8'))) for key, filename, value in files: parts.extend(( b'--' + boundary, ('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)).encode('utf-8'), b'', value)) parts.extend((b'--' + boundary + b'--', b'')) body = b'\r\n'.join(parts) ct = b'multipart/form-data; boundary=' + boundary headers = { 'Content-type': ct, 'Content-length': str(len(body)) } return Request(self.url, body, headers) def search(self, terms, operator=None): if isinstance(terms, string_types): terms = {'name': terms} if self.rpc_proxy is None: self.rpc_proxy = ServerProxy(self.url, timeout=3.0) return self.rpc_proxy.search(terms, operator or 'and')
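# --- Illustrative sketch, not part of distlib ---
# How the multipart encoder above frames fields and files. Constructing
# PackageIndex() only reads local configuration and probes for gpg, so
# this runs offline; the field values and file bytes are placeholders,
# not a real PyPI submission.
def _demo_encode_request():
    index = PackageIndex()
    request = index.encode_request(
        [('name', 'demo'), ('version', '0.1')],
        [('content', 'demo-0.1.tar.gz', b'placeholder bytes')])
    # The body is multipart/form-data, delimited by the class-level boundary.
    return request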
bsd-3-clause
ruibarreira/linuxtrail
usr/lib/python2.7/dist-packages/numpy/distutils/lib2def.py
57
3487
from __future__ import division, absolute_import, print_function import re import sys import os import subprocess __doc__ = """This module generates a DEF file from the symbols in an MSVC-compiled DLL import library. It correctly discriminates between data and functions. The data is collected from the output of the program nm(1). Usage: python lib2def.py [libname.lib] [output.def] or python lib2def.py [libname.lib] > output.def libname.lib defaults to python<py_ver>.lib and output.def defaults to stdout Author: Robert Kern <kernr@mail.ncifcrf.gov> Last Update: April 30, 1999 """ __version__ = '0.1a' py_ver = "%d%d" % tuple(sys.version_info[:2]) DEFAULT_NM = 'nm -Cs' DEF_HEADER = """LIBRARY python%s.dll ;CODE PRELOAD MOVEABLE DISCARDABLE ;DATA PRELOAD SINGLE EXPORTS """ % py_ver # the header of the DEF file FUNC_RE = re.compile(r"^(.*) in python%s\.dll" % py_ver, re.MULTILINE) DATA_RE = re.compile(r"^_imp__(.*) in python%s\.dll" % py_ver, re.MULTILINE) def parse_cmd(): """Parses the command-line arguments. libfile, deffile = parse_cmd()""" if len(sys.argv) == 3: if sys.argv[1][-4:] == '.lib' and sys.argv[2][-4:] == '.def': libfile, deffile = sys.argv[1:] elif sys.argv[1][-4:] == '.def' and sys.argv[2][-4:] == '.lib': deffile, libfile = sys.argv[1:] else: print("I'm assuming that your first argument is the library") print("and the second is the DEF file.") elif len(sys.argv) == 2: if sys.argv[1][-4:] == '.def': deffile = sys.argv[1] libfile = 'python%s.lib' % py_ver elif sys.argv[1][-4:] == '.lib': deffile = None libfile = sys.argv[1] else: libfile = 'python%s.lib' % py_ver deffile = None return libfile, deffile def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]): """Returns the output of nm_cmd via a pipe. nm_output = getnm(nm_cmd = 'nm -Cs py_lib')""" f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE) nm_output = f.stdout.read() f.stdout.close() return nm_output def parse_nm(nm_output): """Returns a tuple of lists: dlist for the list of data symbols and flist for the list of function symbols. dlist, flist = parse_nm(nm_output)""" data = DATA_RE.findall(nm_output) func = FUNC_RE.findall(nm_output) flist = [] for sym in data: if sym in func and (sym[:2] == 'Py' or sym[:3] == '_Py' or sym[:4] == 'init'): flist.append(sym) dlist = [] for sym in data: if sym not in flist and (sym[:2] == 'Py' or sym[:3] == '_Py'): dlist.append(sym) dlist.sort() flist.sort() return dlist, flist def output_def(dlist, flist, header, file = sys.stdout): """Outputs the final DEF file to a file defaulting to stdout. output_def(dlist, flist, header, file = sys.stdout)""" for data_sym in dlist: header = header + '\t%s DATA\n' % data_sym header = header + '\n' # blank line for func_sym in flist: header = header + '\t%s\n' % func_sym file.write(header) if __name__ == '__main__': libfile, deffile = parse_cmd() if deffile is None: deffile = sys.stdout else: deffile = open(deffile, 'w') nm_cmd = [str(DEFAULT_NM), str(libfile)] nm_output = getnm(nm_cmd) dlist, flist = parse_nm(nm_output) output_def(dlist, flist, DEF_HEADER, deffile)
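# --- Illustrative sketch, not part of the module ---
# output_def only formats text, so the DEF layout can be previewed without
# an import library; the symbol names below are fabricated examples. Call
# this by hand to see the emitted layout on stdout.
def _demo_def_layout():
    dlist = ['PyExc_ValueError']                # data symbols get a DATA suffix
    flist = ['PyFloat_FromDouble', 'initdemo']  # functions are listed bare
    output_def(dlist, flist, DEF_HEADER)        # writes to stdout by default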
gpl-3.0
ZJUZT/google-diff-match-patch
python2/diff_match_patch.py
337
67934
#!/usr/bin/python2.4 from __future__ import division """Diff Match and Patch Copyright 2006 Google Inc. http://code.google.com/p/google-diff-match-patch/ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """Functions for diff, match and patch. Computes the difference between two texts to create a patch. Applies the patch onto another text, allowing for errors. """ __author__ = 'fraser@google.com (Neil Fraser)' import math import re import sys import time import urllib class diff_match_patch: """Class containing the diff, match and patch methods. Also contains the behaviour settings. """ def __init__(self): """Inits a diff_match_patch object with default settings. Redefine these in your program to override the defaults. """ # Number of seconds to map a diff before giving up (0 for infinity). self.Diff_Timeout = 1.0 # Cost of an empty edit operation in terms of edit characters. self.Diff_EditCost = 4 # At what point is no match declared (0.0 = perfection, 1.0 = very loose). self.Match_Threshold = 0.5 # How far to search for a match (0 = exact location, 1000+ = broad match). # A match this many characters away from the expected location will add # 1.0 to the score (0.0 is a perfect match). self.Match_Distance = 1000 # When deleting a large block of text (over ~64 characters), how close do # the contents have to be to match the expected contents. (0.0 = perfection, # 1.0 = very loose). Note that Match_Threshold controls how closely the # end points of a delete need to match. self.Patch_DeleteThreshold = 0.5 # Chunk size for context length. self.Patch_Margin = 4 # The number of bits in an int. # Python has no maximum, thus to disable patch splitting set to 0. # However to avoid long patches in certain pathological cases, use 32. # Multiple short patches (using native ints) are much faster than long ones. self.Match_MaxBits = 32 # DIFF FUNCTIONS # The data structure representing a diff is an array of tuples: # [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")] # which means: delete "Hello", add "Goodbye" and keep " world." DIFF_DELETE = -1 DIFF_INSERT = 1 DIFF_EQUAL = 0 def diff_main(self, text1, text2, checklines=True, deadline=None): """Find the differences between two texts. Simplifies the problem by stripping any common prefix or suffix off the texts before diffing. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Optional speedup flag. If present and false, then don't run a line-level diff first to identify the changed areas. Defaults to true, which does a faster, slightly less optimal diff. deadline: Optional time when the diff should be complete by. Used internally for recursive calls. Users should set DiffTimeout instead. Returns: Array of changes. """ # Set a deadline by which time the diff must be complete. if deadline == None: # Unlike in most languages, Python counts time in seconds. if self.Diff_Timeout <= 0: deadline = sys.maxint else: deadline = time.time() + self.Diff_Timeout # Check for null inputs. 
if text1 == None or text2 == None: raise ValueError("Null inputs. (diff_main)") # Check for equality (speedup). if text1 == text2: if text1: return [(self.DIFF_EQUAL, text1)] return [] # Trim off common prefix (speedup). commonlength = self.diff_commonPrefix(text1, text2) commonprefix = text1[:commonlength] text1 = text1[commonlength:] text2 = text2[commonlength:] # Trim off common suffix (speedup). commonlength = self.diff_commonSuffix(text1, text2) if commonlength == 0: commonsuffix = '' else: commonsuffix = text1[-commonlength:] text1 = text1[:-commonlength] text2 = text2[:-commonlength] # Compute the diff on the middle block. diffs = self.diff_compute(text1, text2, checklines, deadline) # Restore the prefix and suffix. if commonprefix: diffs[:0] = [(self.DIFF_EQUAL, commonprefix)] if commonsuffix: diffs.append((self.DIFF_EQUAL, commonsuffix)) self.diff_cleanupMerge(diffs) return diffs def diff_compute(self, text1, text2, checklines, deadline): """Find the differences between two texts. Assumes that the texts do not have any common prefix or suffix. Args: text1: Old string to be diffed. text2: New string to be diffed. checklines: Speedup flag. If false, then don't run a line-level diff first to identify the changed areas. If true, then run a faster, slightly less optimal diff. deadline: Time when the diff should be complete by. Returns: Array of changes. """ if not text1: # Just add some text (speedup). return [(self.DIFF_INSERT, text2)] if not text2: # Just delete some text (speedup). return [(self.DIFF_DELETE, text1)] if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) i = longtext.find(shorttext) if i != -1: # Shorter text is inside the longer text (speedup). diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext), (self.DIFF_INSERT, longtext[i + len(shorttext):])] # Swap insertions for deletions if diff is reversed. if len(text1) > len(text2): diffs[0] = (self.DIFF_DELETE, diffs[0][1]) diffs[2] = (self.DIFF_DELETE, diffs[2][1]) return diffs if len(shorttext) == 1: # Single character string. # After the previous speedup, the character can't be an equality. return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)] # Check to see if the problem can be split in two. hm = self.diff_halfMatch(text1, text2) if hm: # A half-match was found, sort out the return data. (text1_a, text1_b, text2_a, text2_b, mid_common) = hm # Send both pairs off for separate processing. diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline) diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline) # Merge the results. return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b if checklines and len(text1) > 100 and len(text2) > 100: return self.diff_lineMode(text1, text2, deadline) return self.diff_bisect(text1, text2, deadline) def diff_lineMode(self, text1, text2, deadline): """Do a quick line-level diff on both strings, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time when the diff should be complete by. Returns: Array of changes. """ # Scan the text on a line-by-line basis first. (text1, text2, linearray) = self.diff_linesToChars(text1, text2) diffs = self.diff_main(text1, text2, False, deadline) # Convert the diff back to original text. self.diff_charsToLines(diffs, linearray) # Eliminate freak matches (e.g. 
blank lines) self.diff_cleanupSemantic(diffs) # Rediff any replacement blocks, this time character-by-character. # Add a dummy entry at the end. diffs.append((self.DIFF_EQUAL, '')) pointer = 0 count_delete = 0 count_insert = 0 text_delete = '' text_insert = '' while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_INSERT: count_insert += 1 text_insert += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_DELETE: count_delete += 1 text_delete += diffs[pointer][1] elif diffs[pointer][0] == self.DIFF_EQUAL: # Upon reaching an equality, check for prior redundancies. if count_delete >= 1 and count_insert >= 1: # Delete the offending records and add the merged ones. a = self.diff_main(text_delete, text_insert, False, deadline) diffs[pointer - count_delete - count_insert : pointer] = a pointer = pointer - count_delete - count_insert + len(a) count_insert = 0 count_delete = 0 text_delete = '' text_insert = '' pointer += 1 diffs.pop() # Remove the dummy entry at the end. return diffs def diff_bisect(self, text1, text2, deadline): """Find the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. Args: text1: Old string to be diffed. text2: New string to be diffed. deadline: Time at which to bail if not yet complete. Returns: Array of diff tuples. """ # Cache the text lengths to prevent multiple calls. text1_length = len(text1) text2_length = len(text2) max_d = (text1_length + text2_length + 1) // 2 v_offset = max_d v_length = 2 * max_d v1 = [-1] * v_length v1[v_offset + 1] = 0 v2 = v1[:] delta = text1_length - text2_length # If the total number of characters is odd, then the front path will # collide with the reverse path. front = (delta % 2 != 0) # Offsets for start and end of k loop. # Prevents mapping of space beyond the grid. k1start = 0 k1end = 0 k2start = 0 k2end = 0 for d in xrange(max_d): # Bail out if deadline is reached. if time.time() > deadline: break # Walk the front path one step. for k1 in xrange(-d + k1start, d + 1 - k1end, 2): k1_offset = v_offset + k1 if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]): x1 = v1[k1_offset + 1] else: x1 = v1[k1_offset - 1] + 1 y1 = x1 - k1 while (x1 < text1_length and y1 < text2_length and text1[x1] == text2[y1]): x1 += 1 y1 += 1 v1[k1_offset] = x1 if x1 > text1_length: # Ran off the right of the graph. k1end += 2 elif y1 > text2_length: # Ran off the bottom of the graph. k1start += 2 elif front: k2_offset = v_offset + delta - k1 if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1: # Mirror x2 onto top-left coordinate system. x2 = text1_length - v2[k2_offset] if x1 >= x2: # Overlap detected. return self.diff_bisectSplit(text1, text2, x1, y1, deadline) # Walk the reverse path one step. for k2 in xrange(-d + k2start, d + 1 - k2end, 2): k2_offset = v_offset + k2 if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]): x2 = v2[k2_offset + 1] else: x2 = v2[k2_offset - 1] + 1 y2 = x2 - k2 while (x2 < text1_length and y2 < text2_length and text1[-x2 - 1] == text2[-y2 - 1]): x2 += 1 y2 += 1 v2[k2_offset] = x2 if x2 > text1_length: # Ran off the left of the graph. k2end += 2 elif y2 > text2_length: # Ran off the top of the graph. k2start += 2 elif not front: k1_offset = v_offset + delta - k2 if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1: x1 = v1[k1_offset] y1 = v_offset + x1 - k1_offset # Mirror x2 onto top-left coordinate system. 
          x2 = text1_length - x2
          if x1 >= x2:
            # Overlap detected.
            return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

    # Diff took too long and hit the deadline or
    # number of diffs equals number of characters, no commonality at all.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

  def diff_bisectSplit(self, text1, text2, x, y, deadline):
    """Given the location of the 'middle snake', split the diff in two parts
    and recurse.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      x: Index of split point in text1.
      y: Index of split point in text2.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """
    text1a = text1[:x]
    text2a = text2[:y]
    text1b = text1[x:]
    text2b = text2[y:]

    # Compute both diffs serially.
    diffs = self.diff_main(text1a, text2a, False, deadline)
    diffsb = self.diff_main(text1b, text2b, False, deadline)

    return diffs + diffsb

  def diff_linesToChars(self, text1, text2):
    """Split two texts into an array of strings. Reduce the texts to a string
    of hashes where each Unicode character represents one line.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Three element tuple, containing the encoded text1, the encoded text2 and
      the array of unique strings. The zeroth element of the array of unique
      strings is intentionally blank.
    """
    lineArray = []  # e.g. lineArray[4] == "Hello\n"
    lineHash = {}   # e.g. lineHash["Hello\n"] == 4

    # "\x00" is a valid character, but various debuggers don't like it.
    # So we'll insert a junk entry to avoid generating a null character.
    lineArray.append('')

    def diff_linesToCharsMunge(text):
      """Split a text into an array of strings. Reduce the texts to a string
      of hashes where each Unicode character represents one line.
      Modifies linearray and linehash through being a closure.

      Args:
        text: String to encode.

      Returns:
        Encoded string.
      """
      chars = []
      # Walk the text, pulling out a substring for each line.
      # text.split('\n') would temporarily double our memory footprint.
      # Modifying text would create many large strings to garbage collect.
      lineStart = 0
      lineEnd = -1
      while lineEnd < len(text) - 1:
        lineEnd = text.find('\n', lineStart)
        if lineEnd == -1:
          lineEnd = len(text) - 1
        line = text[lineStart:lineEnd + 1]
        lineStart = lineEnd + 1

        if line in lineHash:
          chars.append(unichr(lineHash[line]))
        else:
          lineArray.append(line)
          lineHash[line] = len(lineArray) - 1
          chars.append(unichr(len(lineArray) - 1))
      return "".join(chars)

    chars1 = diff_linesToCharsMunge(text1)
    chars2 = diff_linesToCharsMunge(text2)
    return (chars1, chars2, lineArray)

  def diff_charsToLines(self, diffs, lineArray):
    """Rehydrate the text in a diff from a string of line hashes to real lines
    of text.

    Args:
      diffs: Array of diff tuples.
      lineArray: Array of unique strings.
    """
    for x in xrange(len(diffs)):
      text = []
      for char in diffs[x][1]:
        text.append(lineArray[ord(char)])
      diffs[x] = (diffs[x][0], "".join(text))

  def diff_commonPrefix(self, text1, text2):
    """Determine the common prefix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the start of each string.
    """
    # Quick check for common null cases.
    if not text1 or not text2 or text1[0] != text2[0]:
      return 0
    # Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/ pointermin = 0 pointermax = min(len(text1), len(text2)) pointermid = pointermax pointerstart = 0 while pointermin < pointermid: if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]: pointermin = pointermid pointerstart = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin return pointermid def diff_commonSuffix(self, text1, text2): """Determine the common suffix of two strings. Args: text1: First string. text2: Second string. Returns: The number of characters common to the end of each string. """ # Quick check for common null cases. if not text1 or not text2 or text1[-1] != text2[-1]: return 0 # Binary search. # Performance analysis: http://neil.fraser.name/news/2007/10/09/ pointermin = 0 pointermax = min(len(text1), len(text2)) pointermid = pointermax pointerend = 0 while pointermin < pointermid: if (text1[-pointermid:len(text1) - pointerend] == text2[-pointermid:len(text2) - pointerend]): pointermin = pointermid pointerend = pointermin else: pointermax = pointermid pointermid = (pointermax - pointermin) // 2 + pointermin return pointermid def diff_commonOverlap(self, text1, text2): """Determine if the suffix of one string is the prefix of another. Args: text1 First string. text2 Second string. Returns: The number of characters common to the end of the first string and the start of the second string. """ # Cache the text lengths to prevent multiple calls. text1_length = len(text1) text2_length = len(text2) # Eliminate the null case. if text1_length == 0 or text2_length == 0: return 0 # Truncate the longer string. if text1_length > text2_length: text1 = text1[-text2_length:] elif text1_length < text2_length: text2 = text2[:text1_length] text_length = min(text1_length, text2_length) # Quick check for the worst case. if text1 == text2: return text_length # Start by looking for a single character match # and increase length until no match is found. # Performance analysis: http://neil.fraser.name/news/2010/11/04/ best = 0 length = 1 while True: pattern = text1[-length:] found = text2.find(pattern) if found == -1: return best length += found if found == 0 or text1[-length:] == text2[:length]: best = length length += 1 def diff_halfMatch(self, text1, text2): """Do the two texts share a substring which is at least half the length of the longer text? This speedup can produce non-minimal diffs. Args: text1: First string. text2: Second string. Returns: Five element Array, containing the prefix of text1, the suffix of text1, the prefix of text2, the suffix of text2 and the common middle. Or None if there was no match. """ if self.Diff_Timeout <= 0: # Don't risk returning a non-optimal diff if we have unlimited time. return None if len(text1) > len(text2): (longtext, shorttext) = (text1, text2) else: (shorttext, longtext) = (text1, text2) if len(longtext) < 4 or len(shorttext) * 2 < len(longtext): return None # Pointless. def diff_halfMatchI(longtext, shorttext, i): """Does a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? Closure, but does not reference any external variables. Args: longtext: Longer string. shorttext: Shorter string. i: Start index of quarter length substring within longtext. Returns: Five element Array, containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle. Or None if there was no match. 
""" seed = longtext[i:i + len(longtext) // 4] best_common = '' j = shorttext.find(seed) while j != -1: prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:]) suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j]) if len(best_common) < suffixLength + prefixLength: best_common = (shorttext[j - suffixLength:j] + shorttext[j:j + prefixLength]) best_longtext_a = longtext[:i - suffixLength] best_longtext_b = longtext[i + prefixLength:] best_shorttext_a = shorttext[:j - suffixLength] best_shorttext_b = shorttext[j + prefixLength:] j = shorttext.find(seed, j + 1) if len(best_common) * 2 >= len(longtext): return (best_longtext_a, best_longtext_b, best_shorttext_a, best_shorttext_b, best_common) else: return None # First check if the second quarter is the seed for a half-match. hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4) # Check again based on the third quarter. hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2) if not hm1 and not hm2: return None elif not hm2: hm = hm1 elif not hm1: hm = hm2 else: # Both matched. Select the longest. if len(hm1[4]) > len(hm2[4]): hm = hm1 else: hm = hm2 # A half-match was found, sort out the return data. if len(text1) > len(text2): (text1_a, text1_b, text2_a, text2_b, mid_common) = hm else: (text2_a, text2_b, text1_a, text1_b, mid_common) = hm return (text1_a, text1_b, text2_a, text2_b, mid_common) def diff_cleanupSemantic(self, diffs): """Reduce the number of edits by eliminating semantically trivial equalities. Args: diffs: Array of diff tuples. """ changes = False equalities = [] # Stack of indices where equalities are found. lastequality = None # Always equal to diffs[equalities[-1]][1] pointer = 0 # Index of current position. # Number of chars that changed prior to the equality. length_insertions1, length_deletions1 = 0, 0 # Number of chars that changed after the equality. length_insertions2, length_deletions2 = 0, 0 while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found. equalities.append(pointer) length_insertions1, length_insertions2 = length_insertions2, 0 length_deletions1, length_deletions2 = length_deletions2, 0 lastequality = diffs[pointer][1] else: # An insertion or deletion. if diffs[pointer][0] == self.DIFF_INSERT: length_insertions2 += len(diffs[pointer][1]) else: length_deletions2 += len(diffs[pointer][1]) # Eliminate an equality that is smaller or equal to the edits on both # sides of it. if (lastequality and (len(lastequality) <= max(length_insertions1, length_deletions1)) and (len(lastequality) <= max(length_insertions2, length_deletions2))): # Duplicate record. diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality)) # Change second copy to insert. diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1]) # Throw away the equality we just deleted. equalities.pop() # Throw away the previous equality (it needs to be reevaluated). if len(equalities): equalities.pop() if len(equalities): pointer = equalities[-1] else: pointer = -1 # Reset the counters. length_insertions1, length_deletions1 = 0, 0 length_insertions2, length_deletions2 = 0, 0 lastequality = None changes = True pointer += 1 # Normalize the diff. if changes: self.diff_cleanupMerge(diffs) self.diff_cleanupSemanticLossless(diffs) # Find any overlaps between deletions and insertions. 
# e.g: <del>abcxxx</del><ins>xxxdef</ins> # -> <del>abc</del>xxx<ins>def</ins> # e.g: <del>xxxabc</del><ins>defxxx</ins> # -> <ins>def</ins>xxx<del>abc</del> # Only extract an overlap if it is as big as the edit ahead or behind it. pointer = 1 while pointer < len(diffs): if (diffs[pointer - 1][0] == self.DIFF_DELETE and diffs[pointer][0] == self.DIFF_INSERT): deletion = diffs[pointer - 1][1] insertion = diffs[pointer][1] overlap_length1 = self.diff_commonOverlap(deletion, insertion) overlap_length2 = self.diff_commonOverlap(insertion, deletion) if overlap_length1 >= overlap_length2: if (overlap_length1 >= len(deletion) / 2.0 or overlap_length1 >= len(insertion) / 2.0): # Overlap found. Insert an equality and trim the surrounding edits. diffs.insert(pointer, (self.DIFF_EQUAL, insertion[:overlap_length1])) diffs[pointer - 1] = (self.DIFF_DELETE, deletion[:len(deletion) - overlap_length1]) diffs[pointer + 1] = (self.DIFF_INSERT, insertion[overlap_length1:]) pointer += 1 else: if (overlap_length2 >= len(deletion) / 2.0 or overlap_length2 >= len(insertion) / 2.0): # Reverse overlap found. # Insert an equality and swap and trim the surrounding edits. diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2])) diffs[pointer - 1] = (self.DIFF_INSERT, insertion[:len(insertion) - overlap_length2]) diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:]) pointer += 1 pointer += 1 pointer += 1 def diff_cleanupSemanticLossless(self, diffs): """Look for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. Args: diffs: Array of diff tuples. """ def diff_cleanupSemanticScore(one, two): """Given two strings, compute a score representing whether the internal boundary falls on logical boundaries. Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. Args: one: First string. two: Second string. Returns: The score. """ if not one or not two: # Edges are the best. return 6 # Each port of this function behaves slightly differently due to # subtle differences in each language's definition of things like # 'whitespace'. Since this function's purpose is largely cosmetic, # the choice has been made to use each language's native features # rather than force total conformity. char1 = one[-1] char2 = two[0] nonAlphaNumeric1 = not char1.isalnum() nonAlphaNumeric2 = not char2.isalnum() whitespace1 = nonAlphaNumeric1 and char1.isspace() whitespace2 = nonAlphaNumeric2 and char2.isspace() lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n") lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n") blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one) blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two) if blankLine1 or blankLine2: # Five points for blank lines. return 5 elif lineBreak1 or lineBreak2: # Four points for line breaks. return 4 elif nonAlphaNumeric1 and not whitespace1 and whitespace2: # Three points for end of sentences. return 3 elif whitespace1 or whitespace2: # Two points for whitespace. return 2 elif nonAlphaNumeric1 or nonAlphaNumeric2: # One point for non-alphanumeric. return 1 return 0 pointer = 1 # Intentionally ignore the first and last element (don't need checking). while pointer < len(diffs) - 1: if (diffs[pointer - 1][0] == self.DIFF_EQUAL and diffs[pointer + 1][0] == self.DIFF_EQUAL): # This is a single edit surrounded by equalities. 
equality1 = diffs[pointer - 1][1] edit = diffs[pointer][1] equality2 = diffs[pointer + 1][1] # First, shift the edit as far left as possible. commonOffset = self.diff_commonSuffix(equality1, edit) if commonOffset: commonString = edit[-commonOffset:] equality1 = equality1[:-commonOffset] edit = commonString + edit[:-commonOffset] equality2 = commonString + equality2 # Second, step character by character right, looking for the best fit. bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 bestScore = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) while edit and equality2 and edit[0] == equality2[0]: equality1 += edit[0] edit = edit[1:] + equality2[0] equality2 = equality2[1:] score = (diff_cleanupSemanticScore(equality1, edit) + diff_cleanupSemanticScore(edit, equality2)) # The >= encourages trailing rather than leading whitespace on edits. if score >= bestScore: bestScore = score bestEquality1 = equality1 bestEdit = edit bestEquality2 = equality2 if diffs[pointer - 1][1] != bestEquality1: # We have an improvement, save it back to the diff. if bestEquality1: diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1) else: del diffs[pointer - 1] pointer -= 1 diffs[pointer] = (diffs[pointer][0], bestEdit) if bestEquality2: diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2) else: del diffs[pointer + 1] pointer -= 1 pointer += 1 # Define some regex patterns for matching boundaries. BLANKLINEEND = re.compile(r"\n\r?\n$"); BLANKLINESTART = re.compile(r"^\r?\n\r?\n"); def diff_cleanupEfficiency(self, diffs): """Reduce the number of edits by eliminating operationally trivial equalities. Args: diffs: Array of diff tuples. """ changes = False equalities = [] # Stack of indices where equalities are found. lastequality = None # Always equal to diffs[equalities[-1]][1] pointer = 0 # Index of current position. pre_ins = False # Is there an insertion operation before the last equality. pre_del = False # Is there a deletion operation before the last equality. post_ins = False # Is there an insertion operation after the last equality. post_del = False # Is there a deletion operation after the last equality. while pointer < len(diffs): if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found. if (len(diffs[pointer][1]) < self.Diff_EditCost and (post_ins or post_del)): # Candidate found. equalities.append(pointer) pre_ins = post_ins pre_del = post_del lastequality = diffs[pointer][1] else: # Not a candidate, and can never become one. equalities = [] lastequality = None post_ins = post_del = False else: # An insertion or deletion. if diffs[pointer][0] == self.DIFF_DELETE: post_del = True else: post_ins = True # Five types to be split: # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del> # <ins>A</ins>X<ins>C</ins><del>D</del> # <ins>A</ins><del>B</del>X<ins>C</ins> # <ins>A</del>X<ins>C</ins><del>D</del> # <ins>A</ins><del>B</del>X<del>C</del> if lastequality and ((pre_ins and pre_del and post_ins and post_del) or ((len(lastequality) < self.Diff_EditCost / 2) and (pre_ins + pre_del + post_ins + post_del) == 3)): # Duplicate record. diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality)) # Change second copy to insert. diffs[equalities[-1] + 1] = (self.DIFF_INSERT, diffs[equalities[-1] + 1][1]) equalities.pop() # Throw away the equality we just deleted. lastequality = None if pre_ins and pre_del: # No changes made which could affect previous entry, keep going. 
            post_ins = post_del = True
            equalities = []
          else:
            if len(equalities):
              equalities.pop()  # Throw away the previous equality.
            if len(equalities):
              pointer = equalities[-1]
            else:
              pointer = -1
            post_ins = post_del = False
          changes = True
      pointer += 1

    if changes:
      self.diff_cleanupMerge(diffs)

  def diff_cleanupMerge(self, diffs):
    """Reorder and merge like edit sections. Merge equalities.
    Any edit section can move as long as it doesn't cross an equality.

    Args:
      diffs: Array of diff tuples.
    """
    diffs.append((self.DIFF_EQUAL, ''))  # Add a dummy entry at the end.
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete + count_insert > 1:
          if count_delete != 0 and count_insert != 0:
            # Factor out any common prefixes.
            commonlength = self.diff_commonPrefix(text_insert, text_delete)
            if commonlength != 0:
              x = pointer - count_delete - count_insert - 1
              if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
                diffs[x] = (diffs[x][0],
                            diffs[x][1] + text_insert[:commonlength])
              else:
                diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
                pointer += 1
              text_insert = text_insert[commonlength:]
              text_delete = text_delete[commonlength:]
            # Factor out any common suffixes.
            commonlength = self.diff_commonSuffix(text_insert, text_delete)
            if commonlength != 0:
              diffs[pointer] = (diffs[pointer][0],
                                text_insert[-commonlength:] +
                                diffs[pointer][1])
              text_insert = text_insert[:-commonlength]
              text_delete = text_delete[:-commonlength]
          # Delete the offending records and add the merged ones.
          if count_delete == 0:
            diffs[pointer - count_insert : pointer] = [
                (self.DIFF_INSERT, text_insert)]
          elif count_insert == 0:
            diffs[pointer - count_delete : pointer] = [
                (self.DIFF_DELETE, text_delete)]
          else:
            diffs[pointer - count_delete - count_insert : pointer] = [
                (self.DIFF_DELETE, text_delete),
                (self.DIFF_INSERT, text_insert)]
          pointer = pointer - count_delete - count_insert + 1
          if count_delete != 0:
            pointer += 1
          if count_insert != 0:
            pointer += 1
        elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
          # Merge this equality with the previous one.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer][1])
          del diffs[pointer]
        else:
          pointer += 1

        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''

    if diffs[-1][1] == '':
      diffs.pop()  # Remove the dummy entry at the end.

    # Second pass: look for single edits surrounded on both sides by equalities
    # which can be shifted sideways to eliminate an equality.
    # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
    changes = False
    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
          # Shift the edit over the previous equality.
diffs[pointer] = (diffs[pointer][0], diffs[pointer - 1][1] + diffs[pointer][1][:-len(diffs[pointer - 1][1])]) diffs[pointer + 1] = (diffs[pointer + 1][0], diffs[pointer - 1][1] + diffs[pointer + 1][1]) del diffs[pointer - 1] changes = True elif diffs[pointer][1].startswith(diffs[pointer + 1][1]): # Shift the edit over the next equality. diffs[pointer - 1] = (diffs[pointer - 1][0], diffs[pointer - 1][1] + diffs[pointer + 1][1]) diffs[pointer] = (diffs[pointer][0], diffs[pointer][1][len(diffs[pointer + 1][1]):] + diffs[pointer + 1][1]) del diffs[pointer + 1] changes = True pointer += 1 # If shifts were made, the diff needs reordering and another shift sweep. if changes: self.diff_cleanupMerge(diffs) def diff_xIndex(self, diffs, loc): """loc is a location in text1, compute and return the equivalent location in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8 Args: diffs: Array of diff tuples. loc: Location within text1. Returns: Location within text2. """ chars1 = 0 chars2 = 0 last_chars1 = 0 last_chars2 = 0 for x in xrange(len(diffs)): (op, text) = diffs[x] if op != self.DIFF_INSERT: # Equality or deletion. chars1 += len(text) if op != self.DIFF_DELETE: # Equality or insertion. chars2 += len(text) if chars1 > loc: # Overshot the location. break last_chars1 = chars1 last_chars2 = chars2 if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE: # The location was deleted. return last_chars2 # Add the remaining len(character). return last_chars2 + (loc - last_chars1) def diff_prettyHtml(self, diffs): """Convert a diff array into a pretty HTML report. Args: diffs: Array of diff tuples. Returns: HTML representation. """ html = [] for (op, data) in diffs: text = (data.replace("&", "&amp;").replace("<", "&lt;") .replace(">", "&gt;").replace("\n", "&para;<br>")) if op == self.DIFF_INSERT: html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text) elif op == self.DIFF_DELETE: html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text) elif op == self.DIFF_EQUAL: html.append("<span>%s</span>" % text) return "".join(html) def diff_text1(self, diffs): """Compute and return the source text (all equalities and deletions). Args: diffs: Array of diff tuples. Returns: Source text. """ text = [] for (op, data) in diffs: if op != self.DIFF_INSERT: text.append(data) return "".join(text) def diff_text2(self, diffs): """Compute and return the destination text (all equalities and insertions). Args: diffs: Array of diff tuples. Returns: Destination text. """ text = [] for (op, data) in diffs: if op != self.DIFF_DELETE: text.append(data) return "".join(text) def diff_levenshtein(self, diffs): """Compute the Levenshtein distance; the number of inserted, deleted or substituted characters. Args: diffs: Array of diff tuples. Returns: Number of changes. """ levenshtein = 0 insertions = 0 deletions = 0 for (op, data) in diffs: if op == self.DIFF_INSERT: insertions += len(data) elif op == self.DIFF_DELETE: deletions += len(data) elif op == self.DIFF_EQUAL: # A deletion and an insertion is one substitution. levenshtein += max(insertions, deletions) insertions = 0 deletions = 0 levenshtein += max(insertions, deletions) return levenshtein def diff_toDelta(self, diffs): """Crush the diff into an encoded string which describes the operations required to transform text1 into text2. E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. Args: diffs: Array of diff tuples. Returns: Delta text. 
""" text = [] for (op, data) in diffs: if op == self.DIFF_INSERT: # High ascii will raise UnicodeDecodeError. Use Unicode instead. data = data.encode("utf-8") text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# ")) elif op == self.DIFF_DELETE: text.append("-%d" % len(data)) elif op == self.DIFF_EQUAL: text.append("=%d" % len(data)) return "\t".join(text) def diff_fromDelta(self, text1, delta): """Given the original text1, and an encoded string which describes the operations required to transform text1 into text2, compute the full diff. Args: text1: Source string for the diff. delta: Delta text. Returns: Array of diff tuples. Raises: ValueError: If invalid input. """ if type(delta) == unicode: # Deltas should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, delta is invalid. delta = delta.encode("ascii") diffs = [] pointer = 0 # Cursor in text1 tokens = delta.split("\t") for token in tokens: if token == "": # Blank tokens are ok (from a trailing \t). continue # Each token begins with a one character parameter which specifies the # operation of this token (delete, insert, equality). param = token[1:] if token[0] == "+": param = urllib.unquote(param).decode("utf-8") diffs.append((self.DIFF_INSERT, param)) elif token[0] == "-" or token[0] == "=": try: n = int(param) except ValueError: raise ValueError("Invalid number in diff_fromDelta: " + param) if n < 0: raise ValueError("Negative number in diff_fromDelta: " + param) text = text1[pointer : pointer + n] pointer += n if token[0] == "=": diffs.append((self.DIFF_EQUAL, text)) else: diffs.append((self.DIFF_DELETE, text)) else: # Anything else is an error. raise ValueError("Invalid diff operation in diff_fromDelta: " + token[0]) if pointer != len(text1): raise ValueError( "Delta length (%d) does not equal source text length (%d)." % (pointer, len(text1))) return diffs # MATCH FUNCTIONS def match_main(self, text, pattern, loc): """Locate the best instance of 'pattern' in 'text' near 'loc'. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1. """ # Check for null inputs. if text == None or pattern == None: raise ValueError("Null inputs. (match_main)") loc = max(0, min(loc, len(text))) if text == pattern: # Shortcut (potentially not guaranteed by the algorithm) return 0 elif not text: # Nothing to match. return -1 elif text[loc:loc + len(pattern)] == pattern: # Perfect match at the perfect spot! (Includes case of null pattern) return loc else: # Do a fuzzy compare. match = self.match_bitap(text, pattern, loc) return match def match_bitap(self, text, pattern, loc): """Locate the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. Args: text: The text to search. pattern: The pattern to search for. loc: The location to search around. Returns: Best match index or -1. """ # Python doesn't have a maxint limit, so ignore this check. #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits: # raise ValueError("Pattern too long for this application.") # Initialise the alphabet. s = self.match_alphabet(pattern) def match_bitapScore(e, x): """Compute and return the score for a match with e errors and x location. Accesses loc and pattern through being a closure. Args: e: Number of errors in match. x: Location of match. Returns: Overall score for match (0.0 = good, 1.0 = bad). 
""" accuracy = float(e) / len(pattern) proximity = abs(loc - x) if not self.Match_Distance: # Dodge divide by zero error. return proximity and 1.0 or accuracy return accuracy + (proximity / float(self.Match_Distance)) # Highest score beyond which we give up. score_threshold = self.Match_Threshold # Is there a nearby exact match? (speedup) best_loc = text.find(pattern, loc) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # What about in the other direction? (speedup) best_loc = text.rfind(pattern, loc + len(pattern)) if best_loc != -1: score_threshold = min(match_bitapScore(0, best_loc), score_threshold) # Initialise the bit arrays. matchmask = 1 << (len(pattern) - 1) best_loc = -1 bin_max = len(pattern) + len(text) # Empty initialization added to appease pychecker. last_rd = None for d in xrange(len(pattern)): # Scan for the best match each iteration allows for one more error. # Run a binary search to determine how far from 'loc' we can stray at # this error level. bin_min = 0 bin_mid = bin_max while bin_min < bin_mid: if match_bitapScore(d, loc + bin_mid) <= score_threshold: bin_min = bin_mid else: bin_max = bin_mid bin_mid = (bin_max - bin_min) // 2 + bin_min # Use the result from this iteration as the maximum for the next. bin_max = bin_mid start = max(1, loc - bin_mid + 1) finish = min(loc + bin_mid, len(text)) + len(pattern) rd = [0] * (finish + 2) rd[finish + 1] = (1 << d) - 1 for j in xrange(finish, start - 1, -1): if len(text) <= j - 1: # Out of range. charMatch = 0 else: charMatch = s.get(text[j - 1], 0) if d == 0: # First pass: exact match. rd[j] = ((rd[j + 1] << 1) | 1) & charMatch else: # Subsequent passes: fuzzy match. rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | ( ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1] if rd[j] & matchmask: score = match_bitapScore(d, j - 1) # This match will almost certainly be better than any existing match. # But check anyway. if score <= score_threshold: # Told you so. score_threshold = score best_loc = j - 1 if best_loc > loc: # When passing loc, don't exceed our current distance from loc. start = max(1, 2 * loc - best_loc) else: # Already passed loc, downhill from here on in. break # No hope for a (better) match at greater error levels. if match_bitapScore(d + 1, loc) > score_threshold: break last_rd = rd return best_loc def match_alphabet(self, pattern): """Initialise the alphabet for the Bitap algorithm. Args: pattern: The text to encode. Returns: Hash of character locations. """ s = {} for char in pattern: s[char] = 0 for i in xrange(len(pattern)): s[pattern[i]] |= 1 << (len(pattern) - i - 1) return s # PATCH FUNCTIONS def patch_addContext(self, patch, text): """Increase the context until it is unique, but don't let the pattern expand beyond Match_MaxBits. Args: patch: The patch to grow. text: Source text. """ if len(text) == 0: return pattern = text[patch.start2 : patch.start2 + patch.length1] padding = 0 # Look for the first and last matches of pattern in text. If two different # matches are found, increase the pattern length. while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits == 0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin - self.Patch_Margin)): padding += self.Patch_Margin pattern = text[max(0, patch.start2 - padding) : patch.start2 + patch.length1 + padding] # Add one chunk for good luck. padding += self.Patch_Margin # Add the prefix. 
prefix = text[max(0, patch.start2 - padding) : patch.start2] if prefix: patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)] # Add the suffix. suffix = text[patch.start2 + patch.length1 : patch.start2 + patch.length1 + padding] if suffix: patch.diffs.append((self.DIFF_EQUAL, suffix)) # Roll back the start points. patch.start1 -= len(prefix) patch.start2 -= len(prefix) # Extend lengths. patch.length1 += len(prefix) + len(suffix) patch.length2 += len(prefix) + len(suffix) def patch_make(self, a, b=None, c=None): """Compute a list of patches to turn text1 into text2. Use diffs if provided, otherwise compute it ourselves. There are four ways to call this function, depending on what data is available to the caller: Method 1: a = text1, b = text2 Method 2: a = diffs Method 3 (optimal): a = text1, b = diffs Method 4 (deprecated, use method 3): a = text1, b = text2, c = diffs Args: a: text1 (methods 1,3,4) or Array of diff tuples for text1 to text2 (method 2). b: text2 (methods 1,4) or Array of diff tuples for text1 to text2 (method 3) or undefined (method 2). c: Array of diff tuples for text1 to text2 (method 4) or undefined (methods 1,2,3). Returns: Array of Patch objects. """ text1 = None diffs = None # Note that texts may arrive as 'str' or 'unicode'. if isinstance(a, basestring) and isinstance(b, basestring) and c is None: # Method 1: text1, text2 # Compute diffs from text1 and text2. text1 = a diffs = self.diff_main(text1, b, True) if len(diffs) > 2: self.diff_cleanupSemantic(diffs) self.diff_cleanupEfficiency(diffs) elif isinstance(a, list) and b is None and c is None: # Method 2: diffs # Compute text1 from diffs. diffs = a text1 = self.diff_text1(diffs) elif isinstance(a, basestring) and isinstance(b, list) and c is None: # Method 3: text1, diffs text1 = a diffs = b elif (isinstance(a, basestring) and isinstance(b, basestring) and isinstance(c, list)): # Method 4: text1, text2, diffs # text2 is not used. text1 = a diffs = c else: raise ValueError("Unknown call format to patch_make.") if not diffs: return [] # Get rid of the None case. patches = [] patch = patch_obj() char_count1 = 0 # Number of characters into the text1 string. char_count2 = 0 # Number of characters into the text2 string. prepatch_text = text1 # Recreate the patches to determine context info. postpatch_text = text1 for x in xrange(len(diffs)): (diff_type, diff_text) = diffs[x] if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL: # A new patch starts here. patch.start1 = char_count1 patch.start2 = char_count2 if diff_type == self.DIFF_INSERT: # Insertion patch.diffs.append(diffs[x]) patch.length2 += len(diff_text) postpatch_text = (postpatch_text[:char_count2] + diff_text + postpatch_text[char_count2:]) elif diff_type == self.DIFF_DELETE: # Deletion. patch.length1 += len(diff_text) patch.diffs.append(diffs[x]) postpatch_text = (postpatch_text[:char_count2] + postpatch_text[char_count2 + len(diff_text):]) elif (diff_type == self.DIFF_EQUAL and len(diff_text) <= 2 * self.Patch_Margin and len(patch.diffs) != 0 and len(diffs) != x + 1): # Small equality inside a patch. patch.diffs.append(diffs[x]) patch.length1 += len(diff_text) patch.length2 += len(diff_text) if (diff_type == self.DIFF_EQUAL and len(diff_text) >= 2 * self.Patch_Margin): # Time for a new patch. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) patch = patch_obj() # Unlike Unidiff, our patch lists have a rolling context. 
# http://code.google.com/p/google-diff-match-patch/wiki/Unidiff # Update prepatch text & pos to reflect the application of the # just completed patch. prepatch_text = postpatch_text char_count1 = char_count2 # Update the current character count. if diff_type != self.DIFF_INSERT: char_count1 += len(diff_text) if diff_type != self.DIFF_DELETE: char_count2 += len(diff_text) # Pick up the leftover patch if not empty. if len(patch.diffs) != 0: self.patch_addContext(patch, prepatch_text) patches.append(patch) return patches def patch_deepCopy(self, patches): """Given an array of patches, return another array that is identical. Args: patches: Array of Patch objects. Returns: Array of Patch objects. """ patchesCopy = [] for patch in patches: patchCopy = patch_obj() # No need to deep copy the tuples since they are immutable. patchCopy.diffs = patch.diffs[:] patchCopy.start1 = patch.start1 patchCopy.start2 = patch.start2 patchCopy.length1 = patch.length1 patchCopy.length2 = patch.length2 patchesCopy.append(patchCopy) return patchesCopy def patch_apply(self, patches, text): """Merge a set of patches onto the text. Return a patched text, as well as a list of true/false values indicating which patches were applied. Args: patches: Array of Patch objects. text: Old text. Returns: Two element Array, containing the new text and an array of boolean values. """ if not patches: return (text, []) # Deep copy the patches so that no changes are made to originals. patches = self.patch_deepCopy(patches) nullPadding = self.patch_addPadding(patches) text = nullPadding + text + nullPadding self.patch_splitMax(patches) # delta keeps track of the offset between the expected and actual location # of the previous patch. If there are patches expected at positions 10 and # 20, but the first patch was found at 12, delta is 2 and the second patch # has an effective expected position of 22. delta = 0 results = [] for patch in patches: expected_loc = patch.start2 + delta text1 = self.diff_text1(patch.diffs) end_loc = -1 if len(text1) > self.Match_MaxBits: # patch_splitMax will only provide an oversized pattern in the case of # a monster delete. start_loc = self.match_main(text, text1[:self.Match_MaxBits], expected_loc) if start_loc != -1: end_loc = self.match_main(text, text1[-self.Match_MaxBits:], expected_loc + len(text1) - self.Match_MaxBits) if end_loc == -1 or start_loc >= end_loc: # Can't find valid trailing context. Drop this patch. start_loc = -1 else: start_loc = self.match_main(text, text1, expected_loc) if start_loc == -1: # No match found. :( results.append(False) # Subtract the delta for this failed patch from subsequent patches. delta -= patch.length2 - patch.length1 else: # Found a match. :) results.append(True) delta = start_loc - expected_loc if end_loc == -1: text2 = text[start_loc : start_loc + len(text1)] else: text2 = text[start_loc : end_loc + self.Match_MaxBits] if text1 == text2: # Perfect match, just shove the replacement text in. text = (text[:start_loc] + self.diff_text2(patch.diffs) + text[start_loc + len(text1):]) else: # Imperfect match. # Run a diff to get a framework of equivalent indices. diffs = self.diff_main(text1, text2, False) if (len(text1) > self.Match_MaxBits and self.diff_levenshtein(diffs) / float(len(text1)) > self.Patch_DeleteThreshold): # The end points match, but the content is unacceptably bad. 
results[-1] = False else: self.diff_cleanupSemanticLossless(diffs) index1 = 0 for (op, data) in patch.diffs: if op != self.DIFF_EQUAL: index2 = self.diff_xIndex(diffs, index1) if op == self.DIFF_INSERT: # Insertion text = text[:start_loc + index2] + data + text[start_loc + index2:] elif op == self.DIFF_DELETE: # Deletion text = text[:start_loc + index2] + text[start_loc + self.diff_xIndex(diffs, index1 + len(data)):] if op != self.DIFF_DELETE: index1 += len(data) # Strip the padding off. text = text[len(nullPadding):-len(nullPadding)] return (text, results) def patch_addPadding(self, patches): """Add some padding on text start and end so that edges can match something. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. Returns: The padding string added to each side. """ paddingLength = self.Patch_Margin nullPadding = "" for x in xrange(1, paddingLength + 1): nullPadding += chr(x) # Bump all the patches forward. for patch in patches: patch.start1 += paddingLength patch.start2 += paddingLength # Add some padding on start of first diff. patch = patches[0] diffs = patch.diffs if not diffs or diffs[0][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.insert(0, (self.DIFF_EQUAL, nullPadding)) patch.start1 -= paddingLength # Should be 0. patch.start2 -= paddingLength # Should be 0. patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[0][1]): # Grow first equality. extraLength = paddingLength - len(diffs[0][1]) newText = nullPadding[len(diffs[0][1]):] + diffs[0][1] diffs[0] = (diffs[0][0], newText) patch.start1 -= extraLength patch.start2 -= extraLength patch.length1 += extraLength patch.length2 += extraLength # Add some padding on end of last diff. patch = patches[-1] diffs = patch.diffs if not diffs or diffs[-1][0] != self.DIFF_EQUAL: # Add nullPadding equality. diffs.append((self.DIFF_EQUAL, nullPadding)) patch.length1 += paddingLength patch.length2 += paddingLength elif paddingLength > len(diffs[-1][1]): # Grow last equality. extraLength = paddingLength - len(diffs[-1][1]) newText = diffs[-1][1] + nullPadding[:extraLength] diffs[-1] = (diffs[-1][0], newText) patch.length1 += extraLength patch.length2 += extraLength return nullPadding def patch_splitMax(self, patches): """Look through the patches and break up any which are longer than the maximum limit of the match algorithm. Intended to be called only from within patch_apply. Args: patches: Array of Patch objects. """ patch_size = self.Match_MaxBits if patch_size == 0: # Python has the option of not splitting strings due to its ability # to handle integers of arbitrary precision. return for x in xrange(len(patches)): if patches[x].length1 <= patch_size: continue bigpatch = patches[x] # Remove the big old patch. del patches[x] x -= 1 start1 = bigpatch.start1 start2 = bigpatch.start2 precontext = '' while len(bigpatch.diffs) != 0: # Create one of several smaller patches. patch = patch_obj() empty = True patch.start1 = start1 - len(precontext) patch.start2 = start2 - len(precontext) if precontext: patch.length1 = patch.length2 = len(precontext) patch.diffs.append((self.DIFF_EQUAL, precontext)) while (len(bigpatch.diffs) != 0 and patch.length1 < patch_size - self.Patch_Margin): (diff_type, diff_text) = bigpatch.diffs[0] if diff_type == self.DIFF_INSERT: # Insertions are harmless. 
patch.length2 += len(diff_text) start2 += len(diff_text) patch.diffs.append(bigpatch.diffs.pop(0)) empty = False elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and patch.diffs[0][0] == self.DIFF_EQUAL and len(diff_text) > 2 * patch_size): # This is a large deletion. Let it pass in one chunk. patch.length1 += len(diff_text) start1 += len(diff_text) empty = False patch.diffs.append((diff_type, diff_text)) del bigpatch.diffs[0] else: # Deletion or equality. Only take as much as we can stomach. diff_text = diff_text[:patch_size - patch.length1 - self.Patch_Margin] patch.length1 += len(diff_text) start1 += len(diff_text) if diff_type == self.DIFF_EQUAL: patch.length2 += len(diff_text) start2 += len(diff_text) else: empty = False patch.diffs.append((diff_type, diff_text)) if diff_text == bigpatch.diffs[0][1]: del bigpatch.diffs[0] else: bigpatch.diffs[0] = (bigpatch.diffs[0][0], bigpatch.diffs[0][1][len(diff_text):]) # Compute the head context for the next patch. precontext = self.diff_text2(patch.diffs) precontext = precontext[-self.Patch_Margin:] # Append the end context for this patch. postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin] if postcontext: patch.length1 += len(postcontext) patch.length2 += len(postcontext) if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL: patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] + postcontext) else: patch.diffs.append((self.DIFF_EQUAL, postcontext)) if not empty: x += 1 patches.insert(x, patch) def patch_toText(self, patches): """Take a list of patches and return a textual representation. Args: patches: Array of Patch objects. Returns: Text representation of patches. """ text = [] for patch in patches: text.append(str(patch)) return "".join(text) def patch_fromText(self, textline): """Parse a textual representation of patches and return a list of patch objects. Args: textline: Text representation of patches. Returns: Array of Patch objects. Raises: ValueError: If invalid input. """ if type(textline) == unicode: # Patches should be composed of a subset of ascii chars, Unicode not # required. If this encode raises UnicodeEncodeError, patch is invalid. textline = textline.encode("ascii") patches = [] if not textline: return patches text = textline.split('\n') while len(text) != 0: m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0]) if not m: raise ValueError("Invalid patch string: " + text[0]) patch = patch_obj() patches.append(patch) patch.start1 = int(m.group(1)) if m.group(2) == '': patch.start1 -= 1 patch.length1 = 1 elif m.group(2) == '0': patch.length1 = 0 else: patch.start1 -= 1 patch.length1 = int(m.group(2)) patch.start2 = int(m.group(3)) if m.group(4) == '': patch.start2 -= 1 patch.length2 = 1 elif m.group(4) == '0': patch.length2 = 0 else: patch.start2 -= 1 patch.length2 = int(m.group(4)) del text[0] while len(text) != 0: if text[0]: sign = text[0][0] else: sign = '' line = urllib.unquote(text[0][1:]) line = line.decode("utf-8") if sign == '+': # Insertion. patch.diffs.append((self.DIFF_INSERT, line)) elif sign == '-': # Deletion. patch.diffs.append((self.DIFF_DELETE, line)) elif sign == ' ': # Minor equality. patch.diffs.append((self.DIFF_EQUAL, line)) elif sign == '@': # Start of next patch. break elif sign == '': # Blank line? Whatever. pass else: # WTF? raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line)) del text[0] return patches class patch_obj: """Class representing one patch operation. """ def __init__(self): """Initializes with an empty list of diffs. 
""" self.diffs = [] self.start1 = None self.start2 = None self.length1 = 0 self.length2 = 0 def __str__(self): """Emmulate GNU diff's format. Header: @@ -382,8 +481,9 @@ Indicies are printed as 1-based, not 0-based. Returns: The GNU diff string. """ if self.length1 == 0: coords1 = str(self.start1) + ",0" elif self.length1 == 1: coords1 = str(self.start1 + 1) else: coords1 = str(self.start1 + 1) + "," + str(self.length1) if self.length2 == 0: coords2 = str(self.start2) + ",0" elif self.length2 == 1: coords2 = str(self.start2 + 1) else: coords2 = str(self.start2 + 1) + "," + str(self.length2) text = ["@@ -", coords1, " +", coords2, " @@\n"] # Escape the body of the patch with %xx notation. for (op, data) in self.diffs: if op == diff_match_patch.DIFF_INSERT: text.append("+") elif op == diff_match_patch.DIFF_DELETE: text.append("-") elif op == diff_match_patch.DIFF_EQUAL: text.append(" ") # High ascii will raise UnicodeDecodeError. Use Unicode instead. data = data.encode("utf-8") text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n") return "".join(text)
apache-2.0
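The public API above composes in three steps: diff_main produces the tuple list, patch_make wraps it into patch_obj instances, and patch_apply replays those patches against a text that may have drifted from the original. A minimal usage sketch, written in Python 2 to match this port; it assumes only that the file is saved on the import path as diff_match_patch.py:

# Minimal usage sketch for the port above (Python 2; assumes the file
# is importable as diff_match_patch.py).
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()

# 1. Diff two texts and clean the result up for human consumption.
diffs = dmp.diff_main("The quick brown fox.", "The slow brown dog.")
dmp.diff_cleanupSemantic(diffs)  # merges semantically trivial equalities in place

# 2. Build patches and serialize them in the GNU-diff-like format
#    emitted by patch_obj.__str__.
patches = dmp.patch_make("The quick brown fox.", "The slow brown dog.")
print dmp.patch_toText(patches)

# 3. Apply the patches to a text that has drifted from the original.
new_text, results = dmp.patch_apply(patches, "The quick brown fox leaps.")
print new_text
print results  # one boolean per patch: True if that patch applied

Because patch_apply returns the per-patch booleans alongside the patched text, callers can detect and report partial failures rather than silently accepting a half-applied result.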
chongtianfeiyu/kbengine
kbe/src/lib/python/Tools/freeze/makefreeze.py
37
2706
import marshal
import bkfile


# Write a file containing frozen code for the modules in the dictionary.

header = """
#include "Python.h"

static struct _frozen _PyImport_FrozenModules[] = {
"""
trailer = """\
    {0, 0, 0} /* sentinel */
};
"""

# if __debug__ == 0 (i.e. -O option given), set Py_OptimizeFlag in frozen app.
default_entry_point = """
int
main(int argc, char **argv)
{
    extern int Py_FrozenMain(int, char **);
""" + ((not __debug__ and """
    Py_OptimizeFlag++;
""") or "") + """
    PyImport_FrozenModules = _PyImport_FrozenModules;
    return Py_FrozenMain(argc, argv);
}
"""

def makefreeze(base, dict, debug=0, entry_point=None, fail_import=()):
    if entry_point is None: entry_point = default_entry_point
    done = []
    files = []
    mods = sorted(dict.keys())
    for mod in mods:
        m = dict[mod]
        mangled = "__".join(mod.split("."))
        if m.__code__:
            file = 'M_' + mangled + '.c'
            outfp = bkfile.open(base + file, 'w')
            files.append(file)
            if debug:
                print("freezing", mod, "...")
            str = marshal.dumps(m.__code__)
            size = len(str)
            if m.__path__:
                # Indicate package by negative size
                size = -size
            done.append((mod, mangled, size))
            writecode(outfp, mangled, str)
            outfp.close()
    if debug:
        print("generating table of frozen modules")
    outfp = bkfile.open(base + 'frozen.c', 'w')
    for mod, mangled, size in done:
        outfp.write('extern unsigned char M_%s[];\n' % mangled)
    outfp.write(header)
    for mod, mangled, size in done:
        outfp.write('\t{"%s", M_%s, %d},\n' % (mod, mangled, size))
    outfp.write('\n')
    # The following modules have a NULL code pointer, indicating
    # that the frozen program should not search for them on the host
    # system. Importing them will *always* raise an ImportError.
    # The zero value size is never used.
    for mod in fail_import:
        outfp.write('\t{"%s", NULL, 0},\n' % (mod,))
    outfp.write(trailer)
    outfp.write(entry_point)
    outfp.close()
    return files


# Write a C initializer for a module containing the frozen python code.
# The array is called M_<mod>.

def writecode(outfp, mod, str):
    outfp.write('unsigned char M_%s[] = {' % mod)
    for i in range(0, len(str), 16):
        outfp.write('\n\t')
        for c in bytes(str[i:i+16]):
            outfp.write('%d,' % c)
    outfp.write('\n};\n')

## def writecode(outfp, mod, str):
##     outfp.write('unsigned char M_%s[%d] = "%s";\n' % (mod, len(str),
##     '\\"'.join(map(lambda s: repr(s)[1:-1], str.split('"')))))
lgpl-3.0
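writecode() turns the marshalled bytecode of each frozen module into a C array initializer that the table in frozen.c can point at. A small self-contained sketch of the same loop, writing to an in-memory buffer instead of a bkfile handle (Python 3, matching the code above; the 'hello' module name and one-line source are illustrative):

# Sketch of the C initializer that writecode() emits, using an
# in-memory buffer instead of bkfile.
import io
import marshal

code = compile("print('hello')", "<frozen hello>", "exec")
data = marshal.dumps(code)

buf = io.StringIO()
buf.write('unsigned char M_hello[] = {')
for i in range(0, len(data), 16):
    buf.write('\n\t')
    for c in bytes(data[i:i+16]):
        buf.write('%d,' % c)
buf.write('\n};\n')

# Prints "unsigned char M_hello[] = {" followed by rows of decimal
# bytes, one per marshalled byte, 16 to a row.
print(buf.getvalue())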
samuto/UnityOpenCV
opencv/samples/swig_python/squares.py
6
5931
#!/usr/bin/python
#
# The full "Square Detector" program.
# It loads several images sequentially and tries to find squares in
# each image
#

from opencv.cv import *
from opencv.highgui import *
from math import sqrt

thresh = 50;
img = None;
img0 = None;
storage = None;
wndname = "Square Detection Demo";

def angle( pt1, pt2, pt0 ):
    dx1 = pt1.x - pt0.x;
    dy1 = pt1.y - pt0.y;
    dx2 = pt2.x - pt0.x;
    dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);

def findSquares4( img, storage ):
    N = 11;
    sz = cvSize( img.width & -2, img.height & -2 );
    timg = cvCloneImage( img ); # make a copy of input image
    gray = cvCreateImage( sz, 8, 1 );
    pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cvCreateSeq( 0, sizeof_CvSeq, sizeof_CvPoint, storage );
    squares = CvSeq_CvPoint.cast( squares )

    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cvGetSubRect( timg, cvRect( 0, 0, sz.width, sz.height ))

    # down-scale and upscale the image to filter out the noise
    cvPyrDown( subimage, pyr, 7 );
    cvPyrUp( pyr, subimage, 7 );
    tgray = cvCreateImage( sz, 8, 1 );
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cvSplit( subimage, channels[0], channels[1], channels[2], None )
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if( l == 0 ):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cvCanny( tgray, gray, 0, thresh, 5 );
                # dilate canny output to remove potential
                # holes between edge segments
                cvDilate( gray, gray, None, 1 );
            else:
                # apply threshold if l!=0:
                #     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );

            # find contours and store them all as a list
            count, contours = cvFindContours( gray, storage, sizeof_CvContour,
                CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

            if not contours:
                continue

            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cvApproxPoly( contour, sizeof_CvContour, storage,
                    CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if( result.total == 4 and
                    abs(cvContourArea(result)) > 1000 and
                    cvCheckContourConvexity(result) ):
                    s = 0;
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if( i >= 2 ):
                            t = abs(angle( result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quadrangle
                    # vertices to resultant sequence
                    if( s < 0.3 ):
                        for i in range(4):
                            squares.append( result[i] )

    return squares;

# the function draws all the squares in the image
def drawSquares( img, squares ):
    cpy = cvCloneImage( img );
    # read 4 sequence elements at a time (all vertices of a square)
    i=0
    while i<squares.total:
        pt = []
        # read 4 vertices
        pt.append( squares[i] )
        pt.append( squares[i+1] )
        pt.append( squares[i+2] )
        pt.append( squares[i+3] )

        # draw the square as a closed polyline
        cvPolyLine( cpy, [pt], 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
        i+=4

    # show the resultant image
    cvShowImage( wndname, cpy );

def on_trackbar( a ):
    if( img ):
        drawSquares( img, findSquares4( img, storage ) );

names = ["../c/pic1.png", "../c/pic2.png", "../c/pic3.png",
         "../c/pic4.png", "../c/pic5.png", "../c/pic6.png" ];

if __name__ == "__main__":
    # create memory storage that will contain all the dynamic data
    storage = cvCreateMemStorage(0);
    for name in names:
        img0 = cvLoadImage( name, 1 );
        if not img0:
            print "Couldn't load %s" % name
            continue;
        img = cvCloneImage( img0 );
        # create window and a trackbar (slider) with parent "image" and set callback
        # (the slider regulates upper threshold, passed to Canny edge detector)
        cvNamedWindow( wndname, 1 );
        cvCreateTrackbar( "canny thresh", wndname, thresh, 1000, on_trackbar );
        # force the image processing
        on_trackbar(0);
        # wait for key.
        # Also the function cvWaitKey takes care of event processing
        c = cvWaitKey(0);
        # clear memory storage - reset free space position
        cvClearMemStorage( storage );
        if( c == '\x1b' ):
            break;
    cvDestroyWindow( wndname );
gpl-3.0
sanjeevtripurari/hue
desktop/libs/hadoop/src/hadoop/fs/upload.py
26
5786
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Classes for a custom upload handler to stream into HDFS.

Note that since our middlewares inspect request.POST, we cannot inject
a custom handler into a specific view. Therefore we always use the
HDFSfileUploadHandler, which is triggered by a magic prefix ("HDFS")
in the field name.

See http://docs.djangoproject.com/en/1.2/topics/http/file-uploads/
"""

import errno
import logging
import time

from django.core.files.uploadhandler import FileUploadHandler, \
    StopFutureHandlers, StopUpload
from django.utils.translation import ugettext as _

import hadoop.cluster
from hadoop.conf import UPLOAD_CHUNK_SIZE
from hadoop.fs.exceptions import WebHdfsException

LOG = logging.getLogger(__name__)

UPLOAD_SUBDIR = 'hue-uploads'


class HDFSerror(Exception):
  pass


class HDFStemporaryUploadedFile(object):
  """
  A temporary HDFS file to store upload data.
  This class does not have any file read methods.
  """
  def __init__(self, request, name, destination):
    self.name = name
    self.size = None
    self._do_cleanup = False
    try:
      self._fs = request.fs
    except AttributeError:
      self._fs = hadoop.cluster.get_hdfs()

    # Don't want to handle this upload if we don't have an HDFS
    if not self._fs:
      raise HDFSerror("No HDFS found")

    # We want to set the user to be the user doing the upload
    self._fs.setuser(request.user.username)
    self._path = self._fs.mkswap(name, suffix='tmp', basedir=destination)

    if self._fs.exists(self._path):
      self._fs._delete(self._path)
    self._file = self._fs.open(self._path, 'w')

    self._do_cleanup = True

  def __del__(self):
    if self._do_cleanup:
      # Do not do cleanup here. It's hopeless. The self._fs threadlocal states
      # are going to be all wrong.
      LOG.error("Left-over upload file is not cleaned up: %s" % (self._path,))

  def get_temp_path(self):
    return self._path

  def finish_upload(self, size):
    try:
      self.size = size
      self.close()
    except Exception, ex:
      LOG.exception('Error uploading file to %s' % (self._path,))
      raise

  def remove(self):
    try:
      self._fs.remove(self._path, True)
      self._do_cleanup = False
    except IOError, ex:
      if ex.errno != errno.ENOENT:
        LOG.exception('Failed to remove temporary upload file "%s". '
                      'Please cleanup manually: %s' % (self._path, ex))

  def write(self, data):
    self._file.write(data)

  def flush(self):
    self._file.flush()

  def close(self):
    self._file.close()


class HDFSfileUploadHandler(FileUploadHandler):
  """
  Handle file upload by storing data in a temp HDFS file.

  This handler is triggered by any upload field whose name starts with
  "HDFS" (case insensitive).

  In practice, the middlewares (which access the request.REQUEST/POST/FILES
  objects) triggers the upload before reaching the view in case of
  permissions error. Read about Django uploading documentation.

  This might trigger the upload before executing the hue auth middleware.
  HDFS destination permissions will be doing the checks.
  """
  def __init__(self, request):
    FileUploadHandler.__init__(self, request)
    self._file = None
    self._starttime = 0
    self._activated = False
    self._destination = request.GET.get('dest', None)  # GET param avoids infinite looping
    self.request = request
    # Need to directly modify FileUploadHandler.chunk_size
    FileUploadHandler.chunk_size = UPLOAD_CHUNK_SIZE.get()

  def new_file(self, field_name, file_name, *args, **kwargs):
    # Detect "HDFS" in the field name.
    if field_name.upper().startswith('HDFS'):
      try:
        self._file = HDFStemporaryUploadedFile(self.request, file_name, self._destination)
        LOG.debug('Upload attempt to %s' % (self._file.get_temp_path(),))
        self._activated = True
        self._starttime = time.time()
      except Exception, ex:
        LOG.error("Not using HDFS upload handler: %s" % (ex,))
        self.request.META['upload_failed'] = ex

      raise StopFutureHandlers()

  def receive_data_chunk(self, raw_data, start):
    if not self._activated:
      if self.request.META.get('PATH_INFO').startswith('/filebrowser') and \
          self.request.META.get('PATH_INFO') != '/filebrowser/upload/archive':
        raise StopUpload()
      return raw_data

    try:
      self._file.write(raw_data)
      self._file.flush()
      return None
    except IOError:
      LOG.exception('Error storing upload data in temporary file "%s"' %
                    (self._file.get_temp_path(),))
      raise StopUpload()

  def file_complete(self, file_size):
    if not self._activated:
      return None

    try:
      self._file.finish_upload(file_size)
    except IOError:
      LOG.exception('Error closing uploaded temporary file "%s"' %
                    (self._file.get_temp_path(),))
      raise

    elapsed = time.time() - self._starttime
    LOG.debug('Uploaded %s bytes to HDFS in %s seconds' % (file_size, elapsed))
    return self._file
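A minimal sketch (my assumption, not taken from Hue's source) of how a handler like this is typically enabled in a Django settings module. Note that the upload field itself must be named with the magic "HDFS" prefix, and the request URL must carry the ?dest= parameter the constructor reads:

# settings.py (hypothetical): run the HDFS handler before Django's defaults,
# so it can claim the upload and stop the remaining handlers.
FILE_UPLOAD_HANDLERS = (
    'hadoop.fs.upload.HDFSfileUploadHandler',
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
    'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)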
apache-2.0
sulaweyo/torrentflux-b4rt-php7
html/bin/clients/mainline/BTL/psapi.py
11
2555
# Windows PSAPI function wrappers.
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dllproc/base/psapi_functions.asp
#
# The contents of this file are subject to the Python Software Foundation
# License Version 2.3 (the License). You may not copy or use this file, in
# either source code or executable form, except in compliance with the License.
# You may obtain a copy of the License at http://www.python.org/license.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# by Greg Hazel

import ctypes

DWORD = ctypes.c_ulong
SIZE_T = ctypes.c_ulong
psapi = ctypes.windll.psapi
Kernel32 = ctypes.windll.Kernel32


class PROCESS_MEMORY_COUNTERS(ctypes.Structure):
    _fields_ = [("cb", DWORD),
                ("PageFaultCount", DWORD),
                ("PeakWorkingSetSize", SIZE_T),
                ("WorkingSetSize", SIZE_T),
                ("QuotaPeakPagedPoolUsage", SIZE_T),
                ("QuotaPagedPoolUsage", SIZE_T),
                ("QuotaPeakNonPagedPoolUsage", SIZE_T),
                ("QuotaNonPagedPoolUsage", SIZE_T),
                ("PagefileUsage", SIZE_T),
                ("PeakPagefileUsage", SIZE_T),
                ]


class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
    _fields_ = [("cb", DWORD),
                ("PageFaultCount", DWORD),
                ("PeakWorkingSetSize", SIZE_T),
                ("WorkingSetSize", SIZE_T),
                ("QuotaPeakPagedPoolUsage", SIZE_T),
                ("QuotaPagedPoolUsage", SIZE_T),
                ("QuotaPeakNonPagedPoolUsage", SIZE_T),
                ("QuotaNonPagedPoolUsage", SIZE_T),
                ("PagefileUsage", SIZE_T),
                ("PeakPagefileUsage", SIZE_T),
                ("PrivateUsage", SIZE_T),
                ]


def GetCurrentProcess():
    return Kernel32.GetCurrentProcess()


def GetProcessMemoryInfo(handle):
    psmemCounters = PROCESS_MEMORY_COUNTERS_EX()
    cb = DWORD(ctypes.sizeof(psmemCounters))
    b = psapi.GetProcessMemoryInfo(handle, ctypes.byref(psmemCounters), cb)
    if not b:
        psmemCounters = PROCESS_MEMORY_COUNTERS()
        cb = DWORD(ctypes.sizeof(psmemCounters))
        b = psapi.GetProcessMemoryInfo(handle, ctypes.byref(psmemCounters), cb)
        if not b:
            raise ctypes.WinError()
    d = {}
    for k, t in psmemCounters._fields_:
        d[k] = getattr(psmemCounters, k)
    return d
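A short usage sketch (not in the original module); it can only run on Windows, where ctypes.windll exists, and simply queries the calling process's own memory counters:

if __name__ == '__main__':
    # Dict keys come straight from the PROCESS_MEMORY_COUNTERS(_EX) fields.
    info = GetProcessMemoryInfo(GetCurrentProcess())
    print 'working set:   %d bytes' % info['WorkingSetSize']
    print 'peak pagefile: %d bytes' % info['PeakPagefileUsage']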
gpl-2.0
Denniskamau/Twitter-Sentiment-Analysis
stream.py
1
3946
import re
import json
import tweepy
from config import *
from tweepy import OAuthHandler
from textblob import TextBlob


class TwitterClient(object):
    '''
    Generic Twitter Class for sentiment analysis.
    '''
    def __init__(self):
        '''
        Class constructor or initialization method.
        '''
        # attempt authentication
        try:
            # create OAuthHandler object
            self.auth = OAuthHandler(consumer_key, consumer_secret)
            # set access token and secret
            self.auth.set_access_token(access_token, access_token_secret)
            # create tweepy API object to fetch tweets
            self.api = tweepy.API(self.auth)
        except:
            print("Error: Authentication Failed")

    def clean_tweet(self, tweet):
        '''
        Utility function to clean tweet text by removing links, special
        characters using simple regex statements.
        '''
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
                               " ", tweet).split())

    def get_tweet_sentiment(self, tweet):
        '''
        Utility function to classify sentiment of passed tweet
        using textblob's sentiment method
        '''
        # create TextBlob object of passed tweet text
        analysis = TextBlob(self.clean_tweet(tweet))
        # set sentiment
        if analysis.sentiment.polarity > 0:
            return 'positive'
        elif analysis.sentiment.polarity == 0:
            return 'neutral'
        else:
            return 'negative'

    def get_tweets(self, query, count=10):
        '''
        Main function to fetch tweets and parse them.
        '''
        # empty list to store parsed tweets
        tweets = []

        try:
            # call twitter api to fetch tweets
            fetched_tweets = self.api.search(q=query, count=count)

            # parsing tweets one by one
            for tweet in fetched_tweets:
                # empty dictionary to store required params of a tweet
                parsed_tweet = {}

                # saving text of tweet
                parsed_tweet['text'] = tweet.text
                # saving sentiment of tweet
                parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)

                # appending parsed tweet to tweets list
                if tweet.retweet_count > 0:
                    # if tweet has retweets, ensure that it is appended only once
                    if parsed_tweet not in tweets:
                        tweets.append(parsed_tweet)
                else:
                    tweets.append(parsed_tweet)

            # return parsed tweets
            return tweets

        except tweepy.TweepError as e:
            # print error (if any)
            print("Error : " + str(e))


def main():
    # creating object of TwitterClient Class
    api = TwitterClient()
    # calling function to get tweets
    tweets = api.get_tweets(query=input(" "), count=200)

    # picking positive tweets from tweets
    ptweets = [tweet for tweet in tweets if tweet['sentiment'] == 'positive']
    # percentage of positive tweets
    print("Positive tweets percentage: {} %".format(100*len(ptweets)/len(tweets)))
    # picking negative tweets from tweets
    ntweets = [tweet for tweet in tweets if tweet['sentiment'] == 'negative']
    # percentage of negative tweets
    print("Negative tweets percentage: {} %".format(100*len(ntweets)/len(tweets)))
    # percentage of neutral tweets
    # print("Neutral tweets percentage: {} % ".format(100*len(tweets - ntweets - ptweets)/len(tweets)))

    # printing first 10 positive tweets
    print("\n\nPositive tweets:")
    for tweet in ptweets[:10]:
        print(tweet['text'])

    # printing first 10 negative tweets
    print("\n\nNegative tweets:")
    for tweet in ntweets[:10]:
        print(tweet['text'])


if __name__ == "__main__":
    # calling main function
    main()
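A quick offline check (mine, not part of the script) of the sentiment helper. It assumes config defines the four OAuth names imported above; no network call is made because only the TextBlob path is exercised, and the expected labels are typical TextBlob results rather than guarantees:

client = TwitterClient()
print(client.get_tweet_sentiment("I love this library, it works great"))  # typically 'positive'
print(client.get_tweet_sentiment("This is a tweet"))                      # typically 'neutral'
print(client.get_tweet_sentiment("I hate waiting, this is terrible"))     # typically 'negative'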
mit
VasuAgrawal/tartanHacks2015
site/flask/lib/python2.7/site-packages/whoosh/filedb/compound.py
87
11090
# Copyright 2011 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#    1. Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.

import errno
import os
import sys
from threading import Lock
from shutil import copyfileobj

try:
    import mmap
except ImportError:
    mmap = None

from whoosh.compat import BytesIO, memoryview_
from whoosh.filedb.structfile import BufferFile, StructFile
from whoosh.filedb.filestore import FileStorage, StorageError
from whoosh.system import emptybytes
from whoosh.util import random_name


class CompoundStorage(FileStorage):
    readonly = True

    def __init__(self, dbfile, use_mmap=True, basepos=0):
        self._file = dbfile
        self.is_closed = False

        # Seek to the end to get total file size (to check if mmap is OK)
        dbfile.seek(0, os.SEEK_END)
        filesize = self._file.tell()
        dbfile.seek(basepos)

        self._diroffset = self._file.read_long()
        self._dirlength = self._file.read_int()
        self._file.seek(self._diroffset)
        self._dir = self._file.read_pickle()
        self._options = self._file.read_pickle()
        self._locks = {}
        self._source = None

        use_mmap = (
            use_mmap
            and hasattr(self._file, "fileno")  # check file is a real file
            and filesize < sys.maxsize  # check fit on 32-bit Python
        )
        if mmap and use_mmap:
            # Try to open the entire segment as a memory-mapped object
            try:
                fileno = self._file.fileno()
                self._source = mmap.mmap(fileno, 0, access=mmap.ACCESS_READ)
            except (mmap.error, OSError):
                e = sys.exc_info()[1]
                # If we got an error because there wasn't enough memory to
                # open the map, ignore it and fall through, we'll just use the
                # (slower) "sub-file" implementation
                if e.errno == errno.ENOMEM:
                    pass
                else:
                    raise
            else:
                # If that worked, we can close the file handle we were given
                self._file.close()
                self._file = None

    def __repr__(self):
        return "<%s (%s)>" % (self.__class__.__name__, self._name)

    def close(self):
        if self.is_closed:
            raise Exception("Already closed")
        self.is_closed = True

        if self._source:
            try:
                self._source.close()
            except BufferError:
                del self._source
        if self._file:
            self._file.close()

    def range(self, name):
        try:
            fileinfo = self._dir[name]
        except KeyError:
            raise NameError("Unknown file %r" % (name,))
        return fileinfo["offset"], fileinfo["length"]

    def open_file(self, name, *args, **kwargs):
        if self.is_closed:
            raise StorageError("Storage was closed")

        offset, length = self.range(name)
        if self._source:
            # Create a memoryview/buffer from the mmap
            buf = memoryview_(self._source, offset, length)
            f = BufferFile(buf, name=name)
        elif hasattr(self._file, "subset"):
            f = self._file.subset(offset, length, name=name)
        else:
            f = StructFile(SubFile(self._file, offset, length), name=name)
        return f

    def list(self):
        return list(self._dir.keys())

    def file_exists(self, name):
        return name in self._dir

    def file_length(self, name):
        info = self._dir[name]
        return info["length"]

    def file_modified(self, name):
        info = self._dir[name]
        return info["modified"]

    def lock(self, name):
        if name not in self._locks:
            self._locks[name] = Lock()
        return self._locks[name]

    @staticmethod
    def assemble(dbfile, store, names, **options):
        assert names, names

        directory = {}
        basepos = dbfile.tell()
        dbfile.write_long(0)  # Directory position
        dbfile.write_int(0)  # Directory length

        # Copy the files into the compound file
        for name in names:
            if name.endswith(".toc") or name.endswith(".seg"):
                raise Exception(name)

        for name in names:
            offset = dbfile.tell()
            length = store.file_length(name)
            modified = store.file_modified(name)
            directory[name] = {"offset": offset, "length": length,
                               "modified": modified}
            f = store.open_file(name)
            copyfileobj(f, dbfile)
            f.close()

        CompoundStorage.write_dir(dbfile, basepos, directory, options)

    @staticmethod
    def write_dir(dbfile, basepos, directory, options=None):
        options = options or {}

        dirpos = dbfile.tell()  # Remember the start of the directory
        dbfile.write_pickle(directory)  # Write the directory
        dbfile.write_pickle(options)
        endpos = dbfile.tell()  # Remember the end of the directory
        dbfile.flush()
        dbfile.seek(basepos)  # Seek back to the start
        dbfile.write_long(dirpos)  # Directory position
        dbfile.write_int(endpos - dirpos)  # Directory length

        dbfile.close()


class SubFile(object):
    def __init__(self, parentfile, offset, length, name=None):
        self._file = parentfile
        self._offset = offset
        self._length = length
        self._end = offset + length
        self._pos = 0

        self.name = name
        self.closed = False

    def close(self):
        self.closed = True

    def subset(self, position, length, name=None):
        start = self._offset + position
        end = start + length
        name = name or self.name
        assert self._offset >= start >= self._end
        assert self._offset >= end >= self._end
        return SubFile(self._file, self._offset + position, length, name=name)

    def read(self, size=None):
        if size is None:
            size = self._length - self._pos
        else:
            size = min(size, self._length - self._pos)
        if size < 0:
            size = 0

        if size > 0:
            self._file.seek(self._offset + self._pos)
            self._pos += size
            return self._file.read(size)
        else:
            return emptybytes

    def readline(self):
        maxsize = self._length - self._pos
        self._file.seek(self._offset + self._pos)
        data = self._file.readline()
        if len(data) > maxsize:
            data = data[:maxsize]
        self._pos += len(data)
        return data

    def seek(self, where, whence=0):
        if whence == 0:  # Absolute
            pos = where
        elif whence == 1:  # Relative
            pos = self._pos + where
        elif whence == 2:  # From end
            pos = self._length - where
        else:
            raise ValueError

        self._pos = pos

    def tell(self):
        return self._pos


class CompoundWriter(object):
    def __init__(self, tempstorage, buffersize=32 * 1024):
        assert isinstance(buffersize, int)
        self._tempstorage = tempstorage
        self._tempname = "%s.ctmp" % random_name()
        self._temp = tempstorage.create_file(self._tempname, mode="w+b")
        self._buffersize = buffersize
        self._streams = {}

    def create_file(self, name):
        ss = self.SubStream(self._temp, self._buffersize)
        self._streams[name] = ss
        return StructFile(ss)

    def _readback(self):
        temp = self._temp
        for name, substream in self._streams.items():
            substream.close()

            def gen():
                for f, offset, length in substream.blocks:
                    if f is None:
                        f = temp
                    f.seek(offset)
                    yield f.read(length)

            yield (name, gen)
        temp.close()
        self._tempstorage.delete_file(self._tempname)

    def save_as_compound(self, dbfile):
        basepos = dbfile.tell()
        dbfile.write_long(0)  # Directory offset
        dbfile.write_int(0)  # Directory length

        directory = {}
        for name, blocks in self._readback():
            filestart = dbfile.tell()
            for block in blocks():
                dbfile.write(block)
            directory[name] = {"offset": filestart,
                               "length": dbfile.tell() - filestart}

        CompoundStorage.write_dir(dbfile, basepos, directory)

    def save_as_files(self, storage, name_fn):
        for name, blocks in self._readback():
            f = storage.create_file(name_fn(name))
            for block in blocks():
                f.write(block)
            f.close()

    class SubStream(object):
        def __init__(self, dbfile, buffersize):
            self._dbfile = dbfile
            self._buffersize = buffersize
            self._buffer = BytesIO()
            self.blocks = []

        def tell(self):
            return sum(b[2] for b in self.blocks) + self._buffer.tell()

        def write(self, inbytes):
            bio = self._buffer
            buflen = bio.tell()
            length = buflen + len(inbytes)
            if length >= self._buffersize:
                offset = self._dbfile.tell()
                self._dbfile.write(bio.getvalue()[:buflen])
                self._dbfile.write(inbytes)

                self.blocks.append((None, offset, length))
                self._buffer.seek(0)
            else:
                bio.write(inbytes)

        def close(self):
            bio = self._buffer
            length = bio.tell()
            if length:
                self.blocks.append((bio, 0, length))
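A rough round-trip sketch based on reading the classes above (the directory and file names are mine, and it assumes a writable Whoosh FileStorage already exists):

from whoosh.filedb.filestore import FileStorage

st = FileStorage("indexdir")                      # hypothetical directory
w = CompoundWriter(st)
w.create_file("terms").write(b"term data")        # buffered in a SubStream
w.create_file("postings").write(b"posting data")
w.save_as_compound(st.create_file("seg1.cmp"))    # writes blocks, then the directory

cs = CompoundStorage(st.open_file("seg1.cmp"))
print(cs.list())                                  # names of the packed sub-files
print(cs.open_file("terms").read())               # b'term data'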
mit
tempbottle/kbengine
kbe/res/scripts/common/Lib/unittest/test/test_assertions.py
82
16470
import datetime
import warnings
import weakref
import unittest
from itertools import product


class Test_Assertions(unittest.TestCase):
    def test_AlmostEqual(self):
        self.assertAlmostEqual(1.00000001, 1.0)
        self.assertNotAlmostEqual(1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.0000001, 1.0)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 1.00000001, 1.0)

        self.assertAlmostEqual(1.1, 1.0, places=0)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 1.1, 1.0, places=1)

        self.assertAlmostEqual(0, .1+.1j, places=0)
        self.assertNotAlmostEqual(0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertAlmostEqual, 0, .1+.1j, places=1)
        self.assertRaises(self.failureException,
                          self.assertNotAlmostEqual, 0, .1+.1j, places=0)

        self.assertAlmostEqual(float('inf'), float('inf'))
        self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                          float('inf'), float('inf'))

    def test_AlmostEqualWithDelta(self):
        self.assertAlmostEqual(1.1, 1.0, delta=0.5)
        self.assertAlmostEqual(1.0, 1.1, delta=0.5)
        self.assertNotAlmostEqual(1.1, 1.0, delta=0.05)
        self.assertNotAlmostEqual(1.0, 1.1, delta=0.05)

        self.assertAlmostEqual(1.0, 1.0, delta=0.5)
        self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                          1.0, 1.0, delta=0.5)

        self.assertRaises(self.failureException, self.assertAlmostEqual,
                          1.1, 1.0, delta=0.05)
        self.assertRaises(self.failureException, self.assertNotAlmostEqual,
                          1.1, 1.0, delta=0.5)

        self.assertRaises(TypeError, self.assertAlmostEqual,
                          1.1, 1.0, places=2, delta=2)
        self.assertRaises(TypeError, self.assertNotAlmostEqual,
                          1.1, 1.0, places=2, delta=2)

        first = datetime.datetime.now()
        second = first + datetime.timedelta(seconds=10)
        self.assertAlmostEqual(first, second,
                               delta=datetime.timedelta(seconds=20))
        self.assertNotAlmostEqual(first, second,
                                  delta=datetime.timedelta(seconds=5))

    def test_assertRaises(self):
        def _raise(e):
            raise e
        self.assertRaises(KeyError, _raise, KeyError)
        self.assertRaises(KeyError, _raise, KeyError("key"))
        try:
            self.assertRaises(KeyError, lambda: None)
        except self.failureException as e:
            self.assertIn("KeyError not raised", str(e))
        else:
            self.fail("assertRaises() didn't fail")
        try:
            self.assertRaises(KeyError, _raise, ValueError)
        except ValueError:
            pass
        else:
            self.fail("assertRaises() didn't let exception pass through")
        with self.assertRaises(KeyError) as cm:
            try:
                raise KeyError
            except Exception as e:
                exc = e
                raise
        self.assertIs(cm.exception, exc)

        with self.assertRaises(KeyError):
            raise KeyError("key")
        try:
            with self.assertRaises(KeyError):
                pass
        except self.failureException as e:
            self.assertIn("KeyError not raised", str(e))
        else:
            self.fail("assertRaises() didn't fail")
        try:
            with self.assertRaises(KeyError):
                raise ValueError
        except ValueError:
            pass
        else:
            self.fail("assertRaises() didn't let exception pass through")

    def test_assertRaises_frames_survival(self):
        # Issue #9815: assertRaises should avoid keeping local variables
        # in a traceback alive.
        class A:
            pass
        wr = None

        class Foo(unittest.TestCase):

            def foo(self):
                nonlocal wr
                a = A()
                wr = weakref.ref(a)
                try:
                    raise IOError
                except IOError:
                    raise ValueError

            def test_functional(self):
                self.assertRaises(ValueError, self.foo)

            def test_with(self):
                with self.assertRaises(ValueError):
                    self.foo()

        Foo("test_functional").run()
        self.assertIsNone(wr())
        Foo("test_with").run()
        self.assertIsNone(wr())

    def testAssertNotRegex(self):
        self.assertNotRegex('Ala ma kota', r'r+')
        try:
            self.assertNotRegex('Ala ma kota', r'k.t', 'Message')
        except self.failureException as e:
            self.assertIn("'kot'", e.args[0])
            self.assertIn('Message', e.args[0])
        else:
            self.fail('assertNotRegex should have failed.')


class TestLongMessage(unittest.TestCase):
    """Test that the individual asserts honour longMessage.
    This actually tests all the message behaviour for
    asserts that use longMessage."""

    def setUp(self):
        class TestableTestFalse(unittest.TestCase):
            longMessage = False
            failureException = self.failureException

            def testTest(self):
                pass

        class TestableTestTrue(unittest.TestCase):
            longMessage = True
            failureException = self.failureException

            def testTest(self):
                pass

        self.testableTrue = TestableTestTrue('testTest')
        self.testableFalse = TestableTestFalse('testTest')

    def testDefault(self):
        self.assertTrue(unittest.TestCase.longMessage)

    def test_formatMsg(self):
        self.assertEqual(self.testableFalse._formatMessage(None, "foo"), "foo")
        self.assertEqual(self.testableFalse._formatMessage("foo", "bar"), "foo")

        self.assertEqual(self.testableTrue._formatMessage(None, "foo"), "foo")
        self.assertEqual(self.testableTrue._formatMessage("foo", "bar"), "bar : foo")

        # This blows up if _formatMessage uses string concatenation
        self.testableTrue._formatMessage(object(), 'foo')

    def test_formatMessage_unicode_error(self):
        one = ''.join(chr(i) for i in range(255))
        # this used to cause a UnicodeDecodeError constructing msg
        self.testableTrue._formatMessage(one, '\uFFFD')

    def assertMessages(self, methodName, args, errors):
        """
        Check that methodName(*args) raises the correct error messages.
        errors should be a list of 4 regex that match the error when:
          1) longMessage = False and no msg passed;
          2) longMessage = False and msg passed;
          3) longMessage = True and no msg passed;
          4) longMessage = True and msg passed;
        """
        def getMethod(i):
            useTestableFalse = i < 2
            if useTestableFalse:
                test = self.testableFalse
            else:
                test = self.testableTrue
            return getattr(test, methodName)

        for i, expected_regex in enumerate(errors):
            testMethod = getMethod(i)
            kwargs = {}
            withMsg = i % 2
            if withMsg:
                kwargs = {"msg": "oops"}

            with self.assertRaisesRegex(self.failureException,
                                        expected_regex=expected_regex):
                testMethod(*args, **kwargs)

    def testAssertTrue(self):
        self.assertMessages('assertTrue', (False,),
                            ["^False is not true$", "^oops$", "^False is not true$",
                             "^False is not true : oops$"])

    def testAssertFalse(self):
        self.assertMessages('assertFalse', (True,),
                            ["^True is not false$", "^oops$", "^True is not false$",
                             "^True is not false : oops$"])

    def testNotEqual(self):
        self.assertMessages('assertNotEqual', (1, 1),
                            ["^1 == 1$", "^oops$", "^1 == 1$",
                             "^1 == 1 : oops$"])

    def testAlmostEqual(self):
        self.assertMessages('assertAlmostEqual', (1, 2),
                            ["^1 != 2 within 7 places$", "^oops$",
                             "^1 != 2 within 7 places$",
                             "^1 != 2 within 7 places : oops$"])

    def testNotAlmostEqual(self):
        self.assertMessages('assertNotAlmostEqual', (1, 1),
                            ["^1 == 1 within 7 places$", "^oops$",
                             "^1 == 1 within 7 places$",
                             "^1 == 1 within 7 places : oops$"])

    def test_baseAssertEqual(self):
        self.assertMessages('_baseAssertEqual', (1, 2),
                            ["^1 != 2$", "^oops$", "^1 != 2$", "^1 != 2 : oops$"])

    def testAssertSequenceEqual(self):
        # Error messages are multiline so not testing on full message
        # assertTupleEqual and assertListEqual delegate to this method
        self.assertMessages('assertSequenceEqual', ([], [None]),
                            ["\+ \[None\]$", "^oops$", r"\+ \[None\]$",
                             r"\+ \[None\] : oops$"])

    def testAssertSetEqual(self):
        self.assertMessages('assertSetEqual', (set(), set([None])),
                            ["None$", "^oops$", "None$",
                             "None : oops$"])

    def testAssertIn(self):
        self.assertMessages('assertIn', (None, []),
                            ['^None not found in \[\]$', "^oops$",
                             '^None not found in \[\]$',
                             '^None not found in \[\] : oops$'])

    def testAssertNotIn(self):
        self.assertMessages('assertNotIn', (None, [None]),
                            ['^None unexpectedly found in \[None\]$', "^oops$",
                             '^None unexpectedly found in \[None\]$',
                             '^None unexpectedly found in \[None\] : oops$'])

    def testAssertDictEqual(self):
        self.assertMessages('assertDictEqual', ({}, {'key': 'value'}),
                            [r"\+ \{'key': 'value'\}$", "^oops$",
                             "\+ \{'key': 'value'\}$",
                             "\+ \{'key': 'value'\} : oops$"])

    def testAssertDictContainsSubset(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.assertMessages('assertDictContainsSubset', ({'key': 'value'}, {}),
                                ["^Missing: 'key'$", "^oops$",
                                 "^Missing: 'key'$",
                                 "^Missing: 'key' : oops$"])

    def testAssertMultiLineEqual(self):
        self.assertMessages('assertMultiLineEqual', ("", "foo"),
                            [r"\+ foo$", "^oops$",
                             r"\+ foo$",
                             r"\+ foo : oops$"])

    def testAssertLess(self):
        self.assertMessages('assertLess', (2, 1),
                            ["^2 not less than 1$", "^oops$",
                             "^2 not less than 1$", "^2 not less than 1 : oops$"])

    def testAssertLessEqual(self):
        self.assertMessages('assertLessEqual', (2, 1),
                            ["^2 not less than or equal to 1$", "^oops$",
                             "^2 not less than or equal to 1$",
                             "^2 not less than or equal to 1 : oops$"])

    def testAssertGreater(self):
        self.assertMessages('assertGreater', (1, 2),
                            ["^1 not greater than 2$", "^oops$",
                             "^1 not greater than 2$",
                             "^1 not greater than 2 : oops$"])

    def testAssertGreaterEqual(self):
        self.assertMessages('assertGreaterEqual', (1, 2),
                            ["^1 not greater than or equal to 2$", "^oops$",
                             "^1 not greater than or equal to 2$",
                             "^1 not greater than or equal to 2 : oops$"])

    def testAssertIsNone(self):
        self.assertMessages('assertIsNone', ('not None',),
                            ["^'not None' is not None$", "^oops$",
                             "^'not None' is not None$",
                             "^'not None' is not None : oops$"])

    def testAssertIsNotNone(self):
        self.assertMessages('assertIsNotNone', (None,),
                            ["^unexpectedly None$", "^oops$",
                             "^unexpectedly None$",
                             "^unexpectedly None : oops$"])

    def testAssertIs(self):
        self.assertMessages('assertIs', (None, 'foo'),
                            ["^None is not 'foo'$", "^oops$",
                             "^None is not 'foo'$",
                             "^None is not 'foo' : oops$"])

    def testAssertIsNot(self):
        self.assertMessages('assertIsNot', (None, None),
                            ["^unexpectedly identical: None$", "^oops$",
                             "^unexpectedly identical: None$",
                             "^unexpectedly identical: None : oops$"])

    def assertMessagesCM(self, methodName, args, func, errors):
        """
        Check that the correct error messages are raised while executing:
          with method(*args):
              func()
        *errors* should be a list of 4 regex that match the error when:
          1) longMessage = False and no msg passed;
          2) longMessage = False and msg passed;
          3) longMessage = True and no msg passed;
          4) longMessage = True and msg passed;
        """
        p = product((self.testableFalse, self.testableTrue),
                    ({}, {"msg": "oops"}))
        for (cls, kwargs), err in zip(p, errors):
            method = getattr(cls, methodName)
            with self.assertRaisesRegex(cls.failureException, err):
                with method(*args, **kwargs) as cm:
                    func()

    def testAssertRaises(self):
        self.assertMessagesCM('assertRaises', (TypeError,), lambda: None,
                              ['^TypeError not raised$', '^oops$',
                               '^TypeError not raised$',
                               '^TypeError not raised : oops$'])

    def testAssertRaisesRegex(self):
        # test error not raised
        self.assertMessagesCM('assertRaisesRegex', (TypeError, 'unused regex'),
                              lambda: None,
                              ['^TypeError not raised$', '^oops$',
                               '^TypeError not raised$',
                               '^TypeError not raised : oops$'])
        # test error raised but with wrong message
        def raise_wrong_message():
            raise TypeError('foo')
        self.assertMessagesCM('assertRaisesRegex', (TypeError, 'regex'),
                              raise_wrong_message,
                              ['^"regex" does not match "foo"$', '^oops$',
                               '^"regex" does not match "foo"$',
                               '^"regex" does not match "foo" : oops$'])

    def testAssertWarns(self):
        self.assertMessagesCM('assertWarns', (UserWarning,), lambda: None,
                              ['^UserWarning not triggered$', '^oops$',
                               '^UserWarning not triggered$',
                               '^UserWarning not triggered : oops$'])

    def testAssertWarnsRegex(self):
        # test error not raised
        self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'unused regex'),
                              lambda: None,
                              ['^UserWarning not triggered$', '^oops$',
                               '^UserWarning not triggered$',
                               '^UserWarning not triggered : oops$'])
        # test warning raised but with wrong message
        def raise_wrong_message():
            warnings.warn('foo')
        self.assertMessagesCM('assertWarnsRegex', (UserWarning, 'regex'),
                              raise_wrong_message,
                              ['^"regex" does not match "foo"$', '^oops$',
                               '^"regex" does not match "foo"$',
                               '^"regex" does not match "foo" : oops$'])


if __name__ == "__main__":
    unittest.main()
lgpl-3.0
slayerjain/servo
tests/wpt/css-tests/css21_dev/xhtml1print/reference/support/fonts/makegsubfonts.py
1616
14125
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
    ScriptList, ScriptRecord, Script, DefaultLangSys,\
    FeatureList, FeatureRecord, Feature,\
    LookupList, Lookup, AlternateSubst, SingleSubst

# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")

baseCodepoint = 0xe000

# -------
# Features
# -------

f = open(featureList, "rb")
text = f.read()
f.close()

mapping = []
for line in text.splitlines():
    line = line.strip()
    if not line:
        continue
    if line.startswith("#"):
        continue
    # parse
    values = line.split("\t")
    tag = values.pop(0)
    mapping.append(tag);

# --------
# Outlines
# --------

def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None,
                  charStringsIndex=None, topDict=None, charStrings=None):
    charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
    charStringsIndex.append(charString)
    glyphID = len(topDict.charset)
    charStrings.charStrings[glyphName] = glyphID
    topDict.charset.append(glyphName)

def makeLookup1():
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()

    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)

    # load the shell
    shell = TTFont(shellTempPath)

    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()

    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]

    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]

    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]

    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)

    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex

    features = sorted(mapping)

    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):

        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics

        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1

        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics

        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format

        # bump this up so that the sequence is the same as the lookup 3 font
        cp += 3

    # set the glyph order
    shell.setGlyphOrder(glyphOrder)

    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0

    # make a list of all the features we will make
    featureCount = len(features)

    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []

    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]

    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 1
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = SingleSubst()
        subtable.Format = 2
        subtable.LookupType = 1
        subtable.mapping = {
            "%s.pass" % tag : "%s.fail" % tag,
            "%s.fail" % tag : "%s.pass" % tag,
        }
        lookup.SubTable.append(subtable)

    path = outputPath % 1 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)

    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)

def makeLookup3():
    # make a variation of the shell TTX data
    f = open(shellSourcePath)
    ttxData = f.read()
    f.close()
    ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
    tempShellSourcePath = shellSourcePath + ".temp"
    f = open(tempShellSourcePath, "wb")
    f.write(ttxData)
    f.close()

    # compile the shell
    shell = TTFont(sfntVersion="OTTO")
    shell.importXML(tempShellSourcePath)
    shell.save(shellTempPath)
    os.remove(tempShellSourcePath)

    # load the shell
    shell = TTFont(shellTempPath)

    # grab the PASS and FAIL data
    hmtx = shell["hmtx"]
    glyphSet = shell.getGlyphSet()

    failGlyph = glyphSet["F"]
    failGlyph.decompile()
    failGlyphProgram = list(failGlyph.program)
    failGlyphMetrics = hmtx["F"]

    passGlyph = glyphSet["P"]
    passGlyph.decompile()
    passGlyphProgram = list(passGlyph.program)
    passGlyphMetrics = hmtx["P"]

    # grab some tables
    hmtx = shell["hmtx"]
    cmap = shell["cmap"]

    # start the glyph order
    existingGlyphs = [".notdef", "space", "F", "P"]
    glyphOrder = list(existingGlyphs)

    # start the CFF
    cff = shell["CFF "].cff
    globalSubrs = cff.GlobalSubrs
    topDict = cff.topDictIndex[0]
    topDict.charset = existingGlyphs
    private = topDict.Private
    charStrings = topDict.CharStrings
    charStringsIndex = charStrings.charStringsIndex

    features = sorted(mapping)

    # build the outline, hmtx and cmap data
    cp = baseCodepoint
    for index, tag in enumerate(features):

        # tag.pass
        glyphName = "%s.pass" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics

        # tag.fail
        glyphName = "%s.fail" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=failGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = failGlyphMetrics

        # tag.default
        glyphName = "%s.default" % tag
        glyphOrder.append(glyphName)
        addGlyphToCFF(
            glyphName=glyphName,
            program=passGlyphProgram,
            private=private,
            globalSubrs=globalSubrs,
            charStringsIndex=charStringsIndex,
            topDict=topDict,
            charStrings=charStrings
        )
        hmtx[glyphName] = passGlyphMetrics

        for table in cmap.tables:
            if table.format == 4:
                table.cmap[cp] = glyphName
            else:
                raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
        cp += 1

        # tag.alt1,2,3
        for i in range(1,4):
            glyphName = "%s.alt%d" % (tag, i)
            glyphOrder.append(glyphName)
            addGlyphToCFF(
                glyphName=glyphName,
                program=failGlyphProgram,
                private=private,
                globalSubrs=globalSubrs,
                charStringsIndex=charStringsIndex,
                topDict=topDict,
                charStrings=charStrings
            )
            hmtx[glyphName] = failGlyphMetrics

            for table in cmap.tables:
                if table.format == 4:
                    table.cmap[cp] = glyphName
                else:
                    raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
            cp += 1

    # set the glyph order
    shell.setGlyphOrder(glyphOrder)

    # start the GSUB
    shell["GSUB"] = newTable("GSUB")
    gsub = shell["GSUB"].table = GSUB()
    gsub.Version = 1.0

    # make a list of all the features we will make
    featureCount = len(features)

    # set up the script list
    scriptList = gsub.ScriptList = ScriptList()
    scriptList.ScriptCount = 1
    scriptList.ScriptRecord = []
    scriptRecord = ScriptRecord()
    scriptList.ScriptRecord.append(scriptRecord)
    scriptRecord.ScriptTag = "DFLT"
    script = scriptRecord.Script = Script()
    defaultLangSys = script.DefaultLangSys = DefaultLangSys()
    defaultLangSys.FeatureCount = featureCount
    defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
    defaultLangSys.ReqFeatureIndex = 65535
    defaultLangSys.LookupOrder = None
    script.LangSysCount = 0
    script.LangSysRecord = []

    # set up the feature list
    featureList = gsub.FeatureList = FeatureList()
    featureList.FeatureCount = featureCount
    featureList.FeatureRecord = []
    for index, tag in enumerate(features):
        # feature record
        featureRecord = FeatureRecord()
        featureRecord.FeatureTag = tag
        feature = featureRecord.Feature = Feature()
        featureList.FeatureRecord.append(featureRecord)
        # feature
        feature.FeatureParams = None
        feature.LookupCount = 1
        feature.LookupListIndex = [index]

    # write the lookups
    lookupList = gsub.LookupList = LookupList()
    lookupList.LookupCount = featureCount
    lookupList.Lookup = []
    for tag in features:
        # lookup
        lookup = Lookup()
        lookup.LookupType = 3
        lookup.LookupFlag = 0
        lookup.SubTableCount = 1
        lookup.SubTable = []
        lookupList.Lookup.append(lookup)
        # subtable
        subtable = AlternateSubst()
        subtable.Format = 1
        subtable.LookupType = 3
        subtable.alternates = {
            "%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
            "%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
            "%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
        }
        lookup.SubTable.append(subtable)

    path = outputPath % 3 + ".otf"
    if os.path.exists(path):
        os.remove(path)
    shell.save(path)

    # get rid of the shell
    if os.path.exists(shellTempPath):
        os.remove(shellTempPath)

def makeJavascriptData():
    features = sorted(mapping)

    outStr = []

    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append("  Features defined in gsubtest fonts with associated base")
    outStr.append("  codepoints for each feature:")
    outStr.append("")
    outStr.append("  cp = codepoint for feature featX")
    outStr.append("")
    outStr.append("  cp   default   PASS")
    outStr.append("  cp   featX=1   FAIL")
    outStr.append("  cp   featX=2   FAIL")
    outStr.append("")
    outStr.append("  cp+1 default   FAIL")
    outStr.append("  cp+1 featX=1   PASS")
    outStr.append("  cp+1 featX=2   FAIL")
    outStr.append("")
    outStr.append("  cp+2 default   FAIL")
    outStr.append("  cp+2 featX=1   FAIL")
    outStr.append("  cp+2 featX=2   PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint

    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        cp += 4

    outStr.append(textwrap.fill(", ".join(taglist), initial_indent="  ",
                                subsequent_indent="  "))
    outStr.append("};");
    outStr.append("");

    if os.path.exists(javascriptData):
        os.remove(javascriptData)

    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()


# build fonts

print "Making lookup type 1 font..."
makeLookup1()

print "Making lookup type 3 font..."
makeLookup3()

# output javascript data

print "Making javascript data file..."
makeJavascriptData()
mpl-2.0
knkinnard/byte-2
lib/werkzeug/contrib/wrappers.py
181
10337
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.wrappers
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Extra wrappers or mixins contributed by the community.  These wrappers can
    be mixed in into request objects to add extra functionality.

    Example::

        from werkzeug.wrappers import Request as RequestBase
        from werkzeug.contrib.wrappers import JSONRequestMixin

        class Request(RequestBase, JSONRequestMixin):
            pass

    Afterwards this request object provides the extra functionality of the
    :class:`JSONRequestMixin`.

    :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import codecs
try:
    from simplejson import loads
except ImportError:
    from json import loads

from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance


def is_known_charset(charset):
    """Checks if the given charset is known to Python."""
    try:
        codecs.lookup(charset)
    except LookupError:
        return False
    return True


class JSONRequestMixin(object):
    """Add json method to a request object.  This will parse the input data
    through simplejson if possible.

    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not json or if the data itself cannot be parsed as json.
    """

    @cached_property
    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a JSON request')
        try:
            return loads(self.data)
        except Exception:
            raise BadRequest('Unable to read JSON request')


class ProtobufRequestMixin(object):
    """Add protobuf parsing method to a request object.  This will parse the
    input data through `protobuf`_ if possible.

    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.

    .. _protobuf: http://code.google.com/p/protobuf/
    """

    #: by default the :class:`ProtobufRequestMixin` will raise a
    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
    #: initialized.  You can bypass that check by setting this
    #: attribute to `False`.
    protobuf_check_initialization = True

    def parse_protobuf(self, proto_type):
        """Parse the data into an instance of proto_type."""
        if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
            raise BadRequest('Not a Protobuf request')

        obj = proto_type()
        try:
            obj.ParseFromString(self.data)
        except Exception:
            raise BadRequest("Unable to parse Protobuf request")

        # Fail if not all required fields are set
        if self.protobuf_check_initialization and not obj.IsInitialized():
            raise BadRequest("Partial Protobuf request")

        return obj


class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.

    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
    """

    def _get_routing_args(self):
        return self.environ.get('wsgiorg.routing_args', (()))[0]

    def _set_routing_args(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)

    routing_args = property(_get_routing_args, _set_routing_args, doc='''
        The positional URL arguments as `tuple`.''')
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        rv = self.environ.get('wsgiorg.routing_args')
        if rv is not None:
            return rv[1]
        rv = {}
        if not self.shallow:
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        if self.shallow:
            raise RuntimeError('A shallow request tried to modify the WSGI '
                               'environment.  If you really want to do that, '
                               'set `shallow` to False.')
        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)

    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
        The keyword URL arguments as `dict`.''')
    del _get_routing_vars, _set_routing_vars


class ReverseSlashBehaviorRequestMixin(object):
    """This mixin reverses the trailing slash behavior of :attr:`script_root`
    and :attr:`path`.  This makes it possible to use
    :func:`~urlparse.urljoin` directly on the paths.

    Because it changes the behavior of :class:`Request` this class has to be
    mixed in *before* the actual request class::

        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass

    This example shows the differences (for an application mounted on
    `/application` and the request going to `/application/foo/bar`):

        +---------------+-------------------+---------------------+
        |               | normal behavior   | reverse behavior    |
        +===============+===================+=====================+
        | `script_root` | ``/application``  | ``/application/``   |
        +---------------+-------------------+---------------------+
        | `path`        | ``/foo/bar``      | ``foo/bar``         |
        +---------------+-------------------+---------------------+
    """

    @cached_property
    def path(self):
        """Requested path as unicode.  This works a bit like the regular path
        info in the WSGI environment but will not include a leading slash.
        """
        path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
                                   self.charset, self.encoding_errors)
        return path.lstrip('/')

    @cached_property
    def script_root(self):
        """The root path of the script including a trailing slash."""
        path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
                                   self.charset, self.encoding_errors)
        return path.rstrip('/') + '/'


class DynamicCharsetRequestMixin(object):
    """If this mixin is mixed into a request class it will provide
    a dynamic `charset` attribute.  This means that if the charset is
    transmitted in the content type headers it's used from there.

    Because it changes the behavior of :class:`Request` this class has
    to be mixed in *before* the actual request class::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            pass

    By default the request object assumes that the URL charset is the
    same as the data charset.  If the charset varies on each request
    based on the transmitted data it's not a good idea to let the URLs
    change based on that.  Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out.  It's strongly
    recommended to set the URL charset to utf-8::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            url_charset = 'utf-8'

    .. versionadded:: 0.6
    """

    #: the default charset that is assumed if the content type header
    #: is missing or does not contain a charset parameter.  The default
    #: is latin1 which is what HTTP specifies as default charset.
    #: You may however want to set this to utf-8 to better support
    #: browsers that do not transmit a charset for incoming data.
    default_charset = 'latin1'

    def unknown_charset(self, charset):
        """Called if a charset was provided but is not supported by
        the Python codecs module.  By default latin1 is assumed then
        to not lose any information, you may override this method to
        change the behavior.

        :param charset: the charset that was not found.
        :return: the replacement charset.
        """
        return 'latin1'

    @cached_property
    def charset(self):
        """The charset from the content type."""
        header = self.environ.get('CONTENT_TYPE')
        if header:
            ct, options = parse_options_header(header)
            charset = options.get('charset')
            if charset:
                if is_known_charset(charset):
                    return charset
                return self.unknown_charset(charset)
        return self.default_charset


class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute.  This means that the charset is
    looked up in and stored in the `Content-Type` header and updates
    itself automatically.  This also means a small performance hit but
    can be useful if you're working with different charsets on
    responses.

    Because the charset attribute is not a property at class level,
    the default value is stored in `default_charset`.

    Because it changes the behavior of :class:`Response` this class has
    to be mixed in *before* the actual response class::

        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass

    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = 'utf-8'

    def _get_charset(self):
        header = self.headers.get('content-type')
        if header:
            charset = parse_options_header(header)[1].get('charset')
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        header = self.headers.get('content-type')
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError('Cannot set charset if Content-Type '
                            'header is missing.')
        options['charset'] = charset
        self.headers['Content-Type'] = dump_options_header(ct, options)

    charset = property(_get_charset, _set_charset, doc="""
        The charset for the response.  It's stored inside the
        Content-Type header as a parameter.""")
    del _get_charset, _set_charset
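A small demonstration sketch (mine, not from Werkzeug) of the response-side mixin; the charset property reads from, and writes back into, the Content-Type header:

from werkzeug.wrappers import Response as ResponseBase

class MyResponse(DynamicCharsetResponseMixin, ResponseBase):
    pass

resp = MyResponse(u'hello', content_type='text/html; charset=iso-8859-15')
assert resp.charset == 'iso-8859-15'
resp.charset = 'utf-8'  # rewrites the header via dump_options_header
assert resp.headers['Content-Type'] == 'text/html; charset=utf-8'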
apache-2.0
rschnapka/partner-contact
passport/tests/test_passport.py
1
6929
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    This module copyright (C) 2013 Savoir-faire Linux
#    (<http://www.savoirfairelinux.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.tests.common import TransactionCase
from openerp.osv.orm import browse_record
from datetime import date


class Base_Test_passport(TransactionCase):
    """Simple test creating a passport

    This is a base class for passport test cases. Inherit from this
    and setup values.
    """

    def setUp(self, vals=None):
        """
        Setting up passport.
        """
        if vals is None:
            vals = {}
        # Default test values
        self.vals = {'name': 'This is a test passport name',
                     'number': 'A200124789',
                     'country_id': 1,
                     'expiration_date': date(2013, 11, 14),
                     'birth_date': date(1980, 11, 21),
                     'gender': 'male',
                     }
        super(Base_Test_passport, self).setUp()
        # Overwrite vals if needed
        self.vals = dict(self.vals.items() + vals.items())
        # Create the passport object; we will be testing this, so store in self
        res_passport = self.registry('res.passport')
        self.passport_id = res_passport.create(
            self.cr, self.uid, self.vals, context=None
        )

    def test_passport(self):
        """
        Checking the passport creation.
        """
        res_passport = self.registry('res.passport')
        passport_obj = res_passport.browse(
            self.cr, self.uid, self.passport_id, context=None
        )
        for field in self.vals:
            val = passport_obj[field]
            if type(val) == browse_record:
                self.assertEquals(self.vals[field], val.id,
                                  "IDs for %s don't match: (%i != %i)"
                                  % (field, self.vals[field], val.id))
            else:
                self.assertEquals(str(self.vals[field]), str(val),
                                  "Values for %s don't match: (%s != %s)"
                                  % (field, str(self.vals[field]), str(val)))


class Test_passport_bad(Base_Test_passport):
    """Simple test creating a passport, test against bad values"""

    def setUp(self):
        """
        Setting up passport, then changing the values to test against.
        """
        super(Test_passport_bad, self).setUp()
        # Change vals to something wrong
        self.vals = {
            'name': 'This is the wrong passport name',
            'number': 'A111111111',
            'country_id': 0,
            'expiration_date': date(1999, 11, 14),
            'birth_date': date(1999, 11, 21),
            'gender': '',
        }

    def test_passport(self):
        """
        Checking the passport creation, assertions should all be false.
        """
        res_passport = self.registry('res.passport')
        passport_obj = res_passport.browse(
            self.cr, self.uid, self.passport_id, context=None
        )
        for field in self.vals:
            val = passport_obj[field]
            if type(val) == browse_record:
                self.assertNotEqual(self.vals[field], val.id,
                                    "IDs for %s don't match: (%i != %i)"
                                    % (field, self.vals[field], val.id))
            else:
                self.assertNotEqual(str(self.vals[field]), str(val),
                                    "Values for %s don't match: (%s != %s)"
                                    % (field, str(self.vals[field]), str(val)))


class Test_passport_name_get(TransactionCase):
    """Test name_get"""

    def setUp(self):
        """
        Setting up passport with name, country, either and none.
        """
        super(Test_passport_name_get, self).setUp()
        res_passport = self.registry('res.passport')
        res_country = self.registry('res.country')
        country = res_country.browse(self.cr, self.uid, 1, context=None)
        self.name_on_passport = 'test name'
        self.country_name = country.name_get()[0][1]
        self.both = res_passport.create(
            self.cr, self.uid,
            {'name': self.name_on_passport,
             'country_id': country.id,
             }, context=None)
        self.name_only = res_passport.create(
            self.cr, self.uid,
            {'name': self.name_on_passport,
             }, context=None)
        self.country_only = res_passport.create(
            self.cr, self.uid,
            {'country_id': country.id,
             }, context=None)
        self.neither = res_passport.create(
            self.cr, self.uid, {}, context=None)

    def test_passport(self):
        """
        Checking name_get() for each combination of country name and
        name on passport.
        """
        res_passport = self.registry('res.passport')
        both_obj = res_passport.browse(
            self.cr, self.uid, self.both, context=None
        )
        name_only = res_passport.browse(
            self.cr, self.uid, self.name_only, context=None
        )
        country_only = res_passport.browse(
            self.cr, self.uid, self.country_only, context=None
        )
        neither = res_passport.browse(
            self.cr, self.uid, self.neither, context=None
        )
        self.assertEquals(
            both_obj.name_get()[0][1],
            ' | '.join((self.country_name, self.name_on_passport)),
            'Error in passport name_get() with both country name and name on '
            'passport.'
        )
        self.assertEquals(
            name_only.name_get()[0][1],
            self.name_on_passport,
            'Error in passport name_get() with only name on passport.'
        )
        self.assertEquals(
            country_only.name_get()[0][1],
            self.country_name,
            'Error in passport name_get() with only name of country.'
        )
        self.assertEquals(
            neither.name_get()[0][1],
            '',
            'Error in passport name_get() with neither country name nor name '
            'on passport.'
        )
agpl-3.0
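Base_Test_passport above is written to be subclassed with overridden values; assuming that test module is importable, a hypothetical subclass (field values are illustrative) only passes the keys it changes:

from datetime import date


class Test_passport_female(Base_Test_passport):
    """Re-run the creation checks with a different gender and birth date."""

    def setUp(self):
        # Keys omitted here keep the defaults from Base_Test_passport;
        # its setUp() merges the two dicts before creating the record.
        super(Test_passport_female, self).setUp(vals={
            'gender': 'female',
            'birth_date': date(1985, 3, 2),
        })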
milad-soufastai/ansible-modules-extras
cloud/vmware/vmware_vm_vss_dvs_migrate.py
71
5422
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: vmware_vm_vss_dvs_migrate
short_description: Migrates a virtual machine from a standard vSwitch to a distributed vSwitch
description:
    - Migrates a virtual machine from a standard vSwitch to a distributed vSwitch
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
    - Tested on vSphere 5.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    vm_name:
        description:
            - Name of the virtual machine to migrate to a dvSwitch
        required: True
    dvportgroup_name:
        description:
            - Name of the distributed portgroup to migrate the virtual machine to
        required: True
extends_documentation_fragment: vmware.documentation
'''

EXAMPLES = '''
- name: Migrate VCSA to vDS
  local_action:
    module: vmware_vm_vss_dvs_migrate
    hostname: vcenter_ip_or_hostname
    username: vcenter_username
    password: vcenter_password
    vm_name: virtual_machine_name
    dvportgroup_name: distributed_portgroup_name
'''

try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False


class VMwareVmVssDvsMigrate(object):
    def __init__(self, module):
        self.module = module
        self.content = connect_to_api(module)
        self.vm = None
        self.vm_name = module.params['vm_name']
        self.dvportgroup_name = module.params['dvportgroup_name']

    def process_state(self):
        vm_nic_states = {
            'absent': self.migrate_network_adapter_vds,
            'present': self.state_exit_unchanged,
        }

        vm_nic_states[self.check_vm_network_state()]()

    def find_dvspg_by_name(self):
        vmware_distributed_port_group = get_all_objs(
            self.content, [vim.dvs.DistributedVirtualPortgroup])
        for dvspg in vmware_distributed_port_group:
            if dvspg.name == self.dvportgroup_name:
                return dvspg
        return None

    def find_vm_by_name(self):
        virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
        for vm in virtual_machines:
            if vm.name == self.vm_name:
                return vm
        return None

    def migrate_network_adapter_vds(self):
        vm_configspec = vim.vm.ConfigSpec()
        nic = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
        port = vim.dvs.PortConnection()
        devicespec = vim.vm.device.VirtualDeviceSpec()

        pg = self.find_dvspg_by_name()

        if pg is None:
            self.module.fail_json(msg="The distributed portgroup was not found")

        dvswitch = pg.config.distributedVirtualSwitch
        port.switchUuid = dvswitch.uuid
        port.portgroupKey = pg.key
        nic.port = port

        for device in self.vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualEthernetCard):
                devicespec.device = device
                devicespec.operation = \
                    vim.vm.device.VirtualDeviceSpec.Operation.edit
                devicespec.device.backing = nic
                vm_configspec.deviceChange.append(devicespec)

        task = self.vm.ReconfigVM_Task(vm_configspec)
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=result)

    def state_exit_unchanged(self):
        self.module.exit_json(changed=False)

    def check_vm_network_state(self):
        try:
            self.vm = self.find_vm_by_name()
if self.vm is None: self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name) for device in self.vm.config.hardware.device: if isinstance(device, vim.vm.device.VirtualEthernetCard): if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo): return 'present' return 'absent' except vmodl.RuntimeFault as runtime_fault: self.module.fail_json(msg=runtime_fault.msg) except vmodl.MethodFault as method_fault: self.module.fail_json(msg=method_fault.msg) def main(): argument_spec = vmware_argument_spec() argument_spec.update(dict(vm_name=dict(required=True, type='str'), dvportgroup_name=dict(required=True, type='str'))) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_PYVMOMI: module.fail_json(msg='pyvmomi is required for this module') vmware_vmnic_migrate = VMwareVmVssDvsMigrate(module) vmware_vmnic_migrate.process_state() from ansible.module_utils.vmware import * from ansible.module_utils.basic import * if __name__ == '__main__': main()
gpl-3.0
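The present/absent decision in check_vm_network_state() above reduces to an isinstance test on each NIC's backing; the same check as a standalone pyVmomi helper (the vm object is assumed to come from your own connection code):

from pyVmomi import vim


def nic_on_dvswitch(vm):
    """Return True if any ethernet card is already backed by a dvPortgroup."""
    for device in vm.config.hardware.device:
        if not isinstance(device, vim.vm.device.VirtualEthernetCard):
            continue
        if isinstance(device.backing,
                      vim.vm.device.VirtualEthernetCard
                      .DistributedVirtualPortBackingInfo):
            return True
    return False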
PaulWay/spacewalk
client/debian/packages-already-in-debian/python-ethtool/pifconfig.py
2
3217
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-

#   Copyright (C) 2008--2013 Red Hat, Inc.
#
#   Arnaldo Carvalho de Melo <acme@redhat.com>
#
#   This application is free software; you can redistribute it and/or
#   modify it under the terms of the GNU General Public License
#   as published by the Free Software Foundation; version 2.
#
#   This application is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#   General Public License for more details.

import getopt, ethtool, sys


def usage():
    print '''Usage: pifconfig <interface>
'''


def flags2str(flags):
    string = ""
    if flags & ethtool.IFF_UP:
        string += "UP "
    if flags & ethtool.IFF_BROADCAST:
        string += "BROADCAST "
    if flags & ethtool.IFF_DEBUG:
        string += "DEBUG "
    if flags & ethtool.IFF_LOOPBACK:
        string += "LOOPBACK "
    if flags & ethtool.IFF_POINTOPOINT:
        string += "POINTOPOINT "
    if flags & ethtool.IFF_NOTRAILERS:
        string += "NOTRAILERS "
    if flags & ethtool.IFF_RUNNING:
        string += "RUNNING "
    if flags & ethtool.IFF_NOARP:
        string += "NOARP "
    if flags & ethtool.IFF_PROMISC:
        string += "PROMISC "
    if flags & ethtool.IFF_ALLMULTI:
        string += "ALLMULTI "
    if flags & ethtool.IFF_MASTER:
        string += "MASTER "
    if flags & ethtool.IFF_SLAVE:
        string += "SLAVE "
    if flags & ethtool.IFF_MULTICAST:
        string += "MULTICAST "
    if flags & ethtool.IFF_PORTSEL:
        string += "PORTSEL "
    if flags & ethtool.IFF_AUTOMEDIA:
        string += "AUTOMEDIA "
    if flags & ethtool.IFF_DYNAMIC:
        string += "DYNAMIC "

    return string.strip()


def show_config(device):
    ipaddr = ethtool.get_ipaddr(device)
    netmask = ethtool.get_netmask(device)
    flags = ethtool.get_flags(device)
    print '%-9.9s' % device,
    if not (flags & ethtool.IFF_LOOPBACK):
        print "HWaddr %s" % ethtool.get_hwaddr(device),
    print '''
          inet addr:%s''' % ipaddr,
    if not (flags & (ethtool.IFF_LOOPBACK | ethtool.IFF_POINTOPOINT)):
        print "Bcast:%s" % ethtool.get_broadcast(device),
    print '''
          Mask:%s
          %s
''' % (netmask, flags2str(flags))


def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ("help",))
    except getopt.GetoptError, err:
        usage()
        print str(err)
        sys.exit(2)

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            return

    active_devices = ethtool.get_active_devices()
    for device in active_devices:
        show_config(device)

if __name__ == '__main__':
    main()
gpl-2.0
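flags2str() above is a sixteen-branch if-chain; an equivalent table-driven sketch (same python-ethtool IFF_* constants; the remaining entries are elided but follow the same pattern) is easier to extend:

import ethtool

_FLAG_NAMES = (
    (ethtool.IFF_UP, "UP"),
    (ethtool.IFF_BROADCAST, "BROADCAST"),
    (ethtool.IFF_LOOPBACK, "LOOPBACK"),
    (ethtool.IFF_RUNNING, "RUNNING"),
    (ethtool.IFF_MULTICAST, "MULTICAST"),
    # ... remaining IFF_* constants in the same style
)


def flags2str(flags):
    return " ".join(name for bit, name in _FLAG_NAMES if flags & bit)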
jcai19/smm_gem5
src/cpu/CheckerCPU.py
69
2022
# Copyright (c) 2007 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Nathan Binkert from m5.params import * from BaseCPU import BaseCPU class CheckerCPU(BaseCPU): type = 'CheckerCPU' abstract = True cxx_header = "cpu/checker/cpu.hh" exitOnError = Param.Bool(False, "Exit on an error") updateOnError = Param.Bool(False, "Update the checker with the main CPU's state on an error") warnOnlyOnLoadError = Param.Bool(True, "If a load result is incorrect, only print a warning and do not exit")
bsd-3-clause
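CheckerCPU is abstract, so a gem5 config sets these Params on a concrete subclass; a hypothetical fragment (O3Checker is one concrete checker in gem5 trees of this vintage, but verify against your build):

# Hypothetical gem5 config fragment.
from m5.objects import O3Checker

checker = O3Checker(exitOnError=False,
                    updateOnError=True,
                    warnOnlyOnLoadError=True)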
NeuroDataDesign/seelviz
Cherrypy/cherrypyapp.py
1
2633
import os, os.path import random import sqlite3 import string import time import cherrypy DB_STRING = "my.db" class StringGenerator(object): @cherrypy.expose def index(self): return file('index.html') class StringGeneratorWebService(object): exposed = True @cherrypy.tools.accept(media='text/plain') def GET(self): with sqlite3.connect(DB_STRING) as c: cherrypy.session['ts'] = time.time() r = c.execute("SELECT value FROM user_string WHERE session_id=?", [cherrypy.session.id]) return r.fetchone() def POST(self, length=8): some_string = ''.join(random.sample(string.hexdigits, int(length))) with sqlite3.connect(DB_STRING) as c: cherrypy.session['ts'] = time.time() c.execute("INSERT INTO user_string VALUES (?, ?)", [cherrypy.session.id, some_string]) return some_string def PUT(self, another_string): with sqlite3.connect(DB_STRING) as c: cherrypy.session['ts'] = time.time() c.execute("UPDATE user_string SET value=? WHERE session_id=?", [another_string, cherrypy.session.id]) def DELETE(self): cherrypy.session.pop('ts', None) with sqlite3.connect(DB_STRING) as c: c.execute("DELETE FROM user_string WHERE session_id=?", [cherrypy.session.id]) def setup_database(): """ Create the `user_string` table in the database on server startup """ with sqlite3.connect(DB_STRING) as con: con.execute("CREATE TABLE user_string (session_id, value)") def cleanup_database(): """ Destroy the `user_string` table from the database on server shutdown. """ with sqlite3.connect(DB_STRING) as con: con.execute("DROP TABLE user_string") if __name__ == '__main__': conf = { '/': { 'tools.sessions.on': True, 'tools.staticdir.root': os.path.abspath(os.getcwd()) }, '/generator': { 'request.dispatch': cherrypy.dispatch.MethodDispatcher(), 'tools.response_headers.on': True, 'tools.response_headers.headers': [('Content-Type', 'text/plain')], }, '/static': { 'tools.staticdir.on': True, 'tools.staticdir.dir': './public' } } cherrypy.engine.subscribe('start', setup_database) cherrypy.engine.subscribe('stop', cleanup_database) webapp = StringGenerator() webapp.generator = StringGeneratorWebService() cherrypy.quickstart(webapp, '/', conf)
apache-2.0
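Once the app above is running (CherryPy listens on port 8080 by default), /generator can be driven over plain HTTP; a hypothetical client session with the requests library, where the kept session cookie scopes the database row:

import requests

base = 'http://127.0.0.1:8080/generator'
s = requests.Session()  # keeps CherryPy's session cookie between calls

created = s.post(base, data={'length': 12})    # POST stores a new string
print(created.text)
print(s.get(base).text)                        # GET reads it back
s.put(base, data={'another_string': 'hello'})  # PUT replaces it
s.delete(base)                                 # DELETE drops the session's row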
mxOBS/deb-pkg_trusty_chromium-browser
net/android/tools/proxy_test_cases.py
165
10640
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generator script for proxy tests. See AndroidProxySelectorTest.java and net/proxy/proxy_config_service_android_unittest.cc To generate C++, run this script without arguments. To generate Java, run this script with -j argument. Note that this generator is not run as part of the build process because we are assuming that these test cases will not change often. """ import optparse test_cases = [ { "name": "NoProxy", "description" : "Test direct mapping when no proxy defined.", "properties" : { }, "mappings" : { "http://example.com/" : "DIRECT", "ftp://example.com/" : "DIRECT", "https://example.com/" : "DIRECT", } }, { "name": "HttpProxyHostAndPort", "description" : "Test http.proxyHost and http.proxyPort works.", "properties" : { "http.proxyHost" : "httpproxy.com", "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "PROXY httpproxy.com:8080", "ftp://example.com/" : "DIRECT", "https://example.com/" : "DIRECT", } }, { "name": "HttpProxyHostOnly", "description" : "We should get the default port (80) for proxied hosts.", "properties" : { "http.proxyHost" : "httpproxy.com", }, "mappings" : { "http://example.com/" : "PROXY httpproxy.com:80", "ftp://example.com/" : "DIRECT", "https://example.com/" : "DIRECT", } }, { "name": "HttpProxyPortOnly", "description" : "http.proxyPort only should not result in any hosts being proxied.", "properties" : { "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "DIRECT", "ftp://example.com/" : "DIRECT", "https://example.com/" : "DIRECT" } }, { "name": "HttpNonProxyHosts1", "description" : "Test that HTTP non proxy hosts are mapped correctly", "properties" : { "http.nonProxyHosts" : "slashdot.org", "http.proxyHost" : "httpproxy.com", "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "PROXY httpproxy.com:8080", "http://slashdot.org/" : "DIRECT", } }, { "name": "HttpNonProxyHosts2", "description" : "Test that | pattern works.", "properties" : { "http.nonProxyHosts" : "slashdot.org|freecode.net", "http.proxyHost" : "httpproxy.com", "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "PROXY httpproxy.com:8080", "http://slashdot.org/" : "DIRECT", "http://freecode.net/" : "DIRECT", } }, { "name": "HttpNonProxyHosts3", "description" : "Test that * pattern works.", "properties" : { "http.nonProxyHosts" : "*example.com", "http.proxyHost" : "httpproxy.com", "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "DIRECT", "http://www.example.com/" : "DIRECT", "http://slashdot.org/" : "PROXY httpproxy.com:8080", } }, { "name": "FtpNonProxyHosts", "description" : "Test that FTP non proxy hosts are mapped correctly", "properties" : { "ftp.nonProxyHosts" : "slashdot.org", "ftp.proxyHost" : "httpproxy.com", "ftp.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "DIRECT", "ftp://example.com/" : "PROXY httpproxy.com:8080", } }, { "name": "FtpProxyHostAndPort", "description" : "Test ftp.proxyHost and ftp.proxyPort works.", "properties" : { "ftp.proxyHost" : "httpproxy.com", "ftp.proxyPort" : "8080", }, "mappings" : { "ftp://example.com/" : "PROXY httpproxy.com:8080", "http://example.com/" : "DIRECT", "https://example.com/" : "DIRECT", } }, { "name": "FtpProxyHostOnly", "description" : "Test ftp.proxyHost and default port.", "properties" : { "ftp.proxyHost" : "httpproxy.com", }, "mappings" : { 
"ftp://example.com/" : "PROXY httpproxy.com:80", "http://example.com/" : "DIRECT", "https://example.com/" : "DIRECT", } }, { "name": "HttpsProxyHostAndPort", "description" : "Test https.proxyHost and https.proxyPort works.", "properties" : { "https.proxyHost" : "httpproxy.com", "https.proxyPort" : "8080", }, "mappings" : { "https://example.com/" : "PROXY httpproxy.com:8080", "http://example.com/" : "DIRECT", "ftp://example.com/" : "DIRECT", } }, { "name": "HttpsProxyHostOnly", "description" : "Test https.proxyHost and default port.", # Chromium differs from the Android platform by connecting to port 80 for # HTTPS connections by default, hence cpp-only. "cpp-only" : "", "properties" : { "https.proxyHost" : "httpproxy.com", }, "mappings" : { "https://example.com/" : "PROXY httpproxy.com:80", "http://example.com/" : "DIRECT", "ftp://example.com/" : "DIRECT", } }, { "name": "HttpProxyHostIPv6", "description" : "Test IPv6 https.proxyHost and default port.", "cpp-only" : "", "properties" : { "http.proxyHost" : "a:b:c::d:1", }, "mappings" : { "http://example.com/" : "PROXY [a:b:c::d:1]:80", "ftp://example.com/" : "DIRECT", } }, { "name": "HttpProxyHostAndPortIPv6", "description" : "Test IPv6 http.proxyHost and http.proxyPort works.", "cpp-only" : "", "properties" : { "http.proxyHost" : "a:b:c::d:1", "http.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "PROXY [a:b:c::d:1]:8080", "ftp://example.com/" : "DIRECT", } }, { "name": "HttpProxyHostAndInvalidPort", "description" : "Test invalid http.proxyPort does not crash.", "cpp-only" : "", "properties" : { "http.proxyHost" : "a:b:c::d:1", "http.proxyPort" : "65536", }, "mappings" : { "http://example.com/" : "DIRECT", "ftp://example.com/" : "DIRECT", } }, { "name": "DefaultProxyExplictPort", "description" : "Default http proxy is used if a scheme-specific one is not found.", "properties" : { "proxyHost" : "defaultproxy.com", "proxyPort" : "8080", "ftp.proxyHost" : "httpproxy.com", "ftp.proxyPort" : "8080", }, "mappings" : { "http://example.com/" : "PROXY defaultproxy.com:8080", "https://example.com/" : "PROXY defaultproxy.com:8080", "ftp://example.com/" : "PROXY httpproxy.com:8080", } }, { "name": "DefaultProxyDefaultPort", "description" : "Check that the default proxy port is as expected.", # Chromium differs from the Android platform by connecting to port 80 for # HTTPS connections by default, hence cpp-only. 
"cpp-only" : "", "properties" : { "proxyHost" : "defaultproxy.com", }, "mappings" : { "http://example.com/" : "PROXY defaultproxy.com:80", "https://example.com/" : "PROXY defaultproxy.com:80", } }, { "name": "FallbackToSocks", "description" : "SOCKS proxy is used if scheme-specific one is not found.", "properties" : { "http.proxyHost" : "defaultproxy.com", "socksProxyHost" : "socksproxy.com" }, "mappings" : { "http://example.com/" : "PROXY defaultproxy.com:80", "https://example.com/" : "SOCKS5 socksproxy.com:1080", "ftp://example.com" : "SOCKS5 socksproxy.com:1080", } }, { "name": "SocksExplicitPort", "description" : "SOCKS proxy port is used if specified", "properties" : { "socksProxyHost" : "socksproxy.com", "socksProxyPort" : "9000", }, "mappings" : { "http://example.com/" : "SOCKS5 socksproxy.com:9000", } }, { "name": "HttpProxySupercedesSocks", "description" : "SOCKS proxy is ignored if default HTTP proxy defined.", "properties" : { "proxyHost" : "defaultproxy.com", "socksProxyHost" : "socksproxy.com", "socksProxyPort" : "9000", }, "mappings" : { "http://example.com/" : "PROXY defaultproxy.com:80", } }, ] class GenerateCPlusPlus: """Generate C++ test cases""" def Generate(self): for test_case in test_cases: print ("TEST_F(ProxyConfigServiceAndroidTest, %s) {" % test_case["name"]) if "description" in test_case: self._GenerateDescription(test_case["description"]); self._GenerateConfiguration(test_case["properties"]) self._GenerateMappings(test_case["mappings"]) print "}" print "" def _GenerateDescription(self, description): print " // %s" % description def _GenerateConfiguration(self, properties): for key in sorted(properties.iterkeys()): print " AddProperty(\"%s\", \"%s\");" % (key, properties[key]) print " ProxySettingsChanged();" def _GenerateMappings(self, mappings): for url in sorted(mappings.iterkeys()): print " TestMapping(\"%s\", \"%s\");" % (url, mappings[url]) class GenerateJava: """Generate Java test cases""" def Generate(self): for test_case in test_cases: if test_case.has_key("cpp-only"): continue if "description" in test_case: self._GenerateDescription(test_case["description"]); print " @SmallTest" print " @Feature({\"AndroidWebView\"})" print " public void test%s() throws Exception {" % test_case["name"] self._GenerateConfiguration(test_case["properties"]) self._GenerateMappings(test_case["mappings"]) print " }" print "" def _GenerateDescription(self, description): print " /**" print " * %s" % description print " *" print " * @throws Exception" print " */" def _GenerateConfiguration(self, properties): for key in sorted(properties.iterkeys()): print " System.setProperty(\"%s\", \"%s\");" % ( key, properties[key]) def _GenerateMappings(self, mappings): for url in sorted(mappings.iterkeys()): mapping = mappings[url] if 'HTTPS' in mapping: mapping = mapping.replace('HTTPS', 'PROXY') print " checkMapping(\"%s\", \"%s\");" % (url, mapping) def main(): parser = optparse.OptionParser() parser.add_option("-j", "--java", action="store_true", dest="java"); (options, args) = parser.parse_args(); if options.java: generator = GenerateJava() else: generator = GenerateCPlusPlus() generator.Generate() if __name__ == '__main__': main()
bsd-3-clause
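For the NoProxy entry at the top of test_cases, the C++ generator above emits roughly the following (an empty properties dict yields only the ProxySettingsChanged() call, and mappings print in sorted URL order; exact indentation follows the print strings):

TEST_F(ProxyConfigServiceAndroidTest, NoProxy) {
  // Test direct mapping when no proxy defined.
  ProxySettingsChanged();
  TestMapping("ftp://example.com/", "DIRECT");
  TestMapping("http://example.com/", "DIRECT");
  TestMapping("https://example.com/", "DIRECT");
}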
icloudrnd/automation_tools
openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py
29
3255
# Copyright 2012 Nebula Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django.core.urlresolvers import reverse from django.http import HttpRequest # noqa from mox import IsA # noqa from openstack_dashboard import api from openstack_dashboard.test import helpers as test API_URL = "horizon:project:access_and_security:api_access" EC2_URL = reverse(API_URL + ":ec2") OPENRC_URL = reverse(API_URL + ":openrc") CREDS_URL = reverse(API_URL + ":view_credentials") class APIAccessTests(test.TestCase): def test_ec2_download_view(self): creds = self.ec2.first() cert = self.certs.first() self.mox.StubOutWithMock(api.keystone, "list_ec2_credentials") self.mox.StubOutWithMock(api.nova, "get_x509_credentials") self.mox.StubOutWithMock(api.nova, "get_x509_root_certificate") self.mox.StubOutWithMock(api.keystone, "create_ec2_credentials") api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \ .AndReturn([]) api.nova.get_x509_credentials(IsA(HttpRequest)).AndReturn(cert) api.nova.get_x509_root_certificate(IsA(HttpRequest)) \ .AndReturn(cert) api.keystone.create_ec2_credentials(IsA(HttpRequest), self.user.id, self.tenant.id).AndReturn(creds) self.mox.ReplayAll() res = self.client.get(EC2_URL) self.assertEqual(res.status_code, 200) self.assertEqual(res['content-type'], 'application/zip') def test_openrc_credentials(self): res = self.client.get(OPENRC_URL) self.assertEqual(res.status_code, 200) openrc = 'project/access_and_security/api_access/openrc.sh.template' self.assertTemplateUsed(res, openrc) name = 'export OS_USERNAME="{}"'.format(self.request.user.username) id = 'export OS_TENANT_ID={}'.format(self.request.user.tenant_id) self.assertTrue(name in res.content) self.assertTrue(id in res.content) @test.create_stubs({api.keystone: ("list_ec2_credentials",)}) def test_credential_api(self): certs = self.ec2.list() api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \ .AndReturn(certs) self.mox.ReplayAll() res = self.client.get(CREDS_URL) self.assertEqual(res.status_code, 200) credentials = 'project/access_and_security/api_access/credentials.html' self.assertTemplateUsed(res, credentials) self.assertEqual(self.user.id, res.context['openrc_creds']['user'].id) self.assertEqual(certs[0].access, res.context['ec2_creds']['ec2_access_key'])
apache-2.0
paolodedios/tensorflow
tensorflow/python/util/tf_stack.py
9
6644
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions used to extract and analyze stacks. Faster than Python libs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import inspect import threading import six # TODO(b/138203821): change to from ...util import ... once the bug is fixed. from tensorflow.python.util import _tf_stack # Generally such lookups should be done using `threading.local()`. See # https://blogs.gnome.org/jamesh/2008/06/11/tls-python/ for a detailed # explanation of why. However the transform stacks are expected to be empty # when a thread is joined, so reusing the key does not introduce a correctness # issue. Moreover, get_ident is faster than storing and retrieving a unique # key in a thread local store. if six.PY2: import thread # pylint: disable=g-import-not-at-top _get_thread_key = thread.get_ident else: _get_thread_key = threading.get_ident # TODO(mdan): Move these to C++ as well. # Moving to C++ can further avoid extra copies made by get_effective_map. _source_mapper_stacks = collections.defaultdict(lambda: [SentinelMapper()]) _source_filter_stacks = collections.defaultdict(lambda: [SentinelFilter()]) class StackTraceTransform(object): """Base class for stack trace transformation functions.""" _stack_dict = None # Subclasses should override _thread_key = None def __enter__(self): # Any given instance is assumed to be used by a single thread, which reduces # expensive thread local lookups. if self._thread_key is None: self._thread_key = _get_thread_key() else: assert self._thread_key == _get_thread_key(), 'Shared across threads?' stack = self._stack_dict[self._thread_key] self.parent = stack[-1] stack.append(self) self.update() return self def __exit__(self, unused_type, unused_value, unused_traceback): top = self._stack_dict[self._thread_key].pop() assert top is self, 'Concurrent access?' 
  def update(self):
    raise NotImplementedError('subclasses need to override this')


class StackTraceMapper(StackTraceTransform):
  """Allows remapping traceback information to different source code."""
  _stack_dict = _source_mapper_stacks

  def __init__(self):
    self.internal_map = _tf_stack.PyBindSourceMap()

  def update(self):
    self.internal_map.update_to(
        tuple(self.get_effective_source_map().items()))

  def get_effective_source_map(self):
    """Returns a map (filename, lineno) -> (filename, lineno, function_name)."""
    raise NotImplementedError('subclasses need to override this')


EMPTY_DICT = {}


class SentinelMapper(StackTraceMapper):

  def get_effective_source_map(self):
    return EMPTY_DICT


class StackTraceFilter(StackTraceTransform):
  """Allows filtering traceback information by removing superfluous frames."""
  _stack_dict = _source_filter_stacks

  def __init__(self):
    self.internal_set = _tf_stack.PyBindFileSet()

  def update(self):
    self.internal_set.update_to(set(self.get_filtered_filenames()))

  def get_filtered_filenames(self):
    raise NotImplementedError('subclasses need to override this')


EMPTY_SET = frozenset()


class SentinelFilter(StackTraceFilter):

  def get_filtered_filenames(self):
    return EMPTY_SET


class CurrentModuleFilter(StackTraceFilter):
  """Filters stack frames from the module where this is used (best effort)."""

  def __init__(self):
    # Use the explicit two-argument form so this also works under the
    # Python 2 path that this module still supports via six.
    super(CurrentModuleFilter, self).__init__()
    filter_filename = None
    outer_f = None
    f = inspect.currentframe()
    try:
      if f is not None:
        # The current frame is __init__. The first outer frame should be the
        # caller.
        outer_f = f.f_back
        if outer_f is not None:
          filter_filename = inspect.getsourcefile(outer_f)
      self._filename = filter_filename
      # This may be called repeatedly: once on entry by the superclass, then by
      # each child context manager.
      self._cached_set = None
    finally:
      # Avoid reference cycles, see:
      # https://docs.python.org/3.7/library/inspect.html#the-interpreter-stack
      del f
      del outer_f

  def get_filtered_filenames(self):
    if self._cached_set is not None:
      return self._cached_set

    filtered_filenames = frozenset((self._filename,))
    if self.parent is not None:
      filtered_filenames |= self.parent.get_filtered_filenames()
    self._cached_set = filtered_filenames
    return filtered_filenames


def extract_stack():
  """An eager-friendly alternative to traceback.extract_stack.

  Returns:
    A list-like FrameSummary containing StackFrame-like objects, which are
    namedtuple-like objects with the following fields: filename, lineno, name,
    line, meant to masquerade as traceback.FrameSummary objects.
  """
  # N.B ExtractStack in tf_stack.cc will drop this frame prior to
  # traversing the stack.
  # TODO(cheshire): Remove this function, use extract_stack_for_node or Python
  # traceback module.
  thread_key = _get_thread_key()
  return _tf_stack.extract_stack(
      _source_mapper_stacks[thread_key][-1].internal_map,
      _source_filter_stacks[thread_key][-1].internal_set)


# TODO(mdan): Revisit these - a single location is almost always sufficient.
def extract_stack_for_node(node):
  """Attaches the current stack trace to `node`.

  Args:
    node: a Node object.

  Returns:
    A list-like FrameSummary containing StackFrame-like objects, which are
    namedtuple-like objects with the following fields: filename, lineno, name,
    line, meant to masquerade as traceback.FrameSummary objects.
  """
  # N.B ExtractStack in tf_stack.cc will drop this frame prior to
  # traversing the stack.
thread_key = _get_thread_key() return _tf_stack.extract_stack_for_node( _source_mapper_stacks[thread_key][-1].internal_map, _source_filter_stacks[thread_key][-1].internal_set, node) StackSummary = _tf_stack.StackTraceWrapper FrameSummary = _tf_stack.StackFrame
apache-2.0
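The transform classes above are context managers: entering one pushes it onto the per-thread stack that extract_stack() consults. A small sketch, assuming the module is importable as tensorflow.python.util.tf_stack:

from tensorflow.python.util import tf_stack


def traced_build_step():
    # Frames from this script's module are filtered out of traces captured
    # inside the with-block (best effort, per CurrentModuleFilter's docstring).
    with tf_stack.CurrentModuleFilter():
        return tf_stack.extract_stack()


frames = traced_build_step()
print(len(frames))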
scarcry/snm-mezzanine
mezzanine/twitter/models.py
3
6032
from datetime import datetime, timedelta from django.db import models from django.utils.html import urlize from django.utils.timezone import get_default_timezone, make_aware from django.utils.translation import ugettext_lazy as _ from mezzanine.conf import settings from mezzanine.twitter import QUERY_TYPE_CHOICES, QUERY_TYPE_USER, \ QUERY_TYPE_LIST, QUERY_TYPE_SEARCH from mezzanine.twitter.managers import TweetManager from requests_oauthlib import OAuth1 from time import timezone from urllib2 import quote import re import requests re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE) re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE) replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>" replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>" class TwitterQueryException(Exception): pass class Query(models.Model): type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES, max_length=10) value = models.CharField(_("Value"), max_length=140) interested = models.BooleanField("Interested", default=True) class Meta: verbose_name = _("Twitter query") verbose_name_plural = _("Twitter queries") ordering = ("-id",) def __unicode__(self): return "%s: %s" % (self.get_type_display(), self.value) def run(self): """ Request new tweets from the Twitter API. """ urls = { QUERY_TYPE_USER: ("https://api.twitter.com/1.1/statuses/" "user_timeline.json?screen_name=%s" "&include_rts=true" % self.value.lstrip("@")), QUERY_TYPE_LIST: ("https://api.twitter.com/1.1/lists/statuses.json" "?list_id=%s&include_rts=true" % self.value.encode("utf-8")), QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json" "?q=%s" % quote(self.value.encode("utf-8")), } try: url = urls[self.type] except KeyError: raise TwitterQueryException("Invalid query type: %s" % self.type) settings.use_editable() auth_settings = (settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET, settings.TWITTER_ACCESS_TOKEN_KEY, settings.TWITTER_ACCESS_TOKEN_SECRET) if not all(auth_settings): raise TwitterQueryException("Twitter OAuth settings missing") try: tweets = requests.get(url, auth=OAuth1(*auth_settings)).json() except Exception, e: raise TwitterQueryException("Error retrieving: %s" % e) try: raise TwitterQueryException(tweets["errors"][0]["message"]) except (IndexError, KeyError, TypeError): pass if self.type == "search": tweets = tweets["statuses"] for tweet_json in tweets: remote_id = str(tweet_json["id"]) tweet, created = self.tweets.get_or_create(remote_id=remote_id) if not created: continue if "retweeted_status" in tweet_json: user = tweet_json['user'] tweet.retweeter_user_name = user["screen_name"] tweet.retweeter_full_name = user["name"] tweet.retweeter_profile_image_url = user["profile_image_url"] tweet_json = tweet_json["retweeted_status"] if self.type == QUERY_TYPE_SEARCH: tweet.user_name = tweet_json['user']['screen_name'] tweet.full_name = tweet_json['user']['name'] tweet.profile_image_url = \ tweet_json['user']["profile_image_url"] date_format = "%a %b %d %H:%M:%S +0000 %Y" else: user = tweet_json["user"] tweet.user_name = user["screen_name"] tweet.full_name = user["name"] tweet.profile_image_url = user["profile_image_url"] date_format = "%a %b %d %H:%M:%S +0000 %Y" tweet.text = urlize(tweet_json["text"]) tweet.text = re_usernames.sub(replace_usernames, tweet.text) tweet.text = re_hashtags.sub(replace_hashtags, tweet.text) if getattr(settings, 'TWITTER_STRIP_HIGH_MULTIBYTE', False): chars = [ch for ch in tweet.text if ord(ch) < 0x800] tweet.text = ''.join(chars) d = 
datetime.strptime(tweet_json["created_at"], date_format) d -= timedelta(seconds=timezone) tweet.created_at = make_aware(d, get_default_timezone()) tweet.save() self.interested = False self.save() class Tweet(models.Model): remote_id = models.CharField(_("Twitter ID"), max_length=50) created_at = models.DateTimeField(_("Date/time"), null=True) text = models.TextField(_("Message"), null=True) profile_image_url = models.URLField(_("Profile image URL"), null=True) user_name = models.CharField(_("User name"), max_length=100, null=True) full_name = models.CharField(_("Full name"), max_length=100, null=True) retweeter_profile_image_url = models.URLField( _("Profile image URL (Retweeted by)"), null=True) retweeter_user_name = models.CharField( _("User name (Retweeted by)"), max_length=100, null=True) retweeter_full_name = models.CharField( _("Full name (Retweeted by)"), max_length=100, null=True) query = models.ForeignKey("Query", related_name="tweets") objects = TweetManager() class Meta: verbose_name = _("Tweet") verbose_name_plural = _("Tweets") ordering = ("-created_at",) def __unicode__(self): return "%s: %s" % (self.user_name, self.text) def is_retweet(self): return self.retweeter_user_name is not None
bsd-2-clause
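The tweet-markup step inside Query.run() above is plain string processing; isolated, it looks like this (assumes Django is installed for urlize; the sample tweet is made up):

import re

from django.utils.html import urlize

re_usernames = re.compile("@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile("#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_usernames = "<a href=\"http://twitter.com/\\1\">@\\1</a>"
replace_hashtags = "<a href=\"http://twitter.com/search?q=%23\\1\">#\\1</a>"

text = urlize("hello @world #python http://example.com")
text = re_usernames.sub(replace_usernames, text)
text = re_hashtags.sub(replace_hashtags, text)
print(text)  # the URL, the @mention and the #hashtag are now anchor tags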
red-hood/calendarserver
txweb2/dav/auth.py
1
6134
## # Copyright (c) 2005-2015 Apple Inc. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ## __all__ = [ "IPrincipal", "DavRealm", "IPrincipalCredentials", "PrincipalCredentials", "AuthenticationWrapper", ] from zope.interface import implements, Interface from twisted.internet import defer from twisted.cred import checkers, error, portal from txweb2.resource import WrapperResource from txdav.xml.element import twisted_private_namespace, registerElement from txdav.xml.element import WebDAVTextElement class AuthenticationWrapper(WrapperResource): def __init__( self, resource, portal, wireEncryptedCredentialFactories, wireUnencryptedCredentialFactories, loginInterfaces ): """ Wrap the given resource and use the parameters to set up the request to allow anyone to challenge and handle authentication. @param resource: L{DAVResource} FIXME: This should get promoted to txweb2.auth @param portal: The cred portal @param wireEncryptedCredentialFactories: Sequence of credentialFactories that can be used to authenticate by resources in this tree over a wire-encrypted channel (SSL). @param wireUnencryptedCredentialFactories: Sequence of credentialFactories that can be used to authenticate by resources in this tree over a wire-unencrypted channel (non-SSL). 
        @param loginInterfaces: Sequence of cred interfaces exposed to
            requests via C{req.loginInterfaces}.
        """
        super(AuthenticationWrapper, self).__init__(resource)

        self.portal = portal
        self.wireEncryptedCredentialFactories = dict([
            (factory.scheme, factory)
            for factory in wireEncryptedCredentialFactories
        ])
        self.wireUnencryptedCredentialFactories = dict([
            (factory.scheme, factory)
            for factory in wireUnencryptedCredentialFactories
        ])
        self.loginInterfaces = loginInterfaces

        # FIXME: some unit tests access self.credentialFactories, so assigning here
        self.credentialFactories = self.wireEncryptedCredentialFactories

    def hook(self, req):
        req.portal = self.portal
        req.loginInterfaces = self.loginInterfaces

        # If not using SSL, use the factory list which excludes "Basic"
        if getattr(req, "chanRequest", None) is None:
            # This is only None in unit tests
            secureConnection = True
        else:
            ignored, secureConnection = req.chanRequest.getHostInfo()
        req.credentialFactories = (
            self.wireEncryptedCredentialFactories
            if secureConnection
            else self.wireUnencryptedCredentialFactories
        )


class IPrincipal(Interface):
    pass


class DavRealm(object):
    implements(portal.IRealm)

    def requestAvatar(self, avatarId, mind, *interfaces):
        if IPrincipal in interfaces:
            # Return the associated principal resources
            return IPrincipal, avatarId[0], avatarId[1]

        raise NotImplementedError("Only IPrincipal interface is supported")


class IPrincipalCredentials(Interface):
    pass


class PrincipalCredentials(object):
    implements(IPrincipalCredentials)

    def __init__(self, authnPrincipal, authzPrincipal, credentials):
        """
        Initialize with both authentication and authorization values. Note
        that in most cases these will be the same since HTTP auth makes no
        distinction between the two - but we may be layering some additional
        auth on top of this (e.g. proxy auth, cookies, forms, etc.) that may
        result in authentication and authorization being different.

        @param authnPrincipal: L{IDAVPrincipalResource} for the authenticated
            principal.
        @param authzPrincipal: L{IDAVPrincipalResource} for the authorized
            principal.
        @param credentials: L{ICredentials} for the authentication credentials.
        """
        self.authnPrincipal = authnPrincipal
        self.authzPrincipal = authzPrincipal
        self.credentials = credentials

    def checkPassword(self, password):
        return self.credentials.checkPassword(password)


class TwistedPropertyChecker(object):
    implements(checkers.ICredentialsChecker)

    credentialInterfaces = (IPrincipalCredentials,)

    def _cbPasswordMatch(self, matched, principalURIs):
        if matched:
            # We return both URIs
            return principalURIs
        else:
            raise error.UnauthorizedLogin(
                "Bad credentials for: %s" % (principalURIs[0],))

    def requestAvatarId(self, credentials):
        pcreds = IPrincipalCredentials(credentials)
        pswd = str(pcreds.authnPrincipal.readDeadProperty(TwistedPasswordProperty))

        d = defer.maybeDeferred(credentials.checkPassword, pswd)
        d.addCallback(self._cbPasswordMatch, (
            pcreds.authnPrincipal,
            pcreds.authzPrincipal,
        ))
        return d


##
# Utilities
##

class TwistedPasswordProperty (WebDAVTextElement):
    namespace = twisted_private_namespace
    name = "password"

registerElement(TwistedPasswordProperty)
apache-2.0
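A schematic of how the pieces above plug into twisted.cred, assuming this module is importable as txweb2.dav.auth (its path above); the per-request login is sketched in comments because it needs real principal resources:

from twisted.cred.portal import Portal

from txweb2.dav.auth import (
    DavRealm, IPrincipal, PrincipalCredentials, TwistedPropertyChecker)

portal = Portal(DavRealm())
portal.registerChecker(TwistedPropertyChecker())

# Per request, something like:
#   creds = PrincipalCredentials(authnPrincipal, authzPrincipal, httpCreds)
#   d = portal.login(creds, None, IPrincipal)
# which fires with (IPrincipal, authnPrincipal, authzPrincipal) on success.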
rainslytherin/ansible
lib/ansible/runner/lookup_plugins/env.py
154
1282
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible import utils, errors from ansible.utils import template import os class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): try: terms = template.template(self.basedir, terms, inject) except Exception, e: pass if isinstance(terms, basestring): terms = [ terms ] ret = [] for term in terms: var = term.split()[0] ret.append(os.getenv(var, '')) return ret
gpl-3.0
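Driven directly, the lookup above resolves environment variables (the first whitespace-separated token of each term), returning '' for unset names; a sketch assuming the plugin is importable from the path above:

import os

from ansible.runner.lookup_plugins.env import LookupModule

os.environ['DEMO_HOME'] = '/home/demo'  # illustrative value

lookup = LookupModule(basedir='.')
print(lookup.run(['DEMO_HOME', 'NOT_SET']))  # -> ['/home/demo', '']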
whn09/tensorflow
tensorflow/python/kernel_tests/morphological_ops_test.py
138
18619
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for morphological filtering operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.ops import gradient_checker from tensorflow.python.ops import nn_ops import tensorflow.python.ops.nn_grad # pylint: disable=unused-import from tensorflow.python.platform import test class DilationTest(test.TestCase): def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu): """Verifies the output values of the dilation function. Args: image: Input tensor with shape: [batch, in_height, in_width, channels]. kernel: Filter tensor with shape: [filter_height, filter_width, channels]. strides: Output strides, specified as [stride_height, stride_width]. rates: Atrous rates, specified as [rate_height, rate_width]. padding: Padding type. out: Expected output. use_gpu: Whether we are running on GPU. """ strides = [1] + strides + [1] rates = [1] + rates + [1] with self.test_session(use_gpu=use_gpu): out_tensor = nn_ops.dilation2d( constant_op.constant(image), constant_op.constant(kernel), strides=strides, rates=rates, padding=padding, name="dilation2d") self.assertAllClose(out, out_tensor.eval()) def _testDilationValidPadding(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [1, 1, 1, 1] out = [[[[.5]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="VALID", out=out, use_gpu=use_gpu) def _testDilationSamePadding(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [1, 2, 2, 1] out = [[[[.5], [.6]], [[.7], [.8]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testDilationSamePaddingDepth(self, use_gpu): # [1, 2, 2, 3] image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]] # [2, 2, 3] kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]] # [1, 2, 2, 3] out = [[[[.5, .7, .3], [.6, .8, .4]], [[.7, .9, .5], [.8, 1., .6]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testDilationSamePaddingBatch(self, use_gpu): # [2, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [2, 2, 2, 1] out = [[[[.5], [.6]], [[.7], [.8]]], [[[.6], [.7]], [[.8], [.9]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testDilationValidPaddingNonSquareWindow(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [1, 2, 1] kernel = [[[.4], [.3]]] # [1, 2, 1, 1] out = [[[[.5]], [[.7]]]] 
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def _testDilationSamePaddingRate(self, use_gpu):
    # [1, 3, 3, 1]
    image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # Because rate = 2, the effective kernel is [3, 3, 1]:
    # kernel_eff = [[[.4], [.0], [.3]],
    #               [[.0], [.0], [.0]],
    #               [[.1], [.0], [.2]]]
    # [1, 3, 3, 1]
    out = [[[[.7], [.8], [.6]], [[1.0], [1.1], [.9]], [[.8], [.9], [.9]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 1],
        rates=[2, 2],
        padding="SAME",
        out=out,
        use_gpu=use_gpu)

  def _testDilationValidPaddingUnevenStride(self, use_gpu):
    # [1, 3, 4, 1]
    image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]],
              [[.9], [1.0], [1.1], [1.2]]]]
    # [2, 2, 1]
    kernel = [[[.4], [.3]], [[.1], [.2]]]
    # [1, 2, 2, 1]
    out = [[[[.8], [1.0]], [[1.2], [1.4]]]]
    self._VerifyValues(
        image,
        kernel,
        strides=[1, 2],
        rates=[1, 1],
        padding="VALID",
        out=out,
        use_gpu=use_gpu)

  def testDilation(self):
    for use_gpu in True, False:
      self._testDilationValidPadding(use_gpu)
      self._testDilationSamePadding(use_gpu)
      self._testDilationSamePaddingDepth(use_gpu)
      self._testDilationSamePaddingBatch(use_gpu)
      self._testDilationValidPaddingNonSquareWindow(use_gpu)
      self._testDilationSamePaddingRate(use_gpu)
      self._testDilationValidPaddingUnevenStride(use_gpu)

  def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides,
                                rates, padding, use_gpu):
    """Verifies the gradients of the dilation function.

    Args:
      image_shape: Input shape, [batch, in_height, in_width, channels].
      kernel_shape: Filter shape, [filter_height, filter_width, channels].
      strides: Output strides, specified as [stride_height, stride_width].
      rates: Atrous rates, specified as [rate_height, rate_width].
      padding: Padding type.
      use_gpu: Whether we are running on GPU.
    """
    assert image_shape[3] == kernel_shape[2]

    np.random.seed(1)  # Make it reproducible.
    image = np.random.random_sample(image_shape).astype(np.float32)
    kernel = np.random.random_sample(kernel_shape).astype(np.float32)
    image_init = np.random.random_sample(image_shape).astype(np.float32)
    kernel_init = np.random.random_sample(kernel_shape).astype(np.float32)

    strides = [1] + strides + [1]
    rates = [1] + rates + [1]
    with self.test_session(use_gpu=use_gpu):
      image_tensor = constant_op.constant(
          image, shape=image_shape, name="input")
      kernel_tensor = constant_op.constant(
          kernel, shape=kernel_shape, name="filter")
      out_tensor = nn_ops.dilation2d(
          image_tensor,
          kernel_tensor,
          strides=strides,
          rates=rates,
          padding=padding,
          name="dilation2d")
      out_shape = out_tensor.eval().shape

      # Small delta is necessary for argmax to remain the same.
err = gradient_checker.compute_gradient_error( [image_tensor, kernel_tensor], [image_shape, kernel_shape], out_tensor, out_shape, [image_init, kernel_init], delta=1e-3) print("Dilation gradient error = %f" % err) self.assertLess(err, 1e-4) def _testDilationGradValidPadding_1x1x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[1, 1, 1], strides=[1, 1], rates=[1, 1], padding="VALID", use_gpu=use_gpu) def _testDilationGradSamePadding_1x1x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[1, 1, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testDilationGradSamePadding_1x1x2(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 2], kernel_shape=[1, 1, 2], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testDilationGradValidPadding_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="VALID", use_gpu=use_gpu) def _testDilationGradSamePadding_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testDilationGradSamePaddingBatch_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[4, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testDilationGradSamePadding_2x2x4(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 4], kernel_shape=[2, 2, 4], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def testDilationGrad(self): for use_gpu in True, False: self._testDilationGradValidPadding_1x1x1(use_gpu) self._testDilationGradSamePadding_1x1x1(use_gpu) self._testDilationGradSamePadding_1x1x2(use_gpu) self._testDilationGradValidPadding_2x2x1(use_gpu) self._testDilationGradSamePadding_2x2x1(use_gpu) self._testDilationGradSamePaddingBatch_2x2x1(use_gpu) self._testDilationGradSamePadding_2x2x4(use_gpu) class ErosionTest(test.TestCase): def _VerifyValues(self, image, kernel, strides, rates, padding, out, use_gpu): """Verifies the output values of the erosion function. Args: image: Input tensor with shape: [batch, in_height, in_width, channels]. kernel: Filter tensor with shape: [filter_height, filter_width, channels]. strides: Output strides, specified as [stride_height, stride_width]. rates: Atrous rates, specified as [rate_height, rate_width]. padding: Padding type. out: Expected output. use_gpu: Whether we are running on GPU. 
""" strides = [1] + strides + [1] rates = [1] + rates + [1] with self.test_session(use_gpu=use_gpu): out_tensor = nn_ops.erosion2d( constant_op.constant(image), constant_op.constant(kernel), strides=strides, rates=rates, padding=padding, name="erosion2d") self.assertAllClose(out, out_tensor.eval()) def _testErosionValidPadding(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [1, 1, 1, 1] out = [[[[.0]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="VALID", out=out, use_gpu=use_gpu) def _testErosionSamePadding(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [1, 2, 2, 1] out = [[[[.0], [.1]], [[.3], [.4]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testErosionSamePaddingDepth(self, use_gpu): # [1, 2, 2, 3] image = [[[[.1, .2, .0], [.2, .3, .1]], [[.3, .4, .2], [.4, .5, .3]]]] # [2, 2, 3] kernel = [[[.4, .5, .3], [.3, .4, .2]], [[.1, .2, .0], [.0, .1, -.1]]] # [1, 2, 2, 3] out = [[[[.0, .0, .0], [.1, .1, .1]], [[.3, .3, .3], [.4, .4, .4]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testErosionSamePaddingBatch(self, use_gpu): # [2, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]], [[[.2], [.3]], [[.4], [.5]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.0]]] # [2, 2, 2, 1] out = [[[[.0], [.1]], [[.3], [.4]]], [[[.1], [.2]], [[.4], [.5]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="SAME", out=out, use_gpu=use_gpu) def _testErosionValidPaddingNonSquareWindow(self, use_gpu): # [1, 2, 2, 1] image = [[[[.1], [.2]], [[.3], [.4]]]] # [1, 2, 1] kernel = [[[.4], [.3]]] # [1, 2, 1, 1] out = [[[[-.2]], [[.0]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[1, 1], padding="VALID", out=out, use_gpu=use_gpu) def _testErosionSamePaddingRate(self, use_gpu): # [1, 3, 3, 1] image = [[[[.1], [.2], [.3]], [[.4], [.5], [.6]], [[.7], [.8], [.9]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.2]]] # Because rate = 2, the effective kernel is [3, 3, 1]: # kernel_eff = [[[.4], [.0], [.3]], # [[.0], [.0], [.0]], # [[.1], [.0], [.2]]] # [1, 3, 3, 1] out = [[[[.1], [.1], [.2]], [[0.1], [-.1], [.0]], [[.4], [.2], [.3]]]] self._VerifyValues( image, kernel, strides=[1, 1], rates=[2, 2], padding="SAME", out=out, use_gpu=use_gpu) def _testErosionValidPaddingUnevenStride(self, use_gpu): # [1, 3, 3, 1] image = [[[[.1], [.2], [.3], [.4]], [[.5], [.6], [.7], [.8]], [[.9], [1.0], [1.1], [1.2]]]] # [2, 2, 1] kernel = [[[.4], [.3]], [[.1], [.2]]] # [1, 2, 2, 1] out = [[[[-.1], [.1]], [[.3], [.5]]]] self._VerifyValues( image, kernel, strides=[1, 2], rates=[1, 1], padding="VALID", out=out, use_gpu=use_gpu) def testErosion(self): for use_gpu in True, False: self._testErosionValidPadding(use_gpu) self._testErosionSamePadding(use_gpu) self._testErosionSamePaddingDepth(use_gpu) self._testErosionSamePaddingBatch(use_gpu) self._testErosionValidPaddingNonSquareWindow(use_gpu) self._testErosionSamePaddingRate(use_gpu) self._testErosionValidPaddingUnevenStride(use_gpu) def _ConstructAndTestGradient(self, image_shape, kernel_shape, strides, rates, padding, use_gpu): """Verifies the gradients of the erosion function. Args: image_shape: Input shape, [batch, in_height, in_width, channels]. kernel_shape: Filter shape, [filter_height, filter_width, channels]. 
strides: Output strides, specified as [stride_height, stride_width]. rates: Atrous rates, specified as [rate_height, rate_width]. padding: Padding type. use_gpu: Whether we are running on GPU. """ assert image_shape[3] == kernel_shape[2] np.random.seed(1) # Make it reproducible. image = np.random.random_sample(image_shape).astype(np.float32) kernel = np.random.random_sample(kernel_shape).astype(np.float32) image_init = np.random.random_sample(image_shape).astype(np.float32) kernel_init = np.random.random_sample(kernel_shape).astype(np.float32) strides = [1] + strides + [1] rates = [1] + rates + [1] with self.test_session(use_gpu=use_gpu): image_tensor = constant_op.constant( image, shape=image_shape, name="input") kernel_tensor = constant_op.constant( kernel, shape=kernel_shape, name="filter") out_tensor = nn_ops.erosion2d( image_tensor, kernel_tensor, strides=strides, rates=rates, padding=padding, name="erosion2d") out_shape = out_tensor.eval().shape # Small delta is necessary for argmax to remain the same. err = gradient_checker.compute_gradient_error( [image_tensor, kernel_tensor], [image_shape, kernel_shape], out_tensor, out_shape, [image_init, kernel_init], delta=1e-3) print("Erosion gradient error = %f" % err) self.assertLess(err, 1e-4) def _testErosionGradValidPadding_1x1x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[1, 1, 1], strides=[1, 1], rates=[1, 1], padding="VALID", use_gpu=use_gpu) def _testErosionGradSamePadding_1x1x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[1, 1, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testErosionGradSamePadding_1x1x2(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 2], kernel_shape=[1, 1, 2], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testErosionGradValidPadding_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="VALID", use_gpu=use_gpu) def _testErosionGradSamePadding_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testErosionGradSamePaddingBatch_2x2x1(self, use_gpu): self._ConstructAndTestGradient( image_shape=[4, 3, 3, 1], kernel_shape=[2, 2, 1], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def _testErosionGradSamePadding_2x2x4(self, use_gpu): self._ConstructAndTestGradient( image_shape=[1, 3, 3, 4], kernel_shape=[2, 2, 4], strides=[1, 1], rates=[1, 1], padding="SAME", use_gpu=use_gpu) def testErosionGrad(self): for use_gpu in True, False: self._testErosionGradValidPadding_1x1x1(use_gpu) self._testErosionGradSamePadding_1x1x1(use_gpu) self._testErosionGradSamePadding_1x1x2(use_gpu) self._testErosionGradValidPadding_2x2x1(use_gpu) self._testErosionGradSamePadding_2x2x1(use_gpu) self._testErosionGradSamePaddingBatch_2x2x1(use_gpu) self._testErosionGradSamePadding_2x2x4(use_gpu) if __name__ == "__main__": test.main()
apache-2.0
Ilhasoft/ureport
ureport/news/views.py
2
4300
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals from dash.categories.fields import CategoryChoiceField from dash.categories.models import Category from dash.orgs.views import OrgObjPermsMixin, OrgPermsMixin from smartmin.views import SmartCreateView, SmartCRUDL, SmartListView, SmartUpdateView from django import forms from .models import NewsItem, Video class NewsForm(forms.ModelForm): category = CategoryChoiceField(Category.objects.none()) def __init__(self, *args, **kwargs): self.org = kwargs["org"] del kwargs["org"] super(NewsForm, self).__init__(*args, **kwargs) self.fields["category"].queryset = Category.objects.filter(org=self.org).order_by("name") class Meta: model = NewsItem fields = ("is_active", "title", "description", "link", "category", "org") class VideoForm(forms.ModelForm): category = CategoryChoiceField(Category.objects.none()) def __init__(self, *args, **kwargs): self.org = kwargs["org"] del kwargs["org"] super(VideoForm, self).__init__(*args, **kwargs) self.fields["category"].queryset = Category.objects.filter(org=self.org).order_by("name") class Meta: model = Video fields = ("is_active", "title", "description", "video_id", "category", "org") class NewsItemCRUDL(SmartCRUDL): model = NewsItem actions = ("create", "update", "list") class Update(OrgObjPermsMixin, SmartUpdateView): form_class = NewsForm fields = ("is_active", "title", "description", "link", "category") def get_form_kwargs(self): kwargs = super(NewsItemCRUDL.Update, self).get_form_kwargs() kwargs["org"] = self.request.org return kwargs class List(OrgPermsMixin, SmartListView): fields = ("title", "link", "category") ordering = ("-pk",) def get_queryset(self, **kwargs): queryset = super(NewsItemCRUDL.List, self).get_queryset(**kwargs) queryset = queryset.filter(org=self.derive_org()) return queryset class Create(OrgPermsMixin, SmartCreateView): form_class = NewsForm def get_form_kwargs(self): kwargs = super(NewsItemCRUDL.Create, self).get_form_kwargs() kwargs["org"] = self.request.org return kwargs def derive_fields(self): if self.request.user.is_superuser: return ("title", "description", "link", "category", "org") return ("title", "description", "link", "category") def pre_save(self, obj): obj = super(NewsItemCRUDL.Create, self).pre_save(obj) org = self.derive_org() if org: obj.org = org return obj class VideoCRUDL(SmartCRUDL): model = Video actions = ("create", "update", "list") class Update(OrgObjPermsMixin, SmartUpdateView): form_class = VideoForm fields = ("is_active", "title", "description", "video_id", "category") def get_form_kwargs(self): kwargs = super(VideoCRUDL.Update, self).get_form_kwargs() kwargs["org"] = self.request.org return kwargs class List(OrgPermsMixin, SmartListView): fields = ("title", "video_id", "category") ordering = ("-pk",) def get_queryset(self, **kwargs): queryset = super(VideoCRUDL.List, self).get_queryset(**kwargs) queryset = queryset.filter(org=self.derive_org()) return queryset class Create(OrgPermsMixin, SmartCreateView): form_class = VideoForm def get_form_kwargs(self): kwargs = super(VideoCRUDL.Create, self).get_form_kwargs() kwargs["org"] = self.request.org return kwargs def derive_fields(self): if self.request.user.is_superuser: return ("title", "description", "video_id", "category", "org") return ("title", "description", "video_id", "category") def pre_save(self, obj): obj = super(VideoCRUDL.Create, self).pre_save(obj) org = self.derive_org() if org: obj.org = org return obj
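
# Usage sketch (illustrative only; `org` stands in for a dash Org instance,
# which the CRUDL views above normally supply through get_form_kwargs()):
def _example_build_news_form(org, data=None):
    # NewsForm.__init__ pops "org" before ModelForm sees the kwargs, then
    # scopes the category choices to that org's categories.
    return NewsForm(data=data, org=org)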
agpl-3.0
isaksky/selenium
py/selenium/webdriver/ie/webdriver.py
55
2093
#!/usr/bin/python
#
# Copyright 2008-2013 Software Freedom Conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from selenium.webdriver.common import utils
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service

DEFAULT_TIMEOUT = 30
DEFAULT_PORT = 0
DEFAULT_HOST = None
DEFAULT_LOG_LEVEL = None
DEFAULT_LOG_FILE = None


class WebDriver(RemoteWebDriver):

    def __init__(self, executable_path='IEDriverServer.exe', capabilities=None,
                 port=DEFAULT_PORT, timeout=DEFAULT_TIMEOUT, host=DEFAULT_HOST,
                 log_level=DEFAULT_LOG_LEVEL, log_file=DEFAULT_LOG_FILE):
        self.port = port
        if self.port == 0:
            self.port = utils.free_port()
        self.host = host
        self.log_level = log_level
        self.log_file = log_file

        # Start the bundled IEDriverServer before connecting to it.
        self.iedriver = Service(
            executable_path,
            port=self.port,
            host=self.host,
            log_level=self.log_level,
            log_file=self.log_file)
        self.iedriver.start()

        if capabilities is None:
            capabilities = DesiredCapabilities.INTERNETEXPLORER

        RemoteWebDriver.__init__(
            self,
            command_executor='http://localhost:%d' % self.port,
            desired_capabilities=capabilities)
        self._is_remote = False

    def quit(self):
        RemoteWebDriver.quit(self)
        self.iedriver.stop()
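
# Minimal usage sketch for the class above (illustrative only; assumes a
# Windows host with Internet Explorer installed and IEDriverServer.exe on
# the PATH):
if __name__ == '__main__':
    driver = WebDriver()
    try:
        driver.get('http://example.com')  # navigate via the remote protocol
        print(driver.title)
    finally:
        driver.quit()  # also stops the IEDriverServer child process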
apache-2.0
mccheung/kbengine
kbe/res/scripts/common/Lib/idlelib/PyParse.py
70
20491
import re import sys from collections import Mapping from functools import partial # Reason last stmt is continued (or C_NONE if it's not). (C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE, C_STRING_NEXT_LINES, C_BRACKET) = range(5) if 0: # for throwaway debugging output def dump(*stuff): sys.__stdout__.write(" ".join(map(str, stuff)) + "\n") # Find what looks like the start of a popular stmt. _synchre = re.compile(r""" ^ [ \t]* (?: while | else | def | return | assert | break | class | continue | elif | try | except | raise | import | yield ) \b """, re.VERBOSE | re.MULTILINE).search # Match blank line or non-indenting comment line. _junkre = re.compile(r""" [ \t]* (?: \# \S .* )? \n """, re.VERBOSE).match # Match any flavor of string; the terminating quote is optional # so that we're robust in the face of incomplete program text. _match_stringre = re.compile(r""" \""" [^"\\]* (?: (?: \\. | "(?!"") ) [^"\\]* )* (?: \""" )? | " [^"\\\n]* (?: \\. [^"\\\n]* )* "? | ''' [^'\\]* (?: (?: \\. | '(?!'') ) [^'\\]* )* (?: ''' )? | ' [^'\\\n]* (?: \\. [^'\\\n]* )* '? """, re.VERBOSE | re.DOTALL).match # Match a line that starts with something interesting; # used to find the first item of a bracket structure. _itemre = re.compile(r""" [ \t]* [^\s#\\] # if we match, m.end()-1 is the interesting char """, re.VERBOSE).match # Match start of stmts that should be followed by a dedent. _closere = re.compile(r""" \s* (?: return | break | continue | raise | pass ) \b """, re.VERBOSE).match # Chew up non-special chars as quickly as possible. If match is # successful, m.end() less 1 is the index of the last boring char # matched. If match is unsuccessful, the string starts with an # interesting char. _chew_ordinaryre = re.compile(r""" [^[\](){}#'"\\]+ """, re.VERBOSE).match class StringTranslatePseudoMapping(Mapping): r"""Utility class to be used with str.translate() This Mapping class wraps a given dict. When a value for a key is requested via __getitem__() or get(), the key is looked up in the given dict. If found there, the value from the dict is returned. Otherwise, the default value given upon initialization is returned. This allows using str.translate() to make some replacements, and to replace all characters for which no replacement was specified with a given character instead of leaving them as-is. For example, to replace everything except whitespace with 'x': >>> whitespace_chars = ' \t\n\r' >>> preserve_dict = {ord(c): ord(c) for c in whitespace_chars} >>> mapping = StringTranslatePseudoMapping(preserve_dict, ord('x')) >>> text = "a + b\tc\nd" >>> text.translate(mapping) 'x x x\tx\nx' """ def __init__(self, non_defaults, default_value): self._non_defaults = non_defaults self._default_value = default_value def _get(key, _get=non_defaults.get, _default=default_value): return _get(key, _default) self._get = _get def __getitem__(self, item): return self._get(item) def __len__(self): return len(self._non_defaults) def __iter__(self): return iter(self._non_defaults) def get(self, key, default=None): return self._get(key) class Parser: def __init__(self, indentwidth, tabwidth): self.indentwidth = indentwidth self.tabwidth = tabwidth def set_str(self, s): assert len(s) == 0 or s[-1] == '\n' self.str = s self.study_level = 0 # Return index of a good place to begin parsing, as close to the # end of the string as possible. This will be the start of some # popular stmt like "if" or "def". 
Return None if none found: # the caller should pass more prior context then, if possible, or # if not (the entire program text up until the point of interest # has already been tried) pass 0 to set_lo. # # This will be reliable iff given a reliable is_char_in_string # function, meaning that when it says "no", it's absolutely # guaranteed that the char is not in a string. def find_good_parse_start(self, is_char_in_string=None, _synchre=_synchre): str, pos = self.str, None if not is_char_in_string: # no clue -- make the caller pass everything return None # Peek back from the end for a good place to start, # but don't try too often; pos will be left None, or # bumped to a legitimate synch point. limit = len(str) for tries in range(5): i = str.rfind(":\n", 0, limit) if i < 0: break i = str.rfind('\n', 0, i) + 1 # start of colon line m = _synchre(str, i, limit) if m and not is_char_in_string(m.start()): pos = m.start() break limit = i if pos is None: # Nothing looks like a block-opener, or stuff does # but is_char_in_string keeps returning true; most likely # we're in or near a giant string, the colorizer hasn't # caught up enough to be helpful, or there simply *aren't* # any interesting stmts. In any of these cases we're # going to have to parse the whole thing to be sure, so # give it one last try from the start, but stop wasting # time here regardless of the outcome. m = _synchre(str) if m and not is_char_in_string(m.start()): pos = m.start() return pos # Peeking back worked; look forward until _synchre no longer # matches. i = pos + 1 while 1: m = _synchre(str, i) if m: s, i = m.span() if not is_char_in_string(s): pos = s else: break return pos # Throw away the start of the string. Intended to be called with # find_good_parse_start's result. def set_lo(self, lo): assert lo == 0 or self.str[lo-1] == '\n' if lo > 0: self.str = self.str[lo:] # Build a translation table to map uninteresting chars to 'x', open # brackets to '(', close brackets to ')' while preserving quotes, # backslashes, newlines and hashes. This is to be passed to # str.translate() in _study1(). _tran = {} _tran.update((ord(c), ord('(')) for c in "({[") _tran.update((ord(c), ord(')')) for c in ")}]") _tran.update((ord(c), ord(c)) for c in "\"'\\\n#") _tran = StringTranslatePseudoMapping(_tran, default_value=ord('x')) # As quickly as humanly possible <wink>, find the line numbers (0- # based) of the non-continuation lines. # Creates self.{goodlines, continuation}. def _study1(self): if self.study_level >= 1: return self.study_level = 1 # Map all uninteresting characters to "x", all open brackets # to "(", all close brackets to ")", then collapse runs of # uninteresting characters. This can cut the number of chars # by a factor of 10-40, and so greatly speed the following loop. str = self.str str = str.translate(self._tran) str = str.replace('xxxxxxxx', 'x') str = str.replace('xxxx', 'x') str = str.replace('xx', 'x') str = str.replace('xx', 'x') str = str.replace('\nx', '\n') # note that replacing x\n with \n would be incorrect, because # x may be preceded by a backslash # March over the squashed version of the program, accumulating # the line numbers of non-continued stmts, and determining # whether & why the last stmt is a continuation. 
continuation = C_NONE level = lno = 0 # level is nesting level; lno is line number self.goodlines = goodlines = [0] push_good = goodlines.append i, n = 0, len(str) while i < n: ch = str[i] i = i+1 # cases are checked in decreasing order of frequency if ch == 'x': continue if ch == '\n': lno = lno + 1 if level == 0: push_good(lno) # else we're in an unclosed bracket structure continue if ch == '(': level = level + 1 continue if ch == ')': if level: level = level - 1 # else the program is invalid, but we can't complain continue if ch == '"' or ch == "'": # consume the string quote = ch if str[i-1:i+2] == quote * 3: quote = quote * 3 firstlno = lno w = len(quote) - 1 i = i+w while i < n: ch = str[i] i = i+1 if ch == 'x': continue if str[i-1:i+w] == quote: i = i+w break if ch == '\n': lno = lno + 1 if w == 0: # unterminated single-quoted string if level == 0: push_good(lno) break continue if ch == '\\': assert i < n if str[i] == '\n': lno = lno + 1 i = i+1 continue # else comment char or paren inside string else: # didn't break out of the loop, so we're still # inside a string if (lno - 1) == firstlno: # before the previous \n in str, we were in the first # line of the string continuation = C_STRING_FIRST_LINE else: continuation = C_STRING_NEXT_LINES continue # with outer loop if ch == '#': # consume the comment i = str.find('\n', i) assert i >= 0 continue assert ch == '\\' assert i < n if str[i] == '\n': lno = lno + 1 if i+1 == n: continuation = C_BACKSLASH i = i+1 # The last stmt may be continued for all 3 reasons. # String continuation takes precedence over bracket # continuation, which beats backslash continuation. if (continuation != C_STRING_FIRST_LINE and continuation != C_STRING_NEXT_LINES and level > 0): continuation = C_BRACKET self.continuation = continuation # Push the final line number as a sentinel value, regardless of # whether it's continued. assert (continuation == C_NONE) == (goodlines[-1] == lno) if goodlines[-1] != lno: push_good(lno) def get_continuation_type(self): self._study1() return self.continuation # study1 was sufficient to determine the continuation status, # but doing more requires looking at every character. study2 # does this for the last interesting statement in the block. # Creates: # self.stmt_start, stmt_end # slice indices of last interesting stmt # self.stmt_bracketing # the bracketing structure of the last interesting stmt; # for example, for the statement "say(boo) or die", stmt_bracketing # will be [(0, 0), (3, 1), (8, 0)]. Strings and comments are # treated as brackets, for the matter. # self.lastch # last non-whitespace character before optional trailing # comment # self.lastopenbracketpos # if continuation is C_BRACKET, index of last open bracket def _study2(self): if self.study_level >= 2: return self._study1() self.study_level = 2 # Set p and q to slice indices of last interesting stmt. str, goodlines = self.str, self.goodlines i = len(goodlines) - 1 p = len(str) # index of newest line while i: assert p # p is the index of the stmt at line number goodlines[i]. # Move p back to the stmt at line number goodlines[i-1]. q = p for nothing in range(goodlines[i-1], goodlines[i]): # tricky: sets p to 0 if no preceding newline p = str.rfind('\n', 0, p-1) + 1 # The stmt str[p:q] isn't a continuation, but may be blank # or a non-indenting comment line. if _junkre(str, p): i = i-1 else: break if i == 0: # nothing but junk! 
assert p == 0 q = p self.stmt_start, self.stmt_end = p, q # Analyze this stmt, to find the last open bracket (if any) # and last interesting character (if any). lastch = "" stack = [] # stack of open bracket indices push_stack = stack.append bracketing = [(p, 0)] while p < q: # suck up all except ()[]{}'"#\\ m = _chew_ordinaryre(str, p, q) if m: # we skipped at least one boring char newp = m.end() # back up over totally boring whitespace i = newp - 1 # index of last boring char while i >= p and str[i] in " \t\n": i = i-1 if i >= p: lastch = str[i] p = newp if p >= q: break ch = str[p] if ch in "([{": push_stack(p) bracketing.append((p, len(stack))) lastch = ch p = p+1 continue if ch in ")]}": if stack: del stack[-1] lastch = ch p = p+1 bracketing.append((p, len(stack))) continue if ch == '"' or ch == "'": # consume string # Note that study1 did this with a Python loop, but # we use a regexp here; the reason is speed in both # cases; the string may be huge, but study1 pre-squashed # strings to a couple of characters per line. study1 # also needed to keep track of newlines, and we don't # have to. bracketing.append((p, len(stack)+1)) lastch = ch p = _match_stringre(str, p, q).end() bracketing.append((p, len(stack))) continue if ch == '#': # consume comment and trailing newline bracketing.append((p, len(stack)+1)) p = str.find('\n', p, q) + 1 assert p > 0 bracketing.append((p, len(stack))) continue assert ch == '\\' p = p+1 # beyond backslash assert p < q if str[p] != '\n': # the program is invalid, but can't complain lastch = ch + str[p] p = p+1 # beyond escaped char # end while p < q: self.lastch = lastch if stack: self.lastopenbracketpos = stack[-1] self.stmt_bracketing = tuple(bracketing) # Assuming continuation is C_BRACKET, return the number # of spaces the next line should be indented. def compute_bracket_indent(self): self._study2() assert self.continuation == C_BRACKET j = self.lastopenbracketpos str = self.str n = len(str) origi = i = str.rfind('\n', 0, j) + 1 j = j+1 # one beyond open bracket # find first list item; set i to start of its line while j < n: m = _itemre(str, j) if m: j = m.end() - 1 # index of first interesting char extra = 0 break else: # this line is junk; advance to next line i = j = str.find('\n', j) + 1 else: # nothing interesting follows the bracket; # reproduce the bracket line's indentation + a level j = i = origi while str[j] in " \t": j = j+1 extra = self.indentwidth return len(str[i:j].expandtabs(self.tabwidth)) + extra # Return number of physical lines in last stmt (whether or not # it's an interesting stmt! this is intended to be called when # continuation is C_BACKSLASH). def get_num_lines_in_stmt(self): self._study1() goodlines = self.goodlines return goodlines[-1] - goodlines[-2] # Assuming continuation is C_BACKSLASH, return the number of spaces # the next line should be indented. Also assuming the new line is # the first one following the initial line of the stmt. 
def compute_backslash_indent(self): self._study2() assert self.continuation == C_BACKSLASH str = self.str i = self.stmt_start while str[i] in " \t": i = i+1 startpos = i # See whether the initial line starts an assignment stmt; i.e., # look for an = operator endpos = str.find('\n', startpos) + 1 found = level = 0 while i < endpos: ch = str[i] if ch in "([{": level = level + 1 i = i+1 elif ch in ")]}": if level: level = level - 1 i = i+1 elif ch == '"' or ch == "'": i = _match_stringre(str, i, endpos).end() elif ch == '#': break elif level == 0 and ch == '=' and \ (i == 0 or str[i-1] not in "=<>!") and \ str[i+1] != '=': found = 1 break else: i = i+1 if found: # found a legit =, but it may be the last interesting # thing on the line i = i+1 # move beyond the = found = re.match(r"\s*\\", str[i:endpos]) is None if not found: # oh well ... settle for moving beyond the first chunk # of non-whitespace chars i = startpos while str[i] not in " \t\n": i = i+1 return len(str[self.stmt_start:i].expandtabs(\ self.tabwidth)) + 1 # Return the leading whitespace on the initial line of the last # interesting stmt. def get_base_indent_string(self): self._study2() i, n = self.stmt_start, self.stmt_end j = i str = self.str while j < n and str[j] in " \t": j = j + 1 return str[i:j] # Did the last interesting stmt open a block? def is_block_opener(self): self._study2() return self.lastch == ':' # Did the last interesting stmt close a block? def is_block_closer(self): self._study2() return _closere(self.str, self.stmt_start) is not None # index of last open bracket ({[, or None if none lastopenbracketpos = None def get_last_open_bracket_pos(self): self._study2() return self.lastopenbracketpos # the structure of the bracketing of the last interesting statement, # in the format defined in _study2, or None if the text didn't contain # anything stmt_bracketing = None def get_last_stmt_bracketing(self): self._study2() return self.stmt_bracketing
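
# Usage sketch (illustrative only; the indent/tab widths are arbitrary
# choices, and this block is not part of idlelib itself):
if __name__ == '__main__':
    p = Parser(indentwidth=4, tabwidth=8)
    # A statement left open by a bracket makes the last stmt a continuation.
    p.set_str("if x:\n    y = f(a,\n")
    assert p.get_continuation_type() == C_BRACKET
    # compute_bracket_indent() lines the next line up with the first item
    # after the open bracket: here, column 10 (under the 'a').
    print(p.compute_bracket_indent())  # -> 10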
lgpl-3.0
pavle-batuta/djangoTDD
lists/migrations/0001_initial.py
1
1038
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Item', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ('text', models.TextField(default='')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='List', fields=[ ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')), ], options={ }, bases=(models.Model,), ), migrations.AddField( model_name='item', name='list', field=models.ForeignKey(default=None, to='lists.List'), preserve_default=True, ), ]
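
# For reference, a lists/models.py that would generate this initial
# migration looks roughly like the sketch below (an assumption, since the
# real models module is not shown here; note the pre-Django-2.0 ForeignKey
# without on_delete):
#
#   from django.db import models
#
#   class List(models.Model):
#       pass
#
#   class Item(models.Model):
#       text = models.TextField(default='')
#       list = models.ForeignKey('List', default=None)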
mit
h2oai/sparkling-water
py-scoring/src/ai/h2o/sparkling/ml/params/HasGamColsOnMOJO.py
1
1024
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from ai.h2o.sparkling.ml.params.H2OTypeConverters import H2OTypeConverters class HasGamColsOnMOJO: def getGamCols(self): value = self._java_obj.getGamCols() return H2OTypeConverters.toNullableListListString()(value)
apache-2.0