repo_name
stringlengths
7
65
path
stringlengths
5
185
copies
stringlengths
1
4
size
stringlengths
4
6
content
stringlengths
977
990k
license
stringclasses
14 values
hash
stringlengths
32
32
line_mean
float64
7.18
99.4
line_max
int64
31
999
alpha_frac
float64
0.25
0.95
ratio
float64
1.5
7.84
autogenerated
bool
1 class
config_or_test
bool
2 classes
has_no_keywords
bool
2 classes
has_few_assignments
bool
1 class
all-of-us/raw-data-repository
rdr_service/lib_fhir/fhirclient_3_0_0/models/valueset.py
1
21281
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ValueSet) on 2017-03-22. # 2017, SMART Health IT. from . import domainresource class ValueSet(domainresource.DomainResource): """ A set of codes drawn from one or more code systems. A value set specifies a set of codes drawn from one or more code systems. """ resource_type = "ValueSet" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.compose = None """ Definition of the content of the value set (CLD). Type `ValueSetCompose` (represented as `dict` in JSON). """ self.contact = None """ Contact details for the publisher. List of `ContactDetail` items (represented as `dict` in JSON). """ self.copyright = None """ Use and/or publishing restrictions. Type `str`. """ self.date = None """ Date this was last changed. Type `FHIRDate` (represented as `str` in JSON). """ self.description = None """ Natural language description of the value set. Type `str`. """ self.expansion = None """ Used when the value set is "expanded". Type `ValueSetExpansion` (represented as `dict` in JSON). """ self.experimental = None """ For testing purposes, not real usage. Type `bool`. """ self.extensible = None """ Whether this is intended to be used with an extensible binding. Type `bool`. """ self.identifier = None """ Additional identifier for the value set. List of `Identifier` items (represented as `dict` in JSON). """ self.immutable = None """ Indicates whether or not any change to the content logical definition may occur. Type `bool`. """ self.jurisdiction = None """ Intended jurisdiction for value set (if applicable). List of `CodeableConcept` items (represented as `dict` in JSON). 
""" self.name = None """ Name for this value set (computer friendly). Type `str`. """ self.publisher = None """ Name of the publisher (organization or individual). Type `str`. """ self.purpose = None """ Why this value set is defined. Type `str`. """ self.status = None """ draft | active | retired | unknown. Type `str`. """ self.title = None """ Name for this value set (human friendly). Type `str`. """ self.url = None """ Logical URI to reference this value set (globally unique). Type `str`. """ self.useContext = None """ Context the content is intended to support. List of `UsageContext` items (represented as `dict` in JSON). """ self.version = None """ Business version of the value set. Type `str`. """ super(ValueSet, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSet, self).elementProperties() js.extend([ ("compose", "compose", ValueSetCompose, False, None, False), ("contact", "contact", contactdetail.ContactDetail, True, None, False), ("copyright", "copyright", str, False, None, False), ("date", "date", fhirdate.FHIRDate, False, None, False), ("description", "description", str, False, None, False), ("expansion", "expansion", ValueSetExpansion, False, None, False), ("experimental", "experimental", bool, False, None, False), ("extensible", "extensible", bool, False, None, False), ("identifier", "identifier", identifier.Identifier, True, None, False), ("immutable", "immutable", bool, False, None, False), ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False), ("name", "name", str, False, None, False), ("publisher", "publisher", str, False, None, False), ("purpose", "purpose", str, False, None, False), ("status", "status", str, False, None, True), ("title", "title", str, False, None, False), ("url", "url", str, False, None, False), ("useContext", "useContext", usagecontext.UsageContext, True, None, False), ("version", "version", str, False, None, False), ]) return js from . 
import backboneelement class ValueSetCompose(backboneelement.BackboneElement): """ Definition of the content of the value set (CLD). A set of criteria that define the content logical definition of the value set by including or excluding codes from outside this value set. This I also known as the "Content Logical Definition" (CLD). """ resource_type = "ValueSetCompose" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.exclude = None """ Explicitly exclude codes from a code system or other value sets. List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """ self.inactive = None """ Whether inactive codes are in the value set. Type `bool`. """ self.include = None """ Include one or more codes from a code system or other value set(s). List of `ValueSetComposeInclude` items (represented as `dict` in JSON). """ self.lockedDate = None """ Fixed date for version-less references (transitive). Type `FHIRDate` (represented as `str` in JSON). """ super(ValueSetCompose, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetCompose, self).elementProperties() js.extend([ ("exclude", "exclude", ValueSetComposeInclude, True, None, False), ("inactive", "inactive", bool, False, None, False), ("include", "include", ValueSetComposeInclude, True, None, True), ("lockedDate", "lockedDate", fhirdate.FHIRDate, False, None, False), ]) return js class ValueSetComposeInclude(backboneelement.BackboneElement): """ Include one or more codes from a code system or other value set(s). """ resource_type = "ValueSetComposeInclude" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. 
:raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.concept = None """ A concept defined in the system. List of `ValueSetComposeIncludeConcept` items (represented as `dict` in JSON). """ self.filter = None """ Select codes/concepts by their properties (including relationships). List of `ValueSetComposeIncludeFilter` items (represented as `dict` in JSON). """ self.system = None """ The system the codes come from. Type `str`. """ self.valueSet = None """ Select only contents included in this value set. List of `str` items. """ self.version = None """ Specific version of the code system referred to. Type `str`. """ super(ValueSetComposeInclude, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetComposeInclude, self).elementProperties() js.extend([ ("concept", "concept", ValueSetComposeIncludeConcept, True, None, False), ("filter", "filter", ValueSetComposeIncludeFilter, True, None, False), ("system", "system", str, False, None, False), ("valueSet", "valueSet", str, True, None, False), ("version", "version", str, False, None, False), ]) return js class ValueSetComposeIncludeConcept(backboneelement.BackboneElement): """ A concept defined in the system. Specifies a concept to be included or excluded. """ resource_type = "ValueSetComposeIncludeConcept" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.code = None """ Code or expression from system. Type `str`. """ self.designation = None """ Additional representations for this concept. 
List of `ValueSetComposeIncludeConceptDesignation` items (represented as `dict` in JSON). """ self.display = None """ Text to display for this code for this value set in this valueset. Type `str`. """ super(ValueSetComposeIncludeConcept, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetComposeIncludeConcept, self).elementProperties() js.extend([ ("code", "code", str, False, None, True), ("designation", "designation", ValueSetComposeIncludeConceptDesignation, True, None, False), ("display", "display", str, False, None, False), ]) return js class ValueSetComposeIncludeConceptDesignation(backboneelement.BackboneElement): """ Additional representations for this concept. Additional representations for this concept when used in this value set - other languages, aliases, specialized purposes, used for particular purposes, etc. """ resource_type = "ValueSetComposeIncludeConceptDesignation" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.language = None """ Human language of the designation. Type `str`. """ self.use = None """ Details how this designation would be used. Type `Coding` (represented as `dict` in JSON). """ self.value = None """ The text value for this designation. Type `str`. 
""" super(ValueSetComposeIncludeConceptDesignation, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetComposeIncludeConceptDesignation, self).elementProperties() js.extend([ ("language", "language", str, False, None, False), ("use", "use", coding.Coding, False, None, False), ("value", "value", str, False, None, True), ]) return js class ValueSetComposeIncludeFilter(backboneelement.BackboneElement): """ Select codes/concepts by their properties (including relationships). Select concepts by specify a matching criteria based on the properties (including relationships) defined by the system. If multiple filters are specified, they SHALL all be true. """ resource_type = "ValueSetComposeIncludeFilter" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.op = None """ = | is-a | descendent-of | is-not-a | regex | in | not-in | generalizes | exists. Type `str`. """ self.property = None """ A property defined by the code system. Type `str`. """ self.value = None """ Code from the system, or regex criteria, or boolean value for exists. Type `str`. """ super(ValueSetComposeIncludeFilter, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetComposeIncludeFilter, self).elementProperties() js.extend([ ("op", "op", str, False, None, True), ("property", "property", str, False, None, True), ("value", "value", str, False, None, True), ]) return js class ValueSetExpansion(backboneelement.BackboneElement): """ Used when the value set is "expanded". A value set can also be "expanded", where the value set is turned into a simple collection of enumerated codes. This element holds the expansion, if it has been performed. 
""" resource_type = "ValueSetExpansion" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.contains = None """ Codes in the value set. List of `ValueSetExpansionContains` items (represented as `dict` in JSON). """ self.identifier = None """ Uniquely identifies this expansion. Type `str`. """ self.offset = None """ Offset at which this resource starts. Type `int`. """ self.parameter = None """ Parameter that controlled the expansion process. List of `ValueSetExpansionParameter` items (represented as `dict` in JSON). """ self.timestamp = None """ Time ValueSet expansion happened. Type `FHIRDate` (represented as `str` in JSON). """ self.total = None """ Total number of codes in the expansion. Type `int`. """ super(ValueSetExpansion, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetExpansion, self).elementProperties() js.extend([ ("contains", "contains", ValueSetExpansionContains, True, None, False), ("identifier", "identifier", str, False, None, True), ("offset", "offset", int, False, None, False), ("parameter", "parameter", ValueSetExpansionParameter, True, None, False), ("timestamp", "timestamp", fhirdate.FHIRDate, False, None, True), ("total", "total", int, False, None, False), ]) return js class ValueSetExpansionContains(backboneelement.BackboneElement): """ Codes in the value set. The codes that are contained in the value set expansion. """ resource_type = "ValueSetExpansionContains" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. 
:raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.abstract = None """ If user cannot select this entry. Type `bool`. """ self.code = None """ Code - if blank, this is not a selectable code. Type `str`. """ self.contains = None """ Codes contained under this entry. List of `ValueSetExpansionContains` items (represented as `dict` in JSON). """ self.designation = None """ Additional representations for this item. List of `ValueSetComposeIncludeConceptDesignation` items (represented as `dict` in JSON). """ self.display = None """ User display for the concept. Type `str`. """ self.inactive = None """ If concept is inactive in the code system. Type `bool`. """ self.system = None """ System value for the code. Type `str`. """ self.version = None """ Version in which this code/display is defined. Type `str`. """ super(ValueSetExpansionContains, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetExpansionContains, self).elementProperties() js.extend([ ("abstract", "abstract", bool, False, None, False), ("code", "code", str, False, None, False), ("contains", "contains", ValueSetExpansionContains, True, None, False), ("designation", "designation", ValueSetComposeIncludeConceptDesignation, True, None, False), ("display", "display", str, False, None, False), ("inactive", "inactive", bool, False, None, False), ("system", "system", str, False, None, False), ("version", "version", str, False, None, False), ]) return js class ValueSetExpansionParameter(backboneelement.BackboneElement): """ Parameter that controlled the expansion process. A parameter that controlled the expansion process. These parameters may be used by users of expanded value sets to check whether the expansion is suitable for a particular purpose, or to pick the correct expansion. 
""" resource_type = "ValueSetExpansionParameter" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.name = None """ Name as assigned by the server. Type `str`. """ self.valueBoolean = None """ Value of the named parameter. Type `bool`. """ self.valueCode = None """ Value of the named parameter. Type `str`. """ self.valueDecimal = None """ Value of the named parameter. Type `float`. """ self.valueInteger = None """ Value of the named parameter. Type `int`. """ self.valueString = None """ Value of the named parameter. Type `str`. """ self.valueUri = None """ Value of the named parameter. Type `str`. """ super(ValueSetExpansionParameter, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ValueSetExpansionParameter, self).elementProperties() js.extend([ ("name", "name", str, False, None, True), ("valueBoolean", "valueBoolean", bool, False, "value", False), ("valueCode", "valueCode", str, False, "value", False), ("valueDecimal", "valueDecimal", float, False, "value", False), ("valueInteger", "valueInteger", int, False, "value", False), ("valueString", "valueString", str, False, "value", False), ("valueUri", "valueUri", str, False, "value", False), ]) return js import sys try: from . import codeableconcept except ImportError: codeableconcept = sys.modules[__package__ + '.codeableconcept'] try: from . import coding except ImportError: coding = sys.modules[__package__ + '.coding'] try: from . import contactdetail except ImportError: contactdetail = sys.modules[__package__ + '.contactdetail'] try: from . import fhirdate except ImportError: fhirdate = sys.modules[__package__ + '.fhirdate'] try: from . 
import identifier except ImportError: identifier = sys.modules[__package__ + '.identifier'] try: from . import usagecontext except ImportError: usagecontext = sys.modules[__package__ + '.usagecontext']
bsd-3-clause
9cc227f3dda773b75866cae6260fa981
36.866548
104
0.598468
4.594344
false
false
false
false
all-of-us/raw-data-repository
rdr_service/dao/metadata_dao.py
1
1239
from rdr_service.dao.base_dao import BaseDao from rdr_service.model.metadata import Metadata WORKBENCH_LAST_SYNC_KEY = 'WORKBENCH_LAST_SYNC' class MetadataDao(BaseDao): def __init__(self): super(MetadataDao, self).__init__(Metadata) def upsert_with_session(self, session, key, str_value=None, int_value=None, date_value=None): metadata = Metadata( key=key, strValue=str_value, intValue=int_value, dateValue=date_value ) exist = self.get_by_key_with_session(session, key) if exist: setattr(exist, 'strValue', str_value) setattr(exist, 'intValue', int_value) setattr(exist, 'dateValue', date_value) else: session.add(metadata) def get_by_key_with_session(self, session, key): return session.query(Metadata).filter(Metadata.key == key).first() def get_by_key(self, key): with self.session() as session: self.get_by_key_with_session(session, key) def upsert(self, key, str_value=None, int_value=None, date_value=None): with self.session() as session: self.upsert_with_session(session, key, str_value, int_value, date_value)
bsd-3-clause
9d8340f71acf52218282b83bd4d4292b
34.4
97
0.623083
3.5
false
false
false
false
all-of-us/raw-data-repository
rdr_service/lib_fhir/fhirclient_3_0_0/models/immunizationrecommendation.py
1
9048
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 3.0.0.11832 (http://hl7.org/fhir/StructureDefinition/ImmunizationRecommendation) on 2017-03-22. # 2017, SMART Health IT. from . import domainresource class ImmunizationRecommendation(domainresource.DomainResource): """ Guidance or advice relating to an immunization. A patient's point-in-time immunization and recommendation (i.e. forecasting a patient's immunization eligibility according to a published schedule) with optional supporting justification. """ resource_type = "ImmunizationRecommendation" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.identifier = None """ Business identifier. List of `Identifier` items (represented as `dict` in JSON). """ self.patient = None """ Who this profile is for. Type `FHIRReference` referencing `Patient` (represented as `dict` in JSON). """ self.recommendation = None """ Vaccine administration recommendations. List of `ImmunizationRecommendationRecommendation` items (represented as `dict` in JSON). """ super(ImmunizationRecommendation, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ImmunizationRecommendation, self).elementProperties() js.extend([ ("identifier", "identifier", identifier.Identifier, True, None, False), ("patient", "patient", fhirreference.FHIRReference, False, None, True), ("recommendation", "recommendation", ImmunizationRecommendationRecommendation, True, None, True), ]) return js from . import backboneelement class ImmunizationRecommendationRecommendation(backboneelement.BackboneElement): """ Vaccine administration recommendations. 
""" resource_type = "ImmunizationRecommendationRecommendation" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.date = None """ Date recommendation created. Type `FHIRDate` (represented as `str` in JSON). """ self.dateCriterion = None """ Dates governing proposed immunization. List of `ImmunizationRecommendationRecommendationDateCriterion` items (represented as `dict` in JSON). """ self.doseNumber = None """ Recommended dose number. Type `int`. """ self.forecastStatus = None """ Vaccine administration status. Type `CodeableConcept` (represented as `dict` in JSON). """ self.protocol = None """ Protocol used by recommendation. Type `ImmunizationRecommendationRecommendationProtocol` (represented as `dict` in JSON). """ self.supportingImmunization = None """ Past immunizations supporting recommendation. List of `FHIRReference` items referencing `Immunization` (represented as `dict` in JSON). """ self.supportingPatientInformation = None """ Patient observations supporting recommendation. List of `FHIRReference` items referencing `Observation, AllergyIntolerance` (represented as `dict` in JSON). """ self.targetDisease = None """ Disease to be immunized against. Type `CodeableConcept` (represented as `dict` in JSON). """ self.vaccineCode = None """ Vaccine recommendation applies to. Type `CodeableConcept` (represented as `dict` in JSON). 
""" super(ImmunizationRecommendationRecommendation, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ImmunizationRecommendationRecommendation, self).elementProperties() js.extend([ ("date", "date", fhirdate.FHIRDate, False, None, True), ("dateCriterion", "dateCriterion", ImmunizationRecommendationRecommendationDateCriterion, True, None, False), ("doseNumber", "doseNumber", int, False, None, False), ("forecastStatus", "forecastStatus", codeableconcept.CodeableConcept, False, None, True), ("protocol", "protocol", ImmunizationRecommendationRecommendationProtocol, False, None, False), ("supportingImmunization", "supportingImmunization", fhirreference.FHIRReference, True, None, False), ("supportingPatientInformation", "supportingPatientInformation", fhirreference.FHIRReference, True, None, False), ("targetDisease", "targetDisease", codeableconcept.CodeableConcept, False, None, False), ("vaccineCode", "vaccineCode", codeableconcept.CodeableConcept, False, None, False), ]) return js class ImmunizationRecommendationRecommendationDateCriterion(backboneelement.BackboneElement): """ Dates governing proposed immunization. Vaccine date recommendations. For example, earliest date to administer, latest date to administer, etc. """ resource_type = "ImmunizationRecommendationRecommendationDateCriterion" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.code = None """ Type of date. Type `CodeableConcept` (represented as `dict` in JSON). """ self.value = None """ Recommended date. Type `FHIRDate` (represented as `str` in JSON). 
""" super(ImmunizationRecommendationRecommendationDateCriterion, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ImmunizationRecommendationRecommendationDateCriterion, self).elementProperties() js.extend([ ("code", "code", codeableconcept.CodeableConcept, False, None, True), ("value", "value", fhirdate.FHIRDate, False, None, True), ]) return js class ImmunizationRecommendationRecommendationProtocol(backboneelement.BackboneElement): """ Protocol used by recommendation. Contains information about the protocol under which the vaccine was administered. """ resource_type = "ImmunizationRecommendationRecommendationProtocol" def __init__(self, jsondict=None, strict=True): """ Initialize all valid properties. :raises: FHIRValidationError on validation errors, unless strict is False :param dict jsondict: A JSON dictionary to use for initialization :param bool strict: If True (the default), invalid variables will raise a TypeError """ self.authority = None """ Who is responsible for protocol. Type `FHIRReference` referencing `Organization` (represented as `dict` in JSON). """ self.description = None """ Protocol details. Type `str`. """ self.doseSequence = None """ Dose number within sequence. Type `int`. """ self.series = None """ Name of vaccination series. Type `str`. """ super(ImmunizationRecommendationRecommendationProtocol, self).__init__(jsondict=jsondict, strict=strict) def elementProperties(self): js = super(ImmunizationRecommendationRecommendationProtocol, self).elementProperties() js.extend([ ("authority", "authority", fhirreference.FHIRReference, False, None, False), ("description", "description", str, False, None, False), ("doseSequence", "doseSequence", int, False, None, False), ("series", "series", str, False, None, False), ]) return js import sys try: from . import codeableconcept except ImportError: codeableconcept = sys.modules[__package__ + '.codeableconcept'] try: from . 
import fhirdate except ImportError: fhirdate = sys.modules[__package__ + '.fhirdate'] try: from . import fhirreference except ImportError: fhirreference = sys.modules[__package__ + '.fhirreference'] try: from . import identifier except ImportError: identifier = sys.modules[__package__ + '.identifier']
bsd-3-clause
335e04e5a7c7ea8ee0fa31d491cc5946
40.127273
125
0.655946
4.42445
false
false
false
false
brython-dev/brython
www/src/Lib/uuid.py
1
27421
r"""UUID objects (universally unique identifiers) according to RFC 4122. This module provides immutable UUID objects (class UUID) and the functions uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 UUIDs as specified in RFC 4122. If all you want is a unique ID, you should probably call uuid1() or uuid4(). Note that uuid1() may compromise privacy since it creates a UUID containing the computer's network address. uuid4() creates a random UUID. Typical usage: >>> import uuid # make a UUID based on the host ID and current time >>> uuid.uuid1() # doctest: +SKIP UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') # make a UUID using an MD5 hash of a namespace UUID and a name >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') # make a random UUID >>> uuid.uuid4() # doctest: +SKIP UUID('16fd2706-8baf-433b-82eb-8c7fada847da') # make a UUID using a SHA-1 hash of a namespace UUID and a name >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') # make a UUID from a string of hex digits (braces and hyphens ignored) >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') # convert a UUID to a string of hex digits in standard form >>> str(x) '00010203-0405-0607-0809-0a0b0c0d0e0f' # get the raw 16 bytes of the UUID >>> x.bytes b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' # make a UUID from a 16-byte string >>> uuid.UUID(bytes=x.bytes) UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') """ import os import sys from enum import Enum, _simple_enum __author__ = 'Ka-Ping Yee <ping@zesty.ca>' # The recognized platforms - known behaviors if sys.platform in ('win32', 'darwin'): _AIX = _LINUX = False else: import platform _platform_system = platform.system() _AIX = _platform_system == 'AIX' _LINUX = _platform_system == 'Linux' _MAC_DELIM = b':' _MAC_OMITS_LEADING_ZEROES = False if _AIX: _MAC_DELIM = b'.' 
_MAC_OMITS_LEADING_ZEROES = True RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ 'reserved for NCS compatibility', 'specified in RFC 4122', 'reserved for Microsoft compatibility', 'reserved for future definition'] int_ = int # The built-in int type bytes_ = bytes # The built-in bytes type @_simple_enum(Enum) class SafeUUID: safe = 0 unsafe = -1 unknown = None class UUID: """Instances of the UUID class represent UUIDs as specified in RFC 4122. UUID objects are immutable, hashable, and usable as dictionary keys. Converting a UUID to a string with str() yields something in the form '12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts five possible forms: a similar string of hexadecimal digits, or a tuple of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and 48-bit values respectively) as an argument named 'fields', or a string of 16 bytes (with all the integer fields in big-endian order) as an argument named 'bytes', or a string of 16 bytes (with the first three fields in little-endian order) as an argument named 'bytes_le', or a single 128-bit integer as an argument named 'int'. 
UUIDs have these read-only attributes: bytes the UUID as a 16-byte string (containing the six integer fields in big-endian byte order) bytes_le the UUID as a 16-byte string (with time_low, time_mid, and time_hi_version in little-endian byte order) fields a tuple of the six integer fields of the UUID, which are also available as six individual attributes and two derived attributes: time_low the first 32 bits of the UUID time_mid the next 16 bits of the UUID time_hi_version the next 16 bits of the UUID clock_seq_hi_variant the next 8 bits of the UUID clock_seq_low the next 8 bits of the UUID node the last 48 bits of the UUID time the 60-bit timestamp clock_seq the 14-bit sequence number hex the UUID as a 32-character hexadecimal string int the UUID as a 128-bit integer urn the UUID as a URN as specified in RFC 4122 variant the UUID variant (one of the constants RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) version the UUID version number (1 through 5, meaningful only when the variant is RFC_4122) is_safe An enum indicating whether the UUID has been generated in a way that is safe for multiprocessing applications, via uuid_generate_time_safe(3). """ __slots__ = ('int', 'is_safe', '__weakref__') def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None, *, is_safe=SafeUUID.unknown): r"""Create a UUID from either a string of 32 hexadecimal digits, a string of 16 bytes as the 'bytes' argument, a string of 16 bytes in little-endian order as the 'bytes_le' argument, a tuple of six integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as the 'fields' argument, or a single 128-bit integer as the 'int' argument. When a string of hex digits is given, curly braces, hyphens, and a URN prefix are all optional. 
For example, these expressions all yield the same UUID: UUID('{12345678-1234-5678-1234-567812345678}') UUID('12345678123456781234567812345678') UUID('urn:uuid:12345678-1234-5678-1234-567812345678') UUID(bytes='\x12\x34\x56\x78'*4) UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + '\x12\x34\x56\x78\x12\x34\x56\x78') UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) UUID(int=0x12345678123456781234567812345678) Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must be given. The 'version' argument is optional; if given, the resulting UUID will have its variant and version set according to RFC 4122, overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. is_safe is an enum exposed as an attribute on the instance. It indicates whether the UUID has been generated in a way that is safe for multiprocessing applications, via uuid_generate_time_safe(3). """ if [hex, bytes, bytes_le, fields, int].count(None) != 4: raise TypeError('one of the hex, bytes, bytes_le, fields, ' 'or int arguments must be given') if hex is not None: hex = hex.replace('urn:', '').replace('uuid:', '') hex = hex.strip('{}').replace('-', '') if len(hex) != 32: raise ValueError('badly formed hexadecimal UUID string') int = int_(hex, 16) if bytes_le is not None: if len(bytes_le) != 16: raise ValueError('bytes_le is not a 16-char string') bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + bytes_le[8-1:6-1:-1] + bytes_le[8:]) if bytes is not None: if len(bytes) != 16: raise ValueError('bytes is not a 16-char string') assert isinstance(bytes, bytes_), repr(bytes) int = int_.from_bytes(bytes) # big endian if fields is not None: if len(fields) != 6: raise ValueError('fields is not a 6-tuple') (time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node) = fields if not 0 <= time_low < 1<<32: raise ValueError('field 1 out of range (need a 32-bit value)') if not 0 <= time_mid < 1<<16: raise ValueError('field 2 out of range (need a 16-bit value)') 
if not 0 <= time_hi_version < 1<<16: raise ValueError('field 3 out of range (need a 16-bit value)') if not 0 <= clock_seq_hi_variant < 1<<8: raise ValueError('field 4 out of range (need an 8-bit value)') if not 0 <= clock_seq_low < 1<<8: raise ValueError('field 5 out of range (need an 8-bit value)') if not 0 <= node < 1<<48: raise ValueError('field 6 out of range (need a 48-bit value)') clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low int = ((time_low << 96) | (time_mid << 80) | (time_hi_version << 64) | (clock_seq << 48) | node) if int is not None: if not 0 <= int < 1<<128: raise ValueError('int is out of range (need a 128-bit value)') if version is not None: if not 1 <= version <= 5: raise ValueError('illegal version number') # Set the variant to RFC 4122. int &= ~(0xc000 << 48) int |= 0x8000 << 48 # Set the version number. int &= ~(0xf000 << 64) int |= version << 76 object.__setattr__(self, 'int', int) object.__setattr__(self, 'is_safe', is_safe) def __getstate__(self): d = {'int': self.int} if self.is_safe != SafeUUID.unknown: # is_safe is a SafeUUID instance. Return just its value, so that # it can be un-pickled in older Python versions without SafeUUID. d['is_safe'] = self.is_safe.value return d def __setstate__(self, state): object.__setattr__(self, 'int', state['int']) # is_safe was added in 3.7; it is also omitted when it is "unknown" object.__setattr__(self, 'is_safe', SafeUUID(state['is_safe']) if 'is_safe' in state else SafeUUID.unknown) def __eq__(self, other): if isinstance(other, UUID): return self.int == other.int return NotImplemented # Q. What's the value of being able to sort UUIDs? # A. Use them as keys in a B-Tree or similar mapping. 
def __lt__(self, other): if isinstance(other, UUID): return self.int < other.int return NotImplemented def __gt__(self, other): if isinstance(other, UUID): return self.int > other.int return NotImplemented def __le__(self, other): if isinstance(other, UUID): return self.int <= other.int return NotImplemented def __ge__(self, other): if isinstance(other, UUID): return self.int >= other.int return NotImplemented def __hash__(self): return hash(self.int) def __int__(self): return self.int def __repr__(self): return '%s(%r)' % (self.__class__.__name__, str(self)) def __setattr__(self, name, value): raise TypeError('UUID objects are immutable') def __str__(self): hex = '%032x' % self.int return '%s-%s-%s-%s-%s' % ( hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:]) @property def bytes(self): return self.int.to_bytes(16) # big endian @property def bytes_le(self): bytes = self.bytes return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] + bytes[8:]) @property def fields(self): return (self.time_low, self.time_mid, self.time_hi_version, self.clock_seq_hi_variant, self.clock_seq_low, self.node) @property def time_low(self): return self.int >> 96 @property def time_mid(self): return (self.int >> 80) & 0xffff @property def time_hi_version(self): return (self.int >> 64) & 0xffff @property def clock_seq_hi_variant(self): return (self.int >> 56) & 0xff @property def clock_seq_low(self): return (self.int >> 48) & 0xff @property def time(self): return (((self.time_hi_version & 0x0fff) << 48) | (self.time_mid << 32) | self.time_low) @property def clock_seq(self): return (((self.clock_seq_hi_variant & 0x3f) << 8) | self.clock_seq_low) @property def node(self): return self.int & 0xffffffffffff @property def hex(self): return '%032x' % self.int @property def urn(self): return 'urn:uuid:' + str(self) @property def variant(self): if not self.int & (0x8000 << 48): return RESERVED_NCS elif not self.int & (0x4000 << 48): return RFC_4122 elif not self.int & (0x2000 << 48): return 
RESERVED_MICROSOFT else: return RESERVED_FUTURE @property def version(self): # The version bits are only meaningful for RFC 4122 UUIDs. if self.variant == RFC_4122: return int((self.int >> 76) & 0xf) def _get_command_stdout(command, *args): import io, os, shutil, subprocess try: path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep) path_dirs.extend(['/sbin', '/usr/sbin']) executable = shutil.which(command, path=os.pathsep.join(path_dirs)) if executable is None: return None # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output # on stderr (Note: we don't have an example where the words we search # for are actually localized, but in theory some system could do so.) env = dict(os.environ) env['LC_ALL'] = 'C' proc = subprocess.Popen((executable,) + args, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, env=env) if not proc: return None stdout, stderr = proc.communicate() return io.BytesIO(stdout) except (OSError, subprocess.SubprocessError): return None # For MAC (a.k.a. IEEE 802, or EUI-48) addresses, the second least significant # bit of the first octet signifies whether the MAC address is universally (0) # or locally (1) administered. Network cards from hardware manufacturers will # always be universally administered to guarantee global uniqueness of the MAC # address, but any particular machine may have other interfaces which are # locally administered. An example of the latter is the bridge interface to # the Touch Bar on MacBook Pros. # # This bit works out to be the 42nd bit counting from 1 being the least # significant, or 1<<41. We'll prefer universally administered MAC addresses # over locally administered ones since the former are globally unique, but # we'll return the first of the latter found if that's all the machine has. 
# # See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local def _is_universal(mac): return not (mac & (1 << 41)) def _find_mac_near_keyword(command, args, keywords, get_word_index): """Searches a command's output for a MAC address near a keyword. Each line of words in the output is case-insensitively searched for any of the given keywords. Upon a match, get_word_index is invoked to pick a word from the line, given the index of the match. For example, lambda i: 0 would get the first word on the line, while lambda i: i - 1 would get the word preceding the keyword. """ stdout = _get_command_stdout(command, args) if stdout is None: return None first_local_mac = None for line in stdout: words = line.lower().rstrip().split() for i in range(len(words)): if words[i] in keywords: try: word = words[get_word_index(i)] mac = int(word.replace(_MAC_DELIM, b''), 16) except (ValueError, IndexError): # Virtual interfaces, such as those provided by # VPNs, do not have a colon-delimited MAC address # as expected, but a 16-byte HWAddr separated by # dashes. These should be ignored in favor of a # real MAC address pass else: if _is_universal(mac): return mac first_local_mac = first_local_mac or mac return first_local_mac or None def _parse_mac(word): # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). # # Virtual interfaces, such as those provided by VPNs, do not have a # colon-delimited MAC address as expected, but a 16-byte HWAddr separated # by dashes. These should be ignored in favor of a real MAC address parts = word.split(_MAC_DELIM) if len(parts) != 6: return if _MAC_OMITS_LEADING_ZEROES: # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. 
# en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 # not # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 if not all(1 <= len(part) <= 2 for part in parts): return hexstr = b''.join(part.rjust(2, b'0') for part in parts) else: if not all(len(part) == 2 for part in parts): return hexstr = b''.join(parts) try: return int(hexstr, 16) except ValueError: return def _find_mac_under_heading(command, args, heading): """Looks for a MAC address under a heading in a command's output. The first line of words in the output is searched for the given heading. Words at the same word index as the heading in subsequent lines are then examined to see if they look like MAC addresses. """ stdout = _get_command_stdout(command, args) if stdout is None: return None keywords = stdout.readline().rstrip().split() try: column_index = keywords.index(heading) except ValueError: return None first_local_mac = None for line in stdout: words = line.rstrip().split() try: word = words[column_index] except IndexError: continue mac = _parse_mac(word) if mac is None: continue if _is_universal(mac): return mac if first_local_mac is None: first_local_mac = mac return first_local_mac # The following functions call external programs to 'get' a macaddr value to # be used as basis for an uuid def _ifconfig_getnode(): """Get the hardware address on Unix by running ifconfig.""" # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') for args in ('', '-a', '-av'): mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) if mac: return mac return None def _ip_getnode(): """Get the hardware address on Unix by running ip.""" # This works on Linux with iproute2. 
mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) if mac: return mac return None def _arp_getnode(): """Get the hardware address on Unix by running arp.""" import os, socket if not hasattr(socket, "gethostbyname"): return None try: ip_addr = socket.gethostbyname(socket.gethostname()) except OSError: return None # Try getting the MAC addr from arp based on our IP address (Solaris). mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) if mac: return mac # This works on OpenBSD mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) if mac: return mac # This works on Linux, FreeBSD and NetBSD mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], lambda i: i+2) # Return None instead of 0. if mac: return mac return None def _lanscan_getnode(): """Get the hardware address on Unix by running lanscan.""" # This might work on HP-UX. return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) def _netstat_getnode(): """Get the hardware address on Unix by running netstat.""" # This works on AIX and might work on Tru64 UNIX. 
return _find_mac_under_heading('netstat', '-ian', b'Address') def _ipconfig_getnode(): """[DEPRECATED] Get the hardware address on Windows.""" # bpo-40501: UuidCreateSequential() is now the only supported approach return _windll_getnode() def _netbios_getnode(): """[DEPRECATED] Get the hardware address on Windows.""" # bpo-40501: UuidCreateSequential() is now the only supported approach return _windll_getnode() # Import optional C extension at toplevel, to help disabling it when testing try: import _uuid _generate_time_safe = getattr(_uuid, "generate_time_safe", None) _UuidCreate = getattr(_uuid, "UuidCreate", None) _has_uuid_generate_time_safe = _uuid.has_uuid_generate_time_safe except ImportError: _uuid = None _generate_time_safe = None _UuidCreate = None _has_uuid_generate_time_safe = None def _load_system_functions(): """[DEPRECATED] Platform-specific functions loaded at import time""" def _unix_getnode(): """Get the hardware address on Unix using the _uuid extension module.""" if _generate_time_safe: uuid_time, _ = _generate_time_safe() return UUID(bytes=uuid_time).node def _windll_getnode(): """Get the hardware address on Windows using the _uuid extension module.""" if _UuidCreate: uuid_bytes = _UuidCreate() return UUID(bytes_le=uuid_bytes).node def _random_getnode(): """Get a random node ID.""" # RFC 4122, $4.1.6 says "For systems with no IEEE address, a randomly or # pseudo-randomly generated value may be used; see Section 4.5. The # multicast bit must be set in such addresses, in order that they will # never conflict with addresses obtained from network cards." # # The "multicast bit" of a MAC address is defined to be "the least # significant bit of the first octet". This works out to be the 41st bit # counting from 1 being the least significant bit, or 1<<40. # # See https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast import random return random.getrandbits(48) | (1 << 40) # _OS_GETTERS, when known, are targeted for a specific OS or platform. 
# The order is by 'common practice' on the specified platform. # Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method # which, when successful, means none of these "external" methods are called. # _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., # @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) if _LINUX: _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] elif sys.platform == 'darwin': _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] elif sys.platform == 'win32': # bpo-40201: _windll_getnode will always succeed, so these are not needed _OS_GETTERS = [] elif _AIX: _OS_GETTERS = [_netstat_getnode] else: _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, _netstat_getnode, _lanscan_getnode] if os.name == 'posix': _GETTERS = [_unix_getnode] + _OS_GETTERS elif os.name == 'nt': _GETTERS = [_windll_getnode] + _OS_GETTERS else: _GETTERS = _OS_GETTERS _node = None def getnode(): """Get the hardware address as a 48-bit positive integer. The first time this runs, it may launch a separate program, which could be quite slow. If all attempts to obtain the hardware address fail, we choose a random 48-bit number with its eighth bit set to 1 as recommended in RFC 4122. """ global _node if _node is not None: return _node for getter in _GETTERS + [_random_getnode]: try: _node = getter() except: continue if (_node is not None) and (0 <= _node < (1 << 48)): return _node assert False, '_random_getnode() returned invalid value: {}'.format(_node) _last_timestamp = None def uuid1(node=None, clock_seq=None): """Generate a UUID from a host ID, sequence number, and the current time. If 'node' is not given, getnode() is used to obtain the hardware address. If 'clock_seq' is given, it is used as the sequence number; otherwise a random 14-bit sequence number is chosen.""" # When the system provides a version-1 UUID generator, use it (but don't # use UuidCreate here because its UUIDs don't conform to RFC 4122). 
if _generate_time_safe is not None and node is clock_seq is None: uuid_time, safely_generated = _generate_time_safe() try: is_safe = SafeUUID(safely_generated) except ValueError: is_safe = SafeUUID.unknown return UUID(bytes=uuid_time, is_safe=is_safe) global _last_timestamp import time nanoseconds = time.time_ns() # 0x01b21dd213814000 is the number of 100-ns intervals between the # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. timestamp = nanoseconds // 100 + 0x01b21dd213814000 if _last_timestamp is not None and timestamp <= _last_timestamp: timestamp = _last_timestamp + 1 _last_timestamp = timestamp if clock_seq is None: import random clock_seq = random.getrandbits(14) # instead of stable storage time_low = timestamp & 0xffffffff time_mid = (timestamp >> 32) & 0xffff time_hi_version = (timestamp >> 48) & 0x0fff clock_seq_low = clock_seq & 0xff clock_seq_hi_variant = (clock_seq >> 8) & 0x3f if node is None: node = getnode() return UUID(fields=(time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node), version=1) def uuid3(namespace, name): """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" from hashlib import md5 digest = md5( namespace.bytes + bytes(name, "utf-8"), usedforsecurity=False ).digest() return UUID(bytes=digest[:16], version=3) def uuid4(): """Generate a random UUID.""" return UUID(bytes=os.urandom(16), version=4) def uuid5(namespace, name): """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" from hashlib import sha1 hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest() return UUID(bytes=hash[:16], version=5) # The following standard UUIDs are for use with uuid3() or uuid5(). NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8')
bsd-3-clause
6e3223b515727de4e09eae6e2340b23f
36.511628
89
0.608694
3.632883
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/cp863.py
35
34950
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp863', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x0084: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE 0x0086: 0x00b6, # PILCROW SIGN 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x008d: 0x2017, # DOUBLE LOW LINE 0x008e: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE 0x008f: 0x00a7, # SECTION SIGN 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x0091: 0x00c8, # LATIN CAPITAL 
LETTER E WITH GRAVE 0x0092: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x0094: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x0095: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE 0x0098: 0x00a4, # CURRENCY SIGN 0x0099: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00a2, # CENT SIGN 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE 0x009e: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00a6, # BROKEN BAR 0x00a1: 0x00b4, # ACUTE ACCENT 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00a8, # DIAERESIS 0x00a5: 0x00b8, # CEDILLA 0x00a6: 0x00b3, # SUPERSCRIPT THREE 0x00a7: 0x00af, # MACRON 0x00a8: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND 
LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S 
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> 
CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' 
# 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL 
LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xc2' # 0x0084 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE '\xb6' # 0x0086 -> PILCROW SIGN '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\u2017' # 0x008d -> DOUBLE LOW LINE '\xc0' # 0x008e -> LATIN CAPITAL LETTER A WITH GRAVE '\xa7' # 0x008f -> SECTION SIGN '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE '\xc8' # 0x0091 -> LATIN CAPITAL LETTER E WITH GRAVE '\xca' # 0x0092 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xcb' # 0x0094 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xcf' # 0x0095 -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE '\xa4' # 0x0098 -> CURRENCY SIGN '\xd4' # 0x0099 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xa2' # 0x009b -> CENT SIGN '\xa3' # 0x009c -> POUND SIGN '\xd9' # 0x009d -> LATIN CAPITAL LETTER U WITH GRAVE '\xdb' # 0x009e -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK 
'\xa6' # 0x00a0 -> BROKEN BAR '\xb4' # 0x00a1 -> ACUTE ACCENT '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE '\xa8' # 0x00a4 -> DIAERESIS '\xb8' # 0x00a5 -> CEDILLA '\xb3' # 0x00a6 -> SUPERSCRIPT THREE '\xaf' # 0x00a7 -> MACRON '\xce' # 0x00a8 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u2310' # 0x00a9 -> REVERSED NOT SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\xbe' # 0x00ad -> VULGAR FRACTION THREE QUARTERS '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT 
SINGLE '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA '\xb5' # 0x00e6 -> MICRO SIGN '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA '\u221e' # 0x00ec -> INFINITY '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI '\u03b5' # 0x00ee -> 
GREEK SMALL LETTER EPSILON '\u2229' # 0x00ef -> INTERSECTION '\u2261' # 0x00f0 -> IDENTICAL TO '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO '\u2320' # 0x00f4 -> TOP HALF INTEGRAL '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL '\xf7' # 0x00f6 -> DIVISION SIGN '\u2248' # 0x00f7 -> ALMOST EQUAL TO '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\u221a' # 0x00fb -> SQUARE ROOT '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT 
PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 
0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a2: 0x009b, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a4: 0x0098, # CURRENCY SIGN 0x00a6: 0x00a0, # BROKEN BAR 0x00a7: 0x008f, # SECTION SIGN 0x00a8: 0x00a4, # DIAERESIS 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00af: 0x00a7, # MACRON 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b3: 0x00a6, # SUPERSCRIPT THREE 0x00b4: 0x00a1, # ACUTE ACCENT 0x00b5: 0x00e6, # MICRO SIGN 0x00b6: 0x0086, # PILCROW SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00b8: 0x00a5, # CEDILLA 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00be: 0x00ad, # VULGAR FRACTION THREE QUARTERS 0x00c0: 0x008e, # LATIN CAPITAL 
LETTER A WITH GRAVE 0x00c2: 0x0084, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00c8: 0x0091, # LATIN CAPITAL LETTER E WITH GRAVE 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00ca: 0x0092, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX 0x00cb: 0x0094, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00ce: 0x00a8, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00cf: 0x0095, # LATIN CAPITAL LETTER I WITH DIAERESIS 0x00d4: 0x0099, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00d9: 0x009d, # LATIN CAPITAL LETTER U WITH GRAVE 0x00db: 0x009e, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00f7: 0x00f6, # DIVISION SIGN 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 
0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x2017: 0x008d, # DOUBLE LOW LINE 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
bsd-3-clause
268ccec8d3ca57acb23f07db2d84b1c3
48.071633
97
0.590701
3.010336
false
false
false
false
brython-dev/brython
www/gallery/sudoku.py
1
1701
# Boris Borcic 2006 # Quick and concise Python 2.5 sudoku solver # # Adapted for Brython by Pierre Quentel # Load pre-computed tables # Don't use the stdlib module json (much too slow), use Javascript object # JSON instead from browser import window json = window.JSON src = open('sudoku.json').read() data = json.parse(src) w2q = data['w2q'] q2w = data['q2w'] w2q2w = data['w2q2w'] class Completed(Exception) : pass def sudoku99(problem) : givens = list(9 * j + int(k) - 1 for j, k in enumerate(problem[:81]) if '0' < k) try: search(givens, [9] * len(q2w), set(), set()) except Completed as ws : return ''.join(str(w % 9 + 1) for w in sorted(ws.args[0])) def search(w0s, q2nw, takens, ws) : while 1: i = 0 while w0s: w0 = w0s.pop() takens.add(w0) ws.add(w0) for q in w2q[w0]: q2nw[q] += 100 for w in set(w2q2w[w0]) - takens: takens.add(w) for q in w2q[w]: n = q2nw[q] = q2nw[q] - 1 if n < 2: w0s.append((set(q2w[q]) - takens).pop()) if len(ws) > 80: raise Completed(ws) w1, w0 = set(q2w[q2nw.index(2)]) - takens try: search([w1], q2nw[:], takens.copy(), ws.copy()) except KeyError : w0s.append(w0) if __name__=='__main__': #print(sudoku99('530070000600195000098000060800060003400803001700020006060000280000419005000080079')) data = '004050003'+'906400000'+'130006000'+'020310000'+'090000080'+'000047050'+\ '000070038'+'000002709'+'600090100' print(sudoku99(data))
bsd-3-clause
bdb170b167ddbad8ea48ddcecccaaa6b
27.830508
105
0.547913
2.878173
false
false
false
false
brython-dev/brython
www/src/Lib/test/pystone.py
122
7381
#! /usr/bin/env python3 """ "PYSTONE" Benchmark Program Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes) Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013. Translated from ADA to C by Rick Richardson. Every method to preserve ADA-likeness has been used, at the expense of C-ness. Translated from C to Python by Guido van Rossum. Version History: Version 1.1 corrects two bugs in version 1.0: First, it leaked memory: in Proc1(), NextRecord ends up having a pointer to itself. I have corrected this by zapping NextRecord.PtrComp at the end of Proc1(). Second, Proc3() used the operator != to compare a record to None. This is rather inefficient and not true to the intention of the original benchmark (where a pointer comparison to None is intended; the != operator attempts to find a method __cmp__ to do value comparison of the record). Version 1.1 runs 5-10 percent faster than version 1.0, so benchmark figures of different versions can't be compared directly. """ LOOPS = 50000 from time import clock __version__ = "1.1" [Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6) class Record: def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0, IntComp = 0, StringComp = 0): self.PtrComp = PtrComp self.Discr = Discr self.EnumComp = EnumComp self.IntComp = IntComp self.StringComp = StringComp def copy(self): return Record(self.PtrComp, self.Discr, self.EnumComp, self.IntComp, self.StringComp) TRUE = 1 FALSE = 0 def main(loops=LOOPS): benchtime, stones = pystones(loops) print("Pystone(%s) time for %d passes = %g" % \ (__version__, loops, benchtime)) print("This machine benchmarks at %g pystones/second" % stones) def pystones(loops=LOOPS): return Proc0(loops) IntGlob = 0 BoolGlob = FALSE Char1Glob = '\0' Char2Glob = '\0' Array1Glob = [0]*51 Array2Glob = [x[:] for x in [Array1Glob]*51] PtrGlb = None PtrGlbNext = None def Proc0(loops=LOOPS): global IntGlob global BoolGlob global Char1Glob global Char2Glob global Array1Glob global Array2Glob global 
PtrGlb global PtrGlbNext starttime = clock() for i in range(loops): pass nulltime = clock() - starttime PtrGlbNext = Record() PtrGlb = Record() PtrGlb.PtrComp = PtrGlbNext PtrGlb.Discr = Ident1 PtrGlb.EnumComp = Ident3 PtrGlb.IntComp = 40 PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING" String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING" Array2Glob[8][7] = 10 starttime = clock() for i in range(loops): Proc5() Proc4() IntLoc1 = 2 IntLoc2 = 3 String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING" EnumLoc = Ident2 BoolGlob = not Func2(String1Loc, String2Loc) while IntLoc1 < IntLoc2: IntLoc3 = 5 * IntLoc1 - IntLoc2 IntLoc3 = Proc7(IntLoc1, IntLoc2) IntLoc1 = IntLoc1 + 1 Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3) PtrGlb = Proc1(PtrGlb) CharIndex = 'A' while CharIndex <= Char2Glob: if EnumLoc == Func1(CharIndex, 'C'): EnumLoc = Proc6(Ident1) CharIndex = chr(ord(CharIndex)+1) IntLoc3 = IntLoc2 * IntLoc1 IntLoc2 = IntLoc3 / IntLoc1 IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1 IntLoc1 = Proc2(IntLoc1) benchtime = clock() - starttime - nulltime if benchtime == 0.0: loopsPerBenchtime = 0.0 else: loopsPerBenchtime = (loops / benchtime) return benchtime, loopsPerBenchtime def Proc1(PtrParIn): PtrParIn.PtrComp = NextRecord = PtrGlb.copy() PtrParIn.IntComp = 5 NextRecord.IntComp = PtrParIn.IntComp NextRecord.PtrComp = PtrParIn.PtrComp NextRecord.PtrComp = Proc3(NextRecord.PtrComp) if NextRecord.Discr == Ident1: NextRecord.IntComp = 6 NextRecord.EnumComp = Proc6(PtrParIn.EnumComp) NextRecord.PtrComp = PtrGlb.PtrComp NextRecord.IntComp = Proc7(NextRecord.IntComp, 10) else: PtrParIn = NextRecord.copy() NextRecord.PtrComp = None return PtrParIn def Proc2(IntParIO): IntLoc = IntParIO + 10 while 1: if Char1Glob == 'A': IntLoc = IntLoc - 1 IntParIO = IntLoc - IntGlob EnumLoc = Ident1 if EnumLoc == Ident1: break return IntParIO def Proc3(PtrParOut): global IntGlob if PtrGlb is not None: PtrParOut = PtrGlb.PtrComp else: IntGlob = 100 PtrGlb.IntComp = Proc7(10, IntGlob) return PtrParOut def 
Proc4(): global Char2Glob BoolLoc = Char1Glob == 'A' BoolLoc = BoolLoc or BoolGlob Char2Glob = 'B' def Proc5(): global Char1Glob global BoolGlob Char1Glob = 'A' BoolGlob = FALSE def Proc6(EnumParIn): EnumParOut = EnumParIn if not Func3(EnumParIn): EnumParOut = Ident4 if EnumParIn == Ident1: EnumParOut = Ident1 elif EnumParIn == Ident2: if IntGlob > 100: EnumParOut = Ident1 else: EnumParOut = Ident4 elif EnumParIn == Ident3: EnumParOut = Ident2 elif EnumParIn == Ident4: pass elif EnumParIn == Ident5: EnumParOut = Ident3 return EnumParOut def Proc7(IntParI1, IntParI2): IntLoc = IntParI1 + 2 IntParOut = IntParI2 + IntLoc return IntParOut def Proc8(Array1Par, Array2Par, IntParI1, IntParI2): global IntGlob IntLoc = IntParI1 + 5 Array1Par[IntLoc] = IntParI2 Array1Par[IntLoc+1] = Array1Par[IntLoc] Array1Par[IntLoc+30] = IntLoc for IntIndex in range(IntLoc, IntLoc+2): Array2Par[IntLoc][IntIndex] = IntLoc Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1 Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc] IntGlob = 5 def Func1(CharPar1, CharPar2): CharLoc1 = CharPar1 CharLoc2 = CharLoc1 if CharLoc2 != CharPar2: return Ident1 else: return Ident2 def Func2(StrParI1, StrParI2): IntLoc = 1 while IntLoc <= 1: if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1: CharLoc = 'A' IntLoc = IntLoc + 1 if CharLoc >= 'W' and CharLoc <= 'Z': IntLoc = 7 if CharLoc == 'X': return TRUE else: if StrParI1 > StrParI2: IntLoc = IntLoc + 7 return TRUE else: return FALSE def Func3(EnumParIn): EnumLoc = EnumParIn if EnumLoc == Ident3: return TRUE return FALSE if __name__ == '__main__': import sys def error(msg): print(msg, end=' ', file=sys.stderr) print("usage: %s [number_of_loops]" % sys.argv[0], file=sys.stderr) sys.exit(100) nargs = len(sys.argv) - 1 if nargs > 1: error("%d arguments are too many;" % nargs) elif nargs == 1: try: loops = int(sys.argv[1]) except ValueError: error("Invalid argument %r;" % sys.argv[1]) else: loops = LOOPS main(loops)
bsd-3-clause
0bb30a92208f2332868df4c05f8e1f07
26.337037
75
0.600461
3.302461
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/cp775.py
35
35173
""" Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp775', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE 0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON 0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA 0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA 0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x0090: 0x00c9, # LATIN 
CAPITAL LETTER E WITH ACUTE 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE 0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA 0x0096: 0x00a2, # CENT SIGN 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x009e: 0x00d7, # MULTIPLICATION SIGN 0x009f: 0x00a4, # CURRENCY SIGN 0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON 0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE 0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK 0x00a7: 0x00a6, # BROKEN BAR 0x00a8: 0x00a9, # COPYRIGHT SIGN 0x00a9: 0x00ae, # REGISTERED SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK 0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON 0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK 0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX 
DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK 0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK 0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON 0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK 0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON 0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK 0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE 0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK 0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK 0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON 0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON 
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE 0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE 0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE 0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA 0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA 0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA 0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA 0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA 0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON 0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA 0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK 0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS 0x00f4: 0x00b6, # PILCROW SIGN 0x00f5: 0x00a7, # SECTION SIGN 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x00b9, # SUPERSCRIPT ONE 0x00fc: 0x00b3, # SUPERSCRIPT THREE 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> 
SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' 
# 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL 
LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE '\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS '\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE '\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE '\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON '\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA '\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA '\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE '\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS '\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA '\xa2' # 0x0096 -> CENT SIGN '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE '\xa3' # 0x009c -> POUND SIGN '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE '\xd7' # 0x009e -> MULTIPLICATION SIGN '\xa4' # 
0x009f -> CURRENCY SIGN '\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON '\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE '\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE '\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE '\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK '\xa6' # 0x00a7 -> BROKEN BAR '\xa9' # 0x00a8 -> COPYRIGHT SIGN '\xae' # 0x00a9 -> REGISTERED SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK '\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON '\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK '\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK '\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u0172' # 0x00c6 -> LATIN CAPITAL 
LETTER U WITH OGONEK '\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON '\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK '\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON '\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK '\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE '\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK '\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON '\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK '\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON '\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) '\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE '\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE '\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE '\xb5' # 0x00e6 -> MICRO SIGN '\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE '\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA '\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA '\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA '\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA '\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA 
'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON '\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA '\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK '\xad' # 0x00f0 -> SOFT HYPHEN '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK '\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS '\xb6' # 0x00f4 -> PILCROW SIGN '\xa7' # 0x00f5 -> SECTION SIGN '\xf7' # 0x00f6 -> DIVISION SIGN '\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\xb9' # 0x00fb -> SUPERSCRIPT ONE '\xb3' # 0x00fc -> SUPERSCRIPT THREE '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT 
SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # 
CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a2: 0x0096, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a4: 0x009f, # CURRENCY SIGN 0x00a6: 0x00a7, # BROKEN BAR 0x00a7: 0x00f5, # SECTION SIGN 0x00a9: 0x00a8, # COPYRIGHT SIGN 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00ad: 0x00f0, # SOFT HYPHEN 0x00ae: 0x00a9, # REGISTERED SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b3: 0x00fc, # SUPERSCRIPT THREE 0x00b5: 0x00e6, # MICRO SIGN 0x00b6: 0x00f4, # PILCROW SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00b9: 0x00fb, # SUPERSCRIPT ONE 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # 
VULGAR FRACTION ONE HALF 0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE 0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00d7: 0x009e, # MULTIPLICATION SIGN 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS 0x00f7: 0x00f6, # DIVISION SIGN 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON 0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON 0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK 0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK 0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE 0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE 0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON 0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON 0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON 0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON 0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE 0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE 0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK 0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK 0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA 0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA 0x012a: 0x00a1, # LATIN 
CAPITAL LETTER I WITH MACRON 0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON 0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK 0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK 0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA 0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA 0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA 0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA 0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE 0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE 0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA 0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA 0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON 0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON 0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA 0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE 0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON 0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON 0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON 0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON 0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK 0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE 0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE 0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON 0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON 0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK 0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK 0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK 0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK 0x2219: 0x00f9, # BULLET OPERATOR 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN 
AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
bsd-3-clause
ddbed86ea7ff0bf9cba2b80864682b6e
48.463415
103
0.59051
3.020438
false
false
false
false
brython-dev/brython
www/src/Lib/test/test_importlib/source/test_file_loader.py
1
32559
from test.test_importlib import abc, util importlib = util.import_importlib('importlib') importlib_abc = util.import_importlib('importlib.abc') machinery = util.import_importlib('importlib.machinery') importlib_util = util.import_importlib('importlib.util') import errno import marshal import os import py_compile import shutil import stat import sys import types import unittest import warnings from test.support.import_helper import make_legacy_pyc, unload from test.test_py_compile import without_source_date_epoch from test.test_py_compile import SourceDateEpochTestMeta class SimpleTest(abc.LoaderTests): """Should have no issue importing a source module [basic]. And if there is a syntax error, it should raise a SyntaxError [syntax error]. """ def setUp(self): self.name = 'spam' self.filepath = os.path.join('ham', self.name + '.py') self.loader = self.machinery.SourceFileLoader(self.name, self.filepath) def test_load_module_API(self): class Tester(self.abc.FileLoader): def get_source(self, _): return 'attr = 42' def is_package(self, _): return False loader = Tester('blah', 'blah.py') self.addCleanup(unload, 'blah') with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module() # Should not raise an exception. def test_get_filename_API(self): # If fullname is not set then assume self.path is desired. 
class Tester(self.abc.FileLoader): def get_code(self, _): pass def get_source(self, _): pass def is_package(self, _): pass def module_repr(self, _): pass path = 'some_path' name = 'some_name' loader = Tester(name, path) self.assertEqual(path, loader.get_filename(name)) self.assertEqual(path, loader.get_filename()) self.assertEqual(path, loader.get_filename(None)) with self.assertRaises(ImportError): loader.get_filename(name + 'XXX') def test_equality(self): other = self.machinery.SourceFileLoader(self.name, self.filepath) self.assertEqual(self.loader, other) def test_inequality(self): other = self.machinery.SourceFileLoader('_' + self.name, self.filepath) self.assertNotEqual(self.loader, other) # [basic] def test_module(self): with util.create_modules('_temp') as mapping: loader = self.machinery.SourceFileLoader('_temp', mapping['_temp']) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module('_temp') self.assertIn('_temp', sys.modules) check = {'__name__': '_temp', '__file__': mapping['_temp'], '__package__': ''} for attr, value in check.items(): self.assertEqual(getattr(module, attr), value) def test_package(self): with util.create_modules('_pkg.__init__') as mapping: loader = self.machinery.SourceFileLoader('_pkg', mapping['_pkg.__init__']) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module('_pkg') self.assertIn('_pkg', sys.modules) check = {'__name__': '_pkg', '__file__': mapping['_pkg.__init__'], '__path__': [os.path.dirname(mapping['_pkg.__init__'])], '__package__': '_pkg'} for attr, value in check.items(): self.assertEqual(getattr(module, attr), value) def test_lacking_parent(self): with util.create_modules('_pkg.__init__', '_pkg.mod')as mapping: loader = self.machinery.SourceFileLoader('_pkg.mod', mapping['_pkg.mod']) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module('_pkg.mod') 
self.assertIn('_pkg.mod', sys.modules) check = {'__name__': '_pkg.mod', '__file__': mapping['_pkg.mod'], '__package__': '_pkg'} for attr, value in check.items(): self.assertEqual(getattr(module, attr), value) def fake_mtime(self, fxn): """Fake mtime to always be higher than expected.""" return lambda name: fxn(name) + 1 def test_module_reuse(self): with util.create_modules('_temp') as mapping: loader = self.machinery.SourceFileLoader('_temp', mapping['_temp']) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module('_temp') module_id = id(module) module_dict_id = id(module.__dict__) with open(mapping['_temp'], 'w', encoding='utf-8') as file: file.write("testing_var = 42\n") with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module('_temp') self.assertIn('testing_var', module.__dict__, "'testing_var' not in " "{0}".format(list(module.__dict__.keys()))) self.assertEqual(module, sys.modules['_temp']) self.assertEqual(id(module), module_id) self.assertEqual(id(module.__dict__), module_dict_id) def test_state_after_failure(self): # A failed reload should leave the original module intact. 
attributes = ('__file__', '__path__', '__package__') value = '<test>' name = '_temp' with util.create_modules(name) as mapping: orig_module = types.ModuleType(name) for attr in attributes: setattr(orig_module, attr, value) with open(mapping[name], 'w', encoding='utf-8') as file: file.write('+++ bad syntax +++') loader = self.machinery.SourceFileLoader('_temp', mapping['_temp']) with self.assertRaises(SyntaxError): loader.exec_module(orig_module) for attr in attributes: self.assertEqual(getattr(orig_module, attr), value) with self.assertRaises(SyntaxError): with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) loader.load_module(name) for attr in attributes: self.assertEqual(getattr(orig_module, attr), value) # [syntax error] def test_bad_syntax(self): with util.create_modules('_temp') as mapping: with open(mapping['_temp'], 'w', encoding='utf-8') as file: file.write('=') loader = self.machinery.SourceFileLoader('_temp', mapping['_temp']) with self.assertRaises(SyntaxError): with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) loader.load_module('_temp') self.assertNotIn('_temp', sys.modules) def test_file_from_empty_string_dir(self): # Loading a module found from an empty string entry on sys.path should # not only work, but keep all attributes relative. 
file_path = '_temp.py' with open(file_path, 'w', encoding='utf-8') as file: file.write("# test file for importlib") try: with util.uncache('_temp'): loader = self.machinery.SourceFileLoader('_temp', file_path) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) mod = loader.load_module('_temp') self.assertEqual(file_path, mod.__file__) self.assertEqual(self.util.cache_from_source(file_path), mod.__cached__) finally: os.unlink(file_path) pycache = os.path.dirname(self.util.cache_from_source(file_path)) if os.path.exists(pycache): shutil.rmtree(pycache) @util.writes_bytecode_files def test_timestamp_overflow(self): # When a modification timestamp is larger than 2**32, it should be # truncated rather than raise an OverflowError. with util.create_modules('_temp') as mapping: source = mapping['_temp'] compiled = self.util.cache_from_source(source) with open(source, 'w', encoding='utf-8') as f: f.write("x = 5") try: os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5)) except OverflowError: self.skipTest("cannot set modification time to large integer") except OSError as e: if e.errno != getattr(errno, 'EOVERFLOW', None): raise self.skipTest("cannot set modification time to large integer ({})".format(e)) loader = self.machinery.SourceFileLoader('_temp', mapping['_temp']) # PEP 451 module = types.ModuleType('_temp') module.__spec__ = self.util.spec_from_loader('_temp', loader) loader.exec_module(module) self.assertEqual(module.x, 5) self.assertTrue(os.path.exists(compiled)) os.unlink(compiled) # PEP 302 with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) mod = loader.load_module('_temp') # Sanity checks. self.assertEqual(mod.__cached__, compiled) self.assertEqual(mod.x, 5) # The pyc file was created. 
self.assertTrue(os.path.exists(compiled)) def test_unloadable(self): loader = self.machinery.SourceFileLoader('good name', {}) module = types.ModuleType('bad name') module.__spec__ = self.machinery.ModuleSpec('bad name', loader) with self.assertRaises(ImportError): loader.exec_module(module) with self.assertRaises(ImportError): with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) loader.load_module('bad name') @util.writes_bytecode_files def test_checked_hash_based_pyc(self): with util.create_modules('_temp') as mapping: source = mapping['_temp'] pyc = self.util.cache_from_source(source) with open(source, 'wb') as fp: fp.write(b'state = "old"') os.utime(source, (50, 50)) py_compile.compile( source, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH, ) loader = self.machinery.SourceFileLoader('_temp', source) mod = types.ModuleType('_temp') mod.__spec__ = self.util.spec_from_loader('_temp', loader) loader.exec_module(mod) self.assertEqual(mod.state, 'old') # Write a new source with the same mtime and size as before. 
with open(source, 'wb') as fp: fp.write(b'state = "new"') os.utime(source, (50, 50)) loader.exec_module(mod) self.assertEqual(mod.state, 'new') with open(pyc, 'rb') as fp: data = fp.read() self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b11) self.assertEqual( self.util.source_hash(b'state = "new"'), data[8:16], ) @util.writes_bytecode_files def test_overridden_checked_hash_based_pyc(self): with util.create_modules('_temp') as mapping, \ unittest.mock.patch('_imp.check_hash_based_pycs', 'never'): source = mapping['_temp'] pyc = self.util.cache_from_source(source) with open(source, 'wb') as fp: fp.write(b'state = "old"') os.utime(source, (50, 50)) py_compile.compile( source, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH, ) loader = self.machinery.SourceFileLoader('_temp', source) mod = types.ModuleType('_temp') mod.__spec__ = self.util.spec_from_loader('_temp', loader) loader.exec_module(mod) self.assertEqual(mod.state, 'old') # Write a new source with the same mtime and size as before. with open(source, 'wb') as fp: fp.write(b'state = "new"') os.utime(source, (50, 50)) loader.exec_module(mod) self.assertEqual(mod.state, 'old') @util.writes_bytecode_files def test_unchecked_hash_based_pyc(self): with util.create_modules('_temp') as mapping: source = mapping['_temp'] pyc = self.util.cache_from_source(source) with open(source, 'wb') as fp: fp.write(b'state = "old"') os.utime(source, (50, 50)) py_compile.compile( source, invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH, ) loader = self.machinery.SourceFileLoader('_temp', source) mod = types.ModuleType('_temp') mod.__spec__ = self.util.spec_from_loader('_temp', loader) loader.exec_module(mod) self.assertEqual(mod.state, 'old') # Update the source file, which should be ignored. 
with open(source, 'wb') as fp: fp.write(b'state = "new"') loader.exec_module(mod) self.assertEqual(mod.state, 'old') with open(pyc, 'rb') as fp: data = fp.read() self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b1) self.assertEqual( self.util.source_hash(b'state = "old"'), data[8:16], ) @util.writes_bytecode_files def test_overridden_unchecked_hash_based_pyc(self): with util.create_modules('_temp') as mapping, \ unittest.mock.patch('_imp.check_hash_based_pycs', 'always'): source = mapping['_temp'] pyc = self.util.cache_from_source(source) with open(source, 'wb') as fp: fp.write(b'state = "old"') os.utime(source, (50, 50)) py_compile.compile( source, invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH, ) loader = self.machinery.SourceFileLoader('_temp', source) mod = types.ModuleType('_temp') mod.__spec__ = self.util.spec_from_loader('_temp', loader) loader.exec_module(mod) self.assertEqual(mod.state, 'old') # Update the source file, which should be ignored. with open(source, 'wb') as fp: fp.write(b'state = "new"') loader.exec_module(mod) self.assertEqual(mod.state, 'new') with open(pyc, 'rb') as fp: data = fp.read() self.assertEqual(int.from_bytes(data[4:8], 'little'), 0b1) self.assertEqual( self.util.source_hash(b'state = "new"'), data[8:16], ) (Frozen_SimpleTest, Source_SimpleTest ) = util.test_both(SimpleTest, importlib=importlib, machinery=machinery, abc=importlib_abc, util=importlib_util) class SourceDateEpochTestMeta(SourceDateEpochTestMeta, type(Source_SimpleTest)): pass class SourceDateEpoch_SimpleTest(Source_SimpleTest, metaclass=SourceDateEpochTestMeta, source_date_epoch=True): pass class BadBytecodeTest: def import_(self, file, module_name): raise NotImplementedError def manipulate_bytecode(self, name, mapping, manipulator, *, del_source=False, invalidation_mode=py_compile.PycInvalidationMode.TIMESTAMP): """Manipulate the bytecode of a module by passing it into a callable that returns what to use as the new bytecode.""" try: del 
sys.modules['_temp'] except KeyError: pass py_compile.compile(mapping[name], invalidation_mode=invalidation_mode) if not del_source: bytecode_path = self.util.cache_from_source(mapping[name]) else: os.unlink(mapping[name]) bytecode_path = make_legacy_pyc(mapping[name]) if manipulator: with open(bytecode_path, 'rb') as file: bc = file.read() new_bc = manipulator(bc) with open(bytecode_path, 'wb') as file: if new_bc is not None: file.write(new_bc) return bytecode_path def _test_empty_file(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: b'', del_source=del_source) test('_temp', mapping, bc_path) @util.writes_bytecode_files def _test_partial_magic(self, test, *, del_source=False): # When their are less than 4 bytes to a .pyc, regenerate it if # possible, else raise ImportError. with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:3], del_source=del_source) test('_temp', mapping, bc_path) def _test_magic_only(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:4], del_source=del_source) test('_temp', mapping, bc_path) def _test_partial_flags(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:7], del_source=del_source) test('_temp', mapping, bc_path) def _test_partial_hash(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode( '_temp', mapping, lambda bc: bc[:13], del_source=del_source, invalidation_mode=py_compile.PycInvalidationMode.CHECKED_HASH, ) test('_temp', mapping, bc_path) with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode( '_temp', mapping, lambda bc: bc[:13], del_source=del_source, 
invalidation_mode=py_compile.PycInvalidationMode.UNCHECKED_HASH, ) test('_temp', mapping, bc_path) def _test_partial_timestamp(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:11], del_source=del_source) test('_temp', mapping, bc_path) def _test_partial_size(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:15], del_source=del_source) test('_temp', mapping, bc_path) def _test_no_marshal(self, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:16], del_source=del_source) file_path = mapping['_temp'] if not del_source else bc_path with self.assertRaises(EOFError): self.import_(file_path, '_temp') def _test_non_code_marshal(self, *, del_source=False): with util.create_modules('_temp') as mapping: bytecode_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:16] + marshal.dumps(b'abcd'), del_source=del_source) file_path = mapping['_temp'] if not del_source else bytecode_path with self.assertRaises(ImportError) as cm: self.import_(file_path, '_temp') self.assertEqual(cm.exception.name, '_temp') self.assertEqual(cm.exception.path, bytecode_path) def _test_bad_marshal(self, *, del_source=False): with util.create_modules('_temp') as mapping: bytecode_path = self.manipulate_bytecode('_temp', mapping, lambda bc: bc[:16] + b'<test>', del_source=del_source) file_path = mapping['_temp'] if not del_source else bytecode_path with self.assertRaises(EOFError): self.import_(file_path, '_temp') def _test_bad_magic(self, test, *, del_source=False): with util.create_modules('_temp') as mapping: bc_path = self.manipulate_bytecode('_temp', mapping, lambda bc: b'\x00\x00\x00\x00' + bc[4:]) test('_temp', mapping, bc_path) class BadBytecodeTestPEP451(BadBytecodeTest): def import_(self, file, 
module_name): loader = self.loader(module_name, file) module = types.ModuleType(module_name) module.__spec__ = self.util.spec_from_loader(module_name, loader) loader.exec_module(module) class BadBytecodeTestPEP302(BadBytecodeTest): def import_(self, file, module_name): loader = self.loader(module_name, file) with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) module = loader.load_module(module_name) self.assertIn(module_name, sys.modules) class SourceLoaderBadBytecodeTest: @classmethod def setUpClass(cls): cls.loader = cls.machinery.SourceFileLoader @util.writes_bytecode_files def test_empty_file(self): # When a .pyc is empty, regenerate it if possible, else raise # ImportError. def test(name, mapping, bytecode_path): self.import_(mapping[name], name) with open(bytecode_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_empty_file(test) def test_partial_magic(self): def test(name, mapping, bytecode_path): self.import_(mapping[name], name) with open(bytecode_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_partial_magic(test) @util.writes_bytecode_files def test_magic_only(self): # When there is only the magic number, regenerate the .pyc if possible, # else raise EOFError. def test(name, mapping, bytecode_path): self.import_(mapping[name], name) with open(bytecode_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_magic_only(test) @util.writes_bytecode_files def test_bad_magic(self): # When the magic number is different, the bytecode should be # regenerated. def test(name, mapping, bytecode_path): self.import_(mapping[name], name) with open(bytecode_path, 'rb') as bytecode_file: self.assertEqual(bytecode_file.read(4), self.util.MAGIC_NUMBER) self._test_bad_magic(test) @util.writes_bytecode_files def test_partial_timestamp(self): # When the timestamp is partial, regenerate the .pyc, else # raise EOFError. 
def test(name, mapping, bc_path): self.import_(mapping[name], name) with open(bc_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_partial_timestamp(test) @util.writes_bytecode_files def test_partial_flags(self): # When the flags is partial, regenerate the .pyc, else raise EOFError. def test(name, mapping, bc_path): self.import_(mapping[name], name) with open(bc_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_partial_flags(test) @util.writes_bytecode_files def test_partial_hash(self): # When the hash is partial, regenerate the .pyc, else raise EOFError. def test(name, mapping, bc_path): self.import_(mapping[name], name) with open(bc_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_partial_hash(test) @util.writes_bytecode_files def test_partial_size(self): # When the size is partial, regenerate the .pyc, else # raise EOFError. def test(name, mapping, bc_path): self.import_(mapping[name], name) with open(bc_path, 'rb') as file: self.assertGreater(len(file.read()), 16) self._test_partial_size(test) @util.writes_bytecode_files def test_no_marshal(self): # When there is only the magic number and timestamp, raise EOFError. self._test_no_marshal() @util.writes_bytecode_files def test_non_code_marshal(self): self._test_non_code_marshal() # XXX ImportError when sourceless # [bad marshal] @util.writes_bytecode_files def test_bad_marshal(self): # Bad marshal data should raise a ValueError. self._test_bad_marshal() # [bad timestamp] @util.writes_bytecode_files @without_source_date_epoch def test_old_timestamp(self): # When the timestamp is older than the source, bytecode should be # regenerated. 
zeros = b'\x00\x00\x00\x00' with util.create_modules('_temp') as mapping: py_compile.compile(mapping['_temp']) bytecode_path = self.util.cache_from_source(mapping['_temp']) with open(bytecode_path, 'r+b') as bytecode_file: bytecode_file.seek(8) bytecode_file.write(zeros) self.import_(mapping['_temp'], '_temp') source_mtime = os.path.getmtime(mapping['_temp']) source_timestamp = self.importlib._pack_uint32(source_mtime) with open(bytecode_path, 'rb') as bytecode_file: bytecode_file.seek(8) self.assertEqual(bytecode_file.read(4), source_timestamp) # [bytecode read-only] @util.writes_bytecode_files def test_read_only_bytecode(self): # When bytecode is read-only but should be rewritten, fail silently. with util.create_modules('_temp') as mapping: # Create bytecode that will need to be re-created. py_compile.compile(mapping['_temp']) bytecode_path = self.util.cache_from_source(mapping['_temp']) with open(bytecode_path, 'r+b') as bytecode_file: bytecode_file.seek(0) bytecode_file.write(b'\x00\x00\x00\x00') # Make the bytecode read-only. os.chmod(bytecode_path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) try: # Should not raise OSError! self.import_(mapping['_temp'], '_temp') finally: # Make writable for eventual clean-up. 
os.chmod(bytecode_path, stat.S_IWUSR) class SourceLoaderBadBytecodeTestPEP451( SourceLoaderBadBytecodeTest, BadBytecodeTestPEP451): pass (Frozen_SourceBadBytecodePEP451, Source_SourceBadBytecodePEP451 ) = util.test_both(SourceLoaderBadBytecodeTestPEP451, importlib=importlib, machinery=machinery, abc=importlib_abc, util=importlib_util) class SourceLoaderBadBytecodeTestPEP302( SourceLoaderBadBytecodeTest, BadBytecodeTestPEP302): pass (Frozen_SourceBadBytecodePEP302, Source_SourceBadBytecodePEP302 ) = util.test_both(SourceLoaderBadBytecodeTestPEP302, importlib=importlib, machinery=machinery, abc=importlib_abc, util=importlib_util) class SourcelessLoaderBadBytecodeTest: @classmethod def setUpClass(cls): cls.loader = cls.machinery.SourcelessFileLoader def test_empty_file(self): def test(name, mapping, bytecode_path): with self.assertRaises(ImportError) as cm: self.import_(bytecode_path, name) self.assertEqual(cm.exception.name, name) self.assertEqual(cm.exception.path, bytecode_path) self._test_empty_file(test, del_source=True) def test_partial_magic(self): def test(name, mapping, bytecode_path): with self.assertRaises(ImportError) as cm: self.import_(bytecode_path, name) self.assertEqual(cm.exception.name, name) self.assertEqual(cm.exception.path, bytecode_path) self._test_partial_magic(test, del_source=True) def test_magic_only(self): def test(name, mapping, bytecode_path): with self.assertRaises(EOFError): self.import_(bytecode_path, name) self._test_magic_only(test, del_source=True) def test_bad_magic(self): def test(name, mapping, bytecode_path): with self.assertRaises(ImportError) as cm: self.import_(bytecode_path, name) self.assertEqual(cm.exception.name, name) self.assertEqual(cm.exception.path, bytecode_path) self._test_bad_magic(test, del_source=True) def test_partial_timestamp(self): def test(name, mapping, bytecode_path): with self.assertRaises(EOFError): self.import_(bytecode_path, name) self._test_partial_timestamp(test, del_source=True) def 
test_partial_flags(self): def test(name, mapping, bytecode_path): with self.assertRaises(EOFError): self.import_(bytecode_path, name) self._test_partial_flags(test, del_source=True) def test_partial_hash(self): def test(name, mapping, bytecode_path): with self.assertRaises(EOFError): self.import_(bytecode_path, name) self._test_partial_hash(test, del_source=True) def test_partial_size(self): def test(name, mapping, bytecode_path): with self.assertRaises(EOFError): self.import_(bytecode_path, name) self._test_partial_size(test, del_source=True) def test_no_marshal(self): self._test_no_marshal(del_source=True) def test_non_code_marshal(self): self._test_non_code_marshal(del_source=True) class SourcelessLoaderBadBytecodeTestPEP451(SourcelessLoaderBadBytecodeTest, BadBytecodeTestPEP451): pass (Frozen_SourcelessBadBytecodePEP451, Source_SourcelessBadBytecodePEP451 ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP451, importlib=importlib, machinery=machinery, abc=importlib_abc, util=importlib_util) class SourcelessLoaderBadBytecodeTestPEP302(SourcelessLoaderBadBytecodeTest, BadBytecodeTestPEP302): pass (Frozen_SourcelessBadBytecodePEP302, Source_SourcelessBadBytecodePEP302 ) = util.test_both(SourcelessLoaderBadBytecodeTestPEP302, importlib=importlib, machinery=machinery, abc=importlib_abc, util=importlib_util) if __name__ == '__main__': unittest.main()
bsd-3-clause
e449c7362a58545a5361784f959b068e
39.903266
93
0.56866
4.192506
false
true
false
false
brython-dev/brython
www/src/Lib/pprint.py
1
24489
# Author: Fred L. Drake, Jr. # fdrake@acm.org # # This is a simple little module I wrote to make life easier. I didn't # see anything quite like it in the library, though I may have overlooked # something. I wrote this when I was trying to read some heavily nested # tuples with fairly non-descriptive content. This is modeled very much # after Lisp/Scheme - style pretty-printing of lists. If you find it # useful, thank small children who sleep at night. """Support to pretty-print lists, tuples, & dictionaries recursively. Very simple, but useful, especially in debugging data structures. Classes ------- PrettyPrinter() Handle pretty-printing operations onto a stream using a configured set of formatting parameters. Functions --------- pformat() Format a Python object into a pretty-printed representation. pprint() Pretty-print a Python object to a stream [default is sys.stdout]. saferepr() Generate a 'standard' repr()-like value, but protect against recursive data structures. """ import collections as _collections import dataclasses as _dataclasses import re import sys as _sys import types as _types from io import StringIO as _StringIO __all__ = ["pprint","pformat","isreadable","isrecursive","saferepr", "PrettyPrinter", "pp"] def pprint(object, stream=None, indent=1, width=80, depth=None, *, compact=False, sort_dicts=True, underscore_numbers=False): """Pretty-print a Python object to a stream [default is sys.stdout].""" printer = PrettyPrinter( stream=stream, indent=indent, width=width, depth=depth, compact=compact, sort_dicts=sort_dicts, underscore_numbers=underscore_numbers) printer.pprint(object) def pformat(object, indent=1, width=80, depth=None, *, compact=False, sort_dicts=True, underscore_numbers=False): """Format a Python object into a pretty-printed representation.""" return PrettyPrinter(indent=indent, width=width, depth=depth, compact=compact, sort_dicts=sort_dicts, underscore_numbers=underscore_numbers).pformat(object) def pp(object, *args, 
sort_dicts=False, **kwargs): """Pretty-print a Python object""" pprint(object, *args, sort_dicts=sort_dicts, **kwargs) def saferepr(object): """Version of repr() which can handle recursive data structures.""" return PrettyPrinter()._safe_repr(object, {}, None, 0)[0] def isreadable(object): """Determine if saferepr(object) is readable by eval().""" return PrettyPrinter()._safe_repr(object, {}, None, 0)[1] def isrecursive(object): """Determine if object requires a recursive representation.""" return PrettyPrinter()._safe_repr(object, {}, None, 0)[2] class _safe_key: """Helper function for key functions when sorting unorderable objects. The wrapped-object will fallback to a Py2.x style comparison for unorderable types (sorting first comparing the type name and then by the obj ids). Does not work recursively, so dict.items() must have _safe_key applied to both the key and the value. """ __slots__ = ['obj'] def __init__(self, obj): self.obj = obj def __lt__(self, other): try: return self.obj < other.obj except TypeError: return ((str(type(self.obj)), id(self.obj)) < \ (str(type(other.obj)), id(other.obj))) def _safe_tuple(t): "Helper function for comparing 2-tuples" return _safe_key(t[0]), _safe_key(t[1]) class PrettyPrinter: def __init__(self, indent=1, width=80, depth=None, stream=None, *, compact=False, sort_dicts=True, underscore_numbers=False): """Handle pretty printing operations onto a stream using a set of configured parameters. indent Number of spaces to indent for each level of nesting. width Attempted maximum number of columns in the output. depth The maximum depth to print out nested structures. stream The desired output stream. If omitted (or false), the standard output stream available at construction will be used. compact If true, several items will be combined in one line. sort_dicts If true, dict keys are sorted. 
""" indent = int(indent) width = int(width) if indent < 0: raise ValueError('indent must be >= 0') if depth is not None and depth <= 0: raise ValueError('depth must be > 0') if not width: raise ValueError('width must be != 0') self._depth = depth self._indent_per_level = indent self._width = width if stream is not None: self._stream = stream else: self._stream = _sys.stdout self._compact = bool(compact) self._sort_dicts = sort_dicts self._underscore_numbers = underscore_numbers def pprint(self, object): if self._stream is not None: self._format(object, self._stream, 0, 0, {}, 0) self._stream.write("\n") def pformat(self, object): sio = _StringIO() self._format(object, sio, 0, 0, {}, 0) return sio.getvalue() def isrecursive(self, object): return self.format(object, {}, 0, 0)[2] def isreadable(self, object): s, readable, recursive = self.format(object, {}, 0, 0) return readable and not recursive def _format(self, object, stream, indent, allowance, context, level): objid = id(object) if objid in context: stream.write(_recursion(object)) self._recursive = True self._readable = False return rep = self._repr(object, context, level) max_width = self._width - indent - allowance if len(rep) > max_width: p = self._dispatch.get(type(object).__repr__, None) if p is not None: context[objid] = 1 p(self, object, stream, indent, allowance, context, level + 1) del context[objid] return elif (_dataclasses.is_dataclass(object) and not isinstance(object, type) and object.__dataclass_params__.repr and # Check dataclass has generated repr method. 
hasattr(object.__repr__, "__wrapped__") and "__create_fn__" in object.__repr__.__wrapped__.__qualname__): context[objid] = 1 self._pprint_dataclass(object, stream, indent, allowance, context, level + 1) del context[objid] return stream.write(rep) def _pprint_dataclass(self, object, stream, indent, allowance, context, level): cls_name = object.__class__.__name__ indent += len(cls_name) + 1 items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr] stream.write(cls_name + '(') self._format_namespace_items(items, stream, indent, allowance, context, level) stream.write(')') _dispatch = {} def _pprint_dict(self, object, stream, indent, allowance, context, level): write = stream.write write('{') if self._indent_per_level > 1: write((self._indent_per_level - 1) * ' ') length = len(object) if length: if self._sort_dicts: items = sorted(object.items(), key=_safe_tuple) else: items = object.items() self._format_dict_items(items, stream, indent, allowance + 1, context, level) write('}') _dispatch[dict.__repr__] = _pprint_dict def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level): if not len(object): stream.write(repr(object)) return cls = object.__class__ stream.write(cls.__name__ + '(') self._format(list(object.items()), stream, indent + len(cls.__name__) + 1, allowance + 1, context, level) stream.write(')') _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict def _pprint_list(self, object, stream, indent, allowance, context, level): stream.write('[') self._format_items(object, stream, indent, allowance + 1, context, level) stream.write(']') _dispatch[list.__repr__] = _pprint_list def _pprint_tuple(self, object, stream, indent, allowance, context, level): stream.write('(') endchar = ',)' if len(object) == 1 else ')' self._format_items(object, stream, indent, allowance + len(endchar), context, level) stream.write(endchar) _dispatch[tuple.__repr__] = _pprint_tuple def _pprint_set(self, object, stream, 
indent, allowance, context, level): if not len(object): stream.write(repr(object)) return typ = object.__class__ if typ is set: stream.write('{') endchar = '}' else: stream.write(typ.__name__ + '({') endchar = '})' indent += len(typ.__name__) + 1 object = sorted(object, key=_safe_key) self._format_items(object, stream, indent, allowance + len(endchar), context, level) stream.write(endchar) _dispatch[set.__repr__] = _pprint_set _dispatch[frozenset.__repr__] = _pprint_set def _pprint_str(self, object, stream, indent, allowance, context, level): write = stream.write if not len(object): write(repr(object)) return chunks = [] lines = object.splitlines(True) if level == 1: indent += 1 allowance += 1 max_width1 = max_width = self._width - indent for i, line in enumerate(lines): rep = repr(line) if i == len(lines) - 1: max_width1 -= allowance if len(rep) <= max_width1: chunks.append(rep) else: # A list of alternating (non-space, space) strings parts = re.findall(r'\S*\s*', line) assert parts assert not parts[-1] parts.pop() # drop empty last part max_width2 = max_width current = '' for j, part in enumerate(parts): candidate = current + part if j == len(parts) - 1 and i == len(lines) - 1: max_width2 -= allowance if len(repr(candidate)) > max_width2: if current: chunks.append(repr(current)) current = part else: current = candidate if current: chunks.append(repr(current)) if len(chunks) == 1: write(rep) return if level == 1: write('(') for i, rep in enumerate(chunks): if i > 0: write('\n' + ' '*indent) write(rep) if level == 1: write(')') _dispatch[str.__repr__] = _pprint_str def _pprint_bytes(self, object, stream, indent, allowance, context, level): write = stream.write if len(object) <= 4: write(repr(object)) return parens = level == 1 if parens: indent += 1 allowance += 1 write('(') delim = '' for rep in _wrap_bytes_repr(object, self._width - indent, allowance): write(delim) write(rep) if not delim: delim = '\n' + ' '*indent if parens: write(')') _dispatch[bytes.__repr__] 
= _pprint_bytes def _pprint_bytearray(self, object, stream, indent, allowance, context, level): write = stream.write write('bytearray(') self._pprint_bytes(bytes(object), stream, indent + 10, allowance + 1, context, level + 1) write(')') _dispatch[bytearray.__repr__] = _pprint_bytearray def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level): stream.write('mappingproxy(') self._format(object.copy(), stream, indent + 13, allowance + 1, context, level) stream.write(')') _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level): if type(object) is _types.SimpleNamespace: # The SimpleNamespace repr is "namespace" instead of the class # name, so we do the same here. For subclasses; use the class name. cls_name = 'namespace' else: cls_name = object.__class__.__name__ indent += len(cls_name) + 1 items = object.__dict__.items() stream.write(cls_name + '(') self._format_namespace_items(items, stream, indent, allowance, context, level) stream.write(')') _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace def _format_dict_items(self, items, stream, indent, allowance, context, level): write = stream.write indent += self._indent_per_level delimnl = ',\n' + ' ' * indent last_index = len(items) - 1 for i, (key, ent) in enumerate(items): last = i == last_index rep = self._repr(key, context, level) write(rep) write(': ') self._format(ent, stream, indent + len(rep) + 2, allowance if last else 1, context, level) if not last: write(delimnl) def _format_namespace_items(self, items, stream, indent, allowance, context, level): write = stream.write delimnl = ',\n' + ' ' * indent last_index = len(items) - 1 for i, (key, ent) in enumerate(items): last = i == last_index write(key) write('=') if id(ent) in context: # Special-case representation of recursion to match standard # recursive dataclass repr. 
write("...") else: self._format(ent, stream, indent + len(key) + 1, allowance if last else 1, context, level) if not last: write(delimnl) def _format_items(self, items, stream, indent, allowance, context, level): write = stream.write indent += self._indent_per_level if self._indent_per_level > 1: write((self._indent_per_level - 1) * ' ') delimnl = ',\n' + ' ' * indent delim = '' width = max_width = self._width - indent + 1 it = iter(items) try: next_ent = next(it) except StopIteration: return last = False while not last: ent = next_ent try: next_ent = next(it) except StopIteration: last = True max_width -= allowance width -= allowance if self._compact: rep = self._repr(ent, context, level) w = len(rep) + 2 if width < w: width = max_width if delim: delim = delimnl if width >= w: width -= w write(delim) delim = ', ' write(rep) continue write(delim) delim = delimnl self._format(ent, stream, indent, allowance if last else 1, context, level) def _repr(self, object, context, level): repr, readable, recursive = self.format(object, context.copy(), self._depth, level) if not readable: self._readable = False if recursive: self._recursive = True return repr def format(self, object, context, maxlevels, level): """Format object for a specific context, returning a string and flags indicating whether the representation is 'readable' and whether the object represents a recursive construct. 
""" return self._safe_repr(object, context, maxlevels, level) def _pprint_default_dict(self, object, stream, indent, allowance, context, level): if not len(object): stream.write(repr(object)) return rdf = self._repr(object.default_factory, context, level) cls = object.__class__ indent += len(cls.__name__) + 1 stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent)) self._pprint_dict(object, stream, indent, allowance + 1, context, level) stream.write(')') _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict def _pprint_counter(self, object, stream, indent, allowance, context, level): if not len(object): stream.write(repr(object)) return cls = object.__class__ stream.write(cls.__name__ + '({') if self._indent_per_level > 1: stream.write((self._indent_per_level - 1) * ' ') items = object.most_common() self._format_dict_items(items, stream, indent + len(cls.__name__) + 1, allowance + 2, context, level) stream.write('})') _dispatch[_collections.Counter.__repr__] = _pprint_counter def _pprint_chain_map(self, object, stream, indent, allowance, context, level): if not len(object.maps): stream.write(repr(object)) return cls = object.__class__ stream.write(cls.__name__ + '(') indent += len(cls.__name__) + 1 for i, m in enumerate(object.maps): if i == len(object.maps) - 1: self._format(m, stream, indent, allowance + 1, context, level) stream.write(')') else: self._format(m, stream, indent, 1, context, level) stream.write(',\n' + ' ' * indent) _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map def _pprint_deque(self, object, stream, indent, allowance, context, level): if not len(object): stream.write(repr(object)) return cls = object.__class__ stream.write(cls.__name__ + '(') indent += len(cls.__name__) + 1 stream.write('[') if object.maxlen is None: self._format_items(object, stream, indent, allowance + 2, context, level) stream.write('])') else: self._format_items(object, stream, indent, 2, context, level) rml = self._repr(object.maxlen, 
context, level) stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml)) _dispatch[_collections.deque.__repr__] = _pprint_deque def _pprint_user_dict(self, object, stream, indent, allowance, context, level): self._format(object.data, stream, indent, allowance, context, level - 1) _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict def _pprint_user_list(self, object, stream, indent, allowance, context, level): self._format(object.data, stream, indent, allowance, context, level - 1) _dispatch[_collections.UserList.__repr__] = _pprint_user_list def _pprint_user_string(self, object, stream, indent, allowance, context, level): self._format(object.data, stream, indent, allowance, context, level - 1) _dispatch[_collections.UserString.__repr__] = _pprint_user_string def _safe_repr(self, object, context, maxlevels, level): # Return triple (repr_string, isreadable, isrecursive). typ = type(object) if typ in _builtin_scalars: return repr(object), True, False r = getattr(typ, "__repr__", None) if issubclass(typ, int) and r is int.__repr__: if self._underscore_numbers: return f"{object:_d}", True, False else: return repr(object), True, False if issubclass(typ, dict) and r is dict.__repr__: if not object: return "{}", True, False objid = id(object) if maxlevels and level >= maxlevels: return "{...}", False, objid in context if objid in context: return _recursion(object), False, True context[objid] = 1 readable = True recursive = False components = [] append = components.append level += 1 if self._sort_dicts: items = sorted(object.items(), key=_safe_tuple) else: items = object.items() for k, v in items: krepr, kreadable, krecur = self.format( k, context, maxlevels, level) vrepr, vreadable, vrecur = self.format( v, context, maxlevels, level) append("%s: %s" % (krepr, vrepr)) readable = readable and kreadable and vreadable if krecur or vrecur: recursive = True del context[objid] return "{%s}" % ", ".join(components), readable, recursive if (issubclass(typ, list) and r is 
list.__repr__) or \ (issubclass(typ, tuple) and r is tuple.__repr__): if issubclass(typ, list): if not object: return "[]", True, False format = "[%s]" elif len(object) == 1: format = "(%s,)" else: if not object: return "()", True, False format = "(%s)" objid = id(object) if maxlevels and level >= maxlevels: return format % "...", False, objid in context if objid in context: return _recursion(object), False, True context[objid] = 1 readable = True recursive = False components = [] append = components.append level += 1 for o in object: orepr, oreadable, orecur = self.format( o, context, maxlevels, level) append(orepr) if not oreadable: readable = False if orecur: recursive = True del context[objid] return format % ", ".join(components), readable, recursive rep = repr(object) return rep, (rep and not rep.startswith('<')), False _builtin_scalars = frozenset({str, bytes, bytearray, float, complex, bool, type(None)}) def _recursion(object): return ("<Recursion on %s with id=%s>" % (type(object).__name__, id(object))) def _perfcheck(object=None): import time if object is None: object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000 p = PrettyPrinter() t1 = time.perf_counter() p._safe_repr(object, {}, None, 0, True) t2 = time.perf_counter() p.pformat(object) t3 = time.perf_counter() print("_safe_repr:", t2 - t1) print("pformat:", t3 - t2) def _wrap_bytes_repr(object, width, allowance): current = b'' last = len(object) // 4 * 4 for i in range(0, len(object), 4): part = object[i: i+4] candidate = current + part if i == last: width -= allowance if len(repr(candidate)) > width: if current: yield repr(current) current = part else: current = candidate if current: yield repr(current) if __name__ == "__main__": _perfcheck()
bsd-3-clause
5dbee4bd5c0dfcb6b84b88553332bad7
35.496274
98
0.535465
4.267863
false
false
false
false
brython-dev/brython
www/src/Lib/test/___simplified_test_int.py
16
5648
import sys L = [ ('0', 0), ('1', 1), ('9', 9), ('10', 10), ('99', 99), ('100', 100), ('314', 314), (' 314', 314), ('314 ', 314), (' \t\t 314 \t\t ', 314), (repr(sys.maxsize), sys.maxsize), (' 1x', ValueError), (' 1 ', 1), (' 1\02 ', ValueError), ('', ValueError), (' ', ValueError), (' \t\t ', ValueError), ("\u0200", ValueError) ] class CtxManager: def __init__(self, exception, **kw): self.exception = exception self.kw = kw def __enter__(self): return self def __exit__(self, exc_type, *args): if exc_type is None or not issubclass(exc_type, self.exception): raise AssertionError('should have raised %s but raised %s' %self.exception, exc_type) return True class TestCase: def assertEqual(self, result, expected): if result != expected: raise AssertionError('expected %s, got %s' %(expected, result)) def assertIsInstance(self, obj, klass): if not isinstance(obj, klass): print(obj,'is not instance of', klass) def assertRaises(self, exception, *args, **kw): if args: callable, *args = args try: callable(*args, **kw) except Exception as exc: if not isinstance(exc, exception): print(callable, args, kw, 'does not raise', exception) else: return CtxManager(exception, **kw) def assertTrue(self, expr): if not expr: raise AssertionError('expr not True : %s' %expr) import pickle class ListTest(TestCase): type2test = list def test_basic(self): self.assertEqual(list([]), []) l0_3 = [0, 1, 2, 3] l0_3_bis = list(l0_3) self.assertEqual(l0_3, l0_3_bis) self.assertTrue(l0_3 is not l0_3_bis) self.assertEqual(list(()), []) self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3]) self.assertEqual(list(''), []) self.assertEqual(list('spam'), ['s', 'p', 'a', 'm']) if sys.maxsize == 0x7fffffff: # This test can currently only work on 32-bit machines. # XXX If/when PySequence_Length() returns a ssize_t, it should be # XXX re-enabled. # Verify clearing of bug #556025. 
# This assumes that the max data size (sys.maxint) == max # address size this also assumes that the address size is at # least 4 bytes with 8 byte addresses, the bug is not well # tested # # Note: This test is expected to SEGV under Cygwin 1.3.12 or # earlier due to a newlib bug. See the following mailing list # thread for the details: # http://sources.redhat.com/ml/newlib/2002/msg00369.html self.assertRaises(MemoryError, list, range(sys.maxsize // 2)) # This code used to segfault in Py2.4a3 x = [] x.extend(-y for y in x) self.assertEqual(x, []) def test_truth(self): super().test_truth() self.assertTrue(not []) self.assertTrue([42]) def test_identity(self): self.assertTrue([] is not []) def test_len(self): super().test_len() self.assertEqual(len([]), 0) self.assertEqual(len([0]), 1) self.assertEqual(len([0, 1, 2]), 3) def test_overflow(self): lst = [4, 5, 6, 7] n = int((sys.maxsize*2+2) // len(lst)) def mul(a, b): return a * b def imul(a, b): a *= b self.assertRaises((MemoryError, OverflowError), mul, lst, n) self.assertRaises((MemoryError, OverflowError), imul, lst, n) def test_repr_large(self): # Check the repr of large list objects def check(n): l = [0] * n s = repr(l) self.assertEqual(s, '[' + ', '.join(['0'] * n) + ']') check(10) # check our checking code check(1000000) def test_iterator_pickle(self): # Userlist iterators don't support pickling yet since # they are based on generators. 
data = self.type2test([4, 5, 6, 7]) it = itorg = iter(data) print(it) d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(data)) it = pickle.loads(d) next(it) d = pickle.dumps(it) self.assertEqual(self.type2test(it), self.type2test(data)[1:]) def test_reversed_pickle(self): data = self.type2test([4, 5, 6, 7]) it = itorg = reversed(data) d = pickle.dumps(it) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(reversed(data))) it = pickle.loads(d) next(it) d = pickle.dumps(it) self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_no_comdat_folding(self): # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding # optimization causes failures in code that relies on distinct # function addresses. class L(list): pass with self.assertRaises(TypeError): (3,) + L([1,2]) def run(test_class): tester = test_class() for attr in dir(tester): if attr.startswith('test_'): print(attr) getattr(tester, attr)() run(ListTest)
bsd-3-clause
fa4993c7b896fe019a9c0aab502858fe
30.377778
80
0.533286
3.70118
false
true
false
false
brython-dev/brython
www/src/Lib/test/test_kqueue.py
6
8966
""" Tests for kqueue wrapper. """ import errno import os import select import socket import time import unittest if not hasattr(select, "kqueue"): raise unittest.SkipTest("test works only on BSD") class TestKQueue(unittest.TestCase): def test_create_queue(self): kq = select.kqueue() self.assertTrue(kq.fileno() > 0, kq.fileno()) self.assertTrue(not kq.closed) kq.close() self.assertTrue(kq.closed) self.assertRaises(ValueError, kq.fileno) def test_create_event(self): from operator import lt, le, gt, ge fd = os.open(os.devnull, os.O_WRONLY) self.addCleanup(os.close, fd) ev = select.kevent(fd) other = select.kevent(1000) self.assertEqual(ev.ident, fd) self.assertEqual(ev.filter, select.KQ_FILTER_READ) self.assertEqual(ev.flags, select.KQ_EV_ADD) self.assertEqual(ev.fflags, 0) self.assertEqual(ev.data, 0) self.assertEqual(ev.udata, 0) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) self.assertTrue(ev < other) self.assertTrue(other >= ev) for op in lt, le, gt, ge: self.assertRaises(TypeError, op, ev, None) self.assertRaises(TypeError, op, ev, 1) self.assertRaises(TypeError, op, ev, "ev") ev = select.kevent(fd, select.KQ_FILTER_WRITE) self.assertEqual(ev.ident, fd) self.assertEqual(ev.filter, select.KQ_FILTER_WRITE) self.assertEqual(ev.flags, select.KQ_EV_ADD) self.assertEqual(ev.fflags, 0) self.assertEqual(ev.data, 0) self.assertEqual(ev.udata, 0) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) ev = select.kevent(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ONESHOT) self.assertEqual(ev.ident, fd) self.assertEqual(ev.filter, select.KQ_FILTER_WRITE) self.assertEqual(ev.flags, select.KQ_EV_ONESHOT) self.assertEqual(ev.fflags, 0) self.assertEqual(ev.data, 0) self.assertEqual(ev.udata, 0) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) ev = select.kevent(1, 2, 3, 4, 5, 6) self.assertEqual(ev.ident, 1) self.assertEqual(ev.filter, 2) self.assertEqual(ev.flags, 3) self.assertEqual(ev.fflags, 4) self.assertEqual(ev.data, 5) self.assertEqual(ev.udata, 6) 
self.assertEqual(ev, ev) self.assertNotEqual(ev, other) bignum = 0x7fff ev = select.kevent(bignum, 1, 2, 3, bignum - 1, bignum) self.assertEqual(ev.ident, bignum) self.assertEqual(ev.filter, 1) self.assertEqual(ev.flags, 2) self.assertEqual(ev.fflags, 3) self.assertEqual(ev.data, bignum - 1) self.assertEqual(ev.udata, bignum) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) # Issue 11973 bignum = 0xffff ev = select.kevent(0, 1, bignum) self.assertEqual(ev.ident, 0) self.assertEqual(ev.filter, 1) self.assertEqual(ev.flags, bignum) self.assertEqual(ev.fflags, 0) self.assertEqual(ev.data, 0) self.assertEqual(ev.udata, 0) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) # Issue 11973 bignum = 0xffffffff ev = select.kevent(0, 1, 2, bignum) self.assertEqual(ev.ident, 0) self.assertEqual(ev.filter, 1) self.assertEqual(ev.flags, 2) self.assertEqual(ev.fflags, bignum) self.assertEqual(ev.data, 0) self.assertEqual(ev.udata, 0) self.assertEqual(ev, ev) self.assertNotEqual(ev, other) def test_queue_event(self): serverSocket = socket.create_server(('127.0.0.1', 0)) client = socket.socket() client.setblocking(False) try: client.connect(('127.0.0.1', serverSocket.getsockname()[1])) except OSError as e: self.assertEqual(e.args[0], errno.EINPROGRESS) else: #raise AssertionError("Connect should have raised EINPROGRESS") pass # FreeBSD doesn't raise an exception here server, addr = serverSocket.accept() kq = select.kqueue() kq2 = select.kqueue.fromfd(kq.fileno()) ev = select.kevent(server.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD | select.KQ_EV_ENABLE) kq.control([ev], 0) ev = select.kevent(server.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) kq.control([ev], 0) ev = select.kevent(client.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_ADD | select.KQ_EV_ENABLE) kq2.control([ev], 0) ev = select.kevent(client.fileno(), select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) kq2.control([ev], 0) events = kq.control(None, 4, 1) 
events = set((e.ident, e.filter) for e in events) self.assertEqual(events, set([ (client.fileno(), select.KQ_FILTER_WRITE), (server.fileno(), select.KQ_FILTER_WRITE)])) client.send(b"Hello!") server.send(b"world!!!") # We may need to call it several times for i in range(10): events = kq.control(None, 4, 1) if len(events) == 4: break time.sleep(1.0) else: self.fail('timeout waiting for event notifications') events = set((e.ident, e.filter) for e in events) self.assertEqual(events, set([ (client.fileno(), select.KQ_FILTER_WRITE), (client.fileno(), select.KQ_FILTER_READ), (server.fileno(), select.KQ_FILTER_WRITE), (server.fileno(), select.KQ_FILTER_READ)])) # Remove completely client, and server read part ev = select.kevent(client.fileno(), select.KQ_FILTER_WRITE, select.KQ_EV_DELETE) kq.control([ev], 0) ev = select.kevent(client.fileno(), select.KQ_FILTER_READ, select.KQ_EV_DELETE) kq.control([ev], 0) ev = select.kevent(server.fileno(), select.KQ_FILTER_READ, select.KQ_EV_DELETE) kq.control([ev], 0, 0) events = kq.control([], 4, 0.99) events = set((e.ident, e.filter) for e in events) self.assertEqual(events, set([ (server.fileno(), select.KQ_FILTER_WRITE)])) client.close() server.close() serverSocket.close() def testPair(self): kq = select.kqueue() a, b = socket.socketpair() a.send(b'foo') event1 = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) event2 = select.kevent(b, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) r = kq.control([event1, event2], 1, 1) self.assertTrue(r) self.assertFalse(r[0].flags & select.KQ_EV_ERROR) self.assertEqual(b.recv(r[0].data), b'foo') a.close() b.close() kq.close() def test_issue30058(self): # changelist must be an iterable kq = select.kqueue() a, b = socket.socketpair() ev = select.kevent(a, select.KQ_FILTER_READ, select.KQ_EV_ADD | select.KQ_EV_ENABLE) kq.control([ev], 0) # not a list kq.control((ev,), 0) # __len__ is not consistent with __iter__ class BadList: def __len__(self): return 
0 def __iter__(self): for i in range(100): yield ev kq.control(BadList(), 0) # doesn't have __len__ kq.control(iter([ev]), 0) a.close() b.close() kq.close() def test_close(self): open_file = open(__file__, "rb") self.addCleanup(open_file.close) fd = open_file.fileno() kqueue = select.kqueue() # test fileno() method and closed attribute self.assertIsInstance(kqueue.fileno(), int) self.assertFalse(kqueue.closed) # test close() kqueue.close() self.assertTrue(kqueue.closed) self.assertRaises(ValueError, kqueue.fileno) # close() can be called more than once kqueue.close() # operations must fail with ValueError("I/O operation on closed ...") self.assertRaises(ValueError, kqueue.control, None, 4) def test_fd_non_inheritable(self): kqueue = select.kqueue() self.addCleanup(kqueue.close) self.assertEqual(os.get_inheritable(kqueue.fileno()), False) if __name__ == "__main__": unittest.main()
bsd-3-clause
ce08c5894434018b16003e1ce1832d4c
33.35249
96
0.564466
3.613865
false
true
false
false
brython-dev/brython
www/src/Lib/encodings/cp862.py
35
34068
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP862.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp862', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x05d0, # HEBREW LETTER ALEF 0x0081: 0x05d1, # HEBREW LETTER BET 0x0082: 0x05d2, # HEBREW LETTER GIMEL 0x0083: 0x05d3, # HEBREW LETTER DALET 0x0084: 0x05d4, # HEBREW LETTER HE 0x0085: 0x05d5, # HEBREW LETTER VAV 0x0086: 0x05d6, # HEBREW LETTER ZAYIN 0x0087: 0x05d7, # HEBREW LETTER HET 0x0088: 0x05d8, # HEBREW LETTER TET 0x0089: 0x05d9, # HEBREW LETTER YOD 0x008a: 0x05da, # HEBREW LETTER FINAL KAF 0x008b: 0x05db, # HEBREW LETTER KAF 0x008c: 0x05dc, # HEBREW LETTER LAMED 0x008d: 0x05dd, # HEBREW LETTER FINAL MEM 0x008e: 0x05de, # HEBREW LETTER MEM 0x008f: 0x05df, # HEBREW LETTER FINAL NUN 0x0090: 0x05e0, # HEBREW LETTER NUN 0x0091: 0x05e1, # HEBREW LETTER SAMEKH 0x0092: 0x05e2, # HEBREW LETTER AYIN 0x0093: 0x05e3, # HEBREW LETTER FINAL PE 0x0094: 0x05e4, # HEBREW LETTER PE 0x0095: 0x05e5, # HEBREW LETTER FINAL TSADI 0x0096: 0x05e6, # HEBREW LETTER TSADI 
0x0097: 0x05e7, # HEBREW LETTER QOF 0x0098: 0x05e8, # HEBREW LETTER RESH 0x0099: 0x05e9, # HEBREW LETTER SHIN 0x009a: 0x05ea, # HEBREW LETTER TAV 0x009b: 0x00a2, # CENT SIGN 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00a5, # YEN SIGN 0x009e: 0x20a7, # PESETA SIGN 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR 0x00a8: 0x00bf, # INVERTED QUESTION MARK 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX 
DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' 
# 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' # 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE 
BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\u05d0' # 0x0080 -> HEBREW LETTER ALEF '\u05d1' # 0x0081 -> HEBREW LETTER BET '\u05d2' # 0x0082 -> HEBREW LETTER GIMEL '\u05d3' # 0x0083 -> HEBREW LETTER DALET '\u05d4' # 0x0084 -> HEBREW LETTER HE '\u05d5' # 0x0085 -> HEBREW LETTER VAV '\u05d6' # 0x0086 -> HEBREW LETTER ZAYIN '\u05d7' # 0x0087 -> HEBREW LETTER HET '\u05d8' # 0x0088 -> HEBREW LETTER TET '\u05d9' # 0x0089 -> HEBREW LETTER YOD '\u05da' # 0x008a -> HEBREW LETTER FINAL KAF '\u05db' # 0x008b -> HEBREW LETTER KAF '\u05dc' # 0x008c -> HEBREW LETTER LAMED '\u05dd' # 0x008d -> HEBREW LETTER FINAL MEM '\u05de' # 0x008e -> HEBREW LETTER MEM '\u05df' # 0x008f -> HEBREW LETTER FINAL NUN '\u05e0' # 0x0090 -> HEBREW LETTER NUN '\u05e1' # 0x0091 -> HEBREW LETTER SAMEKH '\u05e2' # 0x0092 -> HEBREW LETTER AYIN '\u05e3' # 0x0093 -> 
HEBREW LETTER FINAL PE '\u05e4' # 0x0094 -> HEBREW LETTER PE '\u05e5' # 0x0095 -> HEBREW LETTER FINAL TSADI '\u05e6' # 0x0096 -> HEBREW LETTER TSADI '\u05e7' # 0x0097 -> HEBREW LETTER QOF '\u05e8' # 0x0098 -> HEBREW LETTER RESH '\u05e9' # 0x0099 -> HEBREW LETTER SHIN '\u05ea' # 0x009a -> HEBREW LETTER TAV '\xa2' # 0x009b -> CENT SIGN '\xa3' # 0x009c -> POUND SIGN '\xa5' # 0x009d -> YEN SIGN '\u20a7' # 0x009e -> PESETA SIGN '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR '\xbf' # 0x00a8 -> INVERTED QUESTION MARK '\u2310' # 0x00a9 -> REVERSED NOT SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE '\u255b' # 
0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA '\xdf' # 
0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN) '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA '\xb5' # 0x00e6 -> MICRO SIGN '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA '\u221e' # 0x00ec -> INFINITY '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON '\u2229' # 0x00ef -> INTERSECTION '\u2261' # 0x00f0 -> IDENTICAL TO '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO '\u2320' # 0x00f4 -> TOP HALF INTEGRAL '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL '\xf7' # 0x00f6 -> DIVISION SIGN '\u2248' # 0x00f7 -> ALMOST EQUAL TO '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\u221a' # 0x00fb -> SQUARE ROOT '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 
0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER 
Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK 0x00a2: 0x009b, # CENT SIGN 0x00a3: 0x009c, # POUND SIGN 0x00a5: 0x009d, # YEN SIGN 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE 
QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b5: 0x00e6, # MICRO SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00bf: 0x00a8, # INVERTED QUESTION MARK 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN) 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f7: 0x00f6, # DIVISION SIGN 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x05d0: 0x0080, # HEBREW LETTER ALEF 0x05d1: 0x0081, # HEBREW LETTER BET 0x05d2: 0x0082, # HEBREW LETTER GIMEL 0x05d3: 0x0083, # HEBREW LETTER DALET 0x05d4: 0x0084, # HEBREW LETTER HE 0x05d5: 0x0085, # HEBREW LETTER VAV 0x05d6: 0x0086, # HEBREW LETTER ZAYIN 0x05d7: 0x0087, # HEBREW LETTER HET 0x05d8: 0x0088, # HEBREW LETTER TET 0x05d9: 0x0089, # HEBREW LETTER YOD 0x05da: 0x008a, # HEBREW LETTER FINAL KAF 0x05db: 0x008b, # HEBREW LETTER KAF 0x05dc: 0x008c, # HEBREW LETTER LAMED 0x05dd: 0x008d, # HEBREW LETTER FINAL MEM 0x05de: 0x008e, # HEBREW LETTER MEM 0x05df: 0x008f, # HEBREW LETTER FINAL NUN 
0x05e0: 0x0090, # HEBREW LETTER NUN 0x05e1: 0x0091, # HEBREW LETTER SAMEKH 0x05e2: 0x0092, # HEBREW LETTER AYIN 0x05e3: 0x0093, # HEBREW LETTER FINAL PE 0x05e4: 0x0094, # HEBREW LETTER PE 0x05e5: 0x0095, # HEBREW LETTER FINAL TSADI 0x05e6: 0x0096, # HEBREW LETTER TSADI 0x05e7: 0x0097, # HEBREW LETTER QOF 0x05e8: 0x0098, # HEBREW LETTER RESH 0x05e9: 0x0099, # HEBREW LETTER SHIN 0x05ea: 0x009a, # HEBREW LETTER TAV 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x20a7: 0x009e, # PESETA SIGN 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND 
RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
bsd-3-clause
891643d61591cb4e9108e5ae2156d108
46.808023
97
0.586004
2.929825
false
false
false
false
brython-dev/brython
www/src/Lib/errno.py
2
3055
"""This module makes available standard errno system symbols. The value of each symbol is the corresponding integer value, e.g., on most systems, errno.ENOENT equals the integer 2. The dictionary errno.errorcode maps numeric codes to symbol names, e.g., errno.errorcode[2] could be the string 'ENOENT'. Symbols that are not relevant to the underlying system are not defined. To map error codes to error messages, use the function os.strerror(), e.g. os.strerror(2) could return 'No such file or directory'.""" E2BIG = 7 EACCES = 13 EADDRINUSE = 10048 EADDRNOTAVAIL = 10049 EAFNOSUPPORT = 10047 EAGAIN = 11 EALREADY = 10037 EBADF = 9 EBADMSG = 104 EBUSY = 16 ECANCELED = 105 ECHILD = 10 ECONNABORTED = 10053 ECONNREFUSED = 10061 ECONNRESET = 10054 EDEADLK = 36 EDEADLOCK = 36 EDESTADDRREQ = 10039 EDOM = 33 EDQUOT = 10069 EEXIST = 17 EFAULT = 14 EFBIG = 27 EHOSTDOWN = 10064 EHOSTUNREACH = 10065 EIDRM = 111 EILSEQ = 42 EINPROGRESS = 10036 EINTR = 4 EINVAL = 22 EIO = 5 EISCONN = 10056 EISDIR = 21 ELOOP = 10062 EMFILE = 24 EMLINK = 31 EMSGSIZE = 10040 ENAMETOOLONG = 38 ENETDOWN = 10050 ENETRESET = 10052 ENETUNREACH = 10051 ENFILE = 23 ENOBUFS = 10055 ENODATA = 120 ENODEV = 19 ENOENT = 2 ENOEXEC = 8 ENOLCK = 39 ENOLINK = 121 ENOMEM = 12 ENOMSG = 122 ENOPROTOOPT = 10042 ENOSPC = 28 ENOSR = 124 ENOSTR = 125 ENOSYS = 40 ENOTCONN = 10057 ENOTDIR = 20 ENOTEMPTY = 41 ENOTRECOVERABLE = 127 ENOTSOCK = 10038 ENOTSUP = 129 ENOTTY = 25 ENXIO = 6 EOPNOTSUPP = 10045 EOVERFLOW = 132 EOWNERDEAD = 133 EPERM = 1 EPFNOSUPPORT = 10046 EPIPE = 32 EPROTO = 134 EPROTONOSUPPORT = 10043 EPROTOTYPE = 10041 ERANGE = 34 EREMOTE = 10071 EROFS = 30 ESHUTDOWN = 10058 ESOCKTNOSUPPORT = 10044 ESPIPE = 29 ESRCH = 3 ESTALE = 10070 ETIME = 137 ETIMEDOUT = 10060 ETOOMANYREFS = 10059 ETXTBSY = 139 EUSERS = 10068 EWOULDBLOCK = 10035 EXDEV = 18 WSABASEERR = 10000 WSAEACCES = 10013 WSAEADDRINUSE = 10048 WSAEADDRNOTAVAIL = 10049 WSAEAFNOSUPPORT = 10047 WSAEALREADY = 10037 WSAEBADF = 10009 WSAECONNABORTED = 10053 
WSAECONNREFUSED = 10061 WSAECONNRESET = 10054 WSAEDESTADDRREQ = 10039 WSAEDISCON = 10101 WSAEDQUOT = 10069 WSAEFAULT = 10014 WSAEHOSTDOWN = 10064 WSAEHOSTUNREACH = 10065 WSAEINPROGRESS = 10036 WSAEINTR = 10004 WSAEINVAL = 10022 WSAEISCONN = 10056 WSAELOOP = 10062 WSAEMFILE = 10024 WSAEMSGSIZE = 10040 WSAENAMETOOLONG = 10063 WSAENETDOWN = 10050 WSAENETRESET = 10052 WSAENETUNREACH = 10051 WSAENOBUFS = 10055 WSAENOPROTOOPT = 10042 WSAENOTCONN = 10057 WSAENOTEMPTY = 10066 WSAENOTSOCK = 10038 WSAEOPNOTSUPP = 10045 WSAEPFNOSUPPORT = 10046 WSAEPROCLIM = 10067 WSAEPROTONOSUPPORT = 10043 WSAEPROTOTYPE = 10041 WSAEREMOTE = 10071 WSAESHUTDOWN = 10058 WSAESOCKTNOSUPPORT = 10044 WSAESTALE = 10070 WSAETIMEDOUT = 10060 WSAETOOMANYREFS = 10059 WSAEUSERS = 10068 WSAEWOULDBLOCK = 10035 WSANOTINITIALISED = 10093 WSASYSNOTREADY = 10091 WSAVERNOTSUPPORTED = 10092 errorcode = {v:k for (k, v) in globals().items() if k == k.upper()}
bsd-3-clause
82ae1811eaf5f78affb5c3e8fbfb5a24
9.644599
71
0.728642
2.357253
false
false
false
false
brython-dev/brython
www/src/Lib/formatter.py
47
15143
"""Generic output formatting. Formatter objects transform an abstract flow of formatting events into specific output events on writer objects. Formatters manage several stack structures to allow various properties of a writer object to be changed and restored; writers need not be able to handle relative changes nor any sort of ``change back'' operation. Specific writer properties which may be controlled via formatter objects are horizontal alignment, font, and left margin indentations. A mechanism is provided which supports providing arbitrary, non-exclusive style settings to a writer as well. Additional interfaces facilitate formatting events which are not reversible, such as paragraph separation. Writer objects encapsulate device interfaces. Abstract devices, such as file formats, are supported as well as physical devices. The provided implementations all work with abstract devices. The interface makes available mechanisms for setting the properties which formatter objects manage and inserting data into the output. """ import sys import warnings warnings.warn('the formatter module is deprecated', DeprecationWarning, stacklevel=2) AS_IS = None class NullFormatter: """A formatter which does nothing. If the writer parameter is omitted, a NullWriter instance is created. No methods of the writer are called by NullFormatter instances. Implementations should inherit from this class if implementing a writer interface but don't need to inherit any implementation. 
""" def __init__(self, writer=None): if writer is None: writer = NullWriter() self.writer = writer def end_paragraph(self, blankline): pass def add_line_break(self): pass def add_hor_rule(self, *args, **kw): pass def add_label_data(self, format, counter, blankline=None): pass def add_flowing_data(self, data): pass def add_literal_data(self, data): pass def flush_softspace(self): pass def push_alignment(self, align): pass def pop_alignment(self): pass def push_font(self, x): pass def pop_font(self): pass def push_margin(self, margin): pass def pop_margin(self): pass def set_spacing(self, spacing): pass def push_style(self, *styles): pass def pop_style(self, n=1): pass def assert_line_data(self, flag=1): pass class AbstractFormatter: """The standard formatter. This implementation has demonstrated wide applicability to many writers, and may be used directly in most circumstances. It has been used to implement a full-featured World Wide Web browser. """ # Space handling policy: blank spaces at the boundary between elements # are handled by the outermost context. "Literal" data is not checked # to determine context, so spaces in literal data are handled directly # in all circumstances. def __init__(self, writer): self.writer = writer # Output device self.align = None # Current alignment self.align_stack = [] # Alignment stack self.font_stack = [] # Font state self.margin_stack = [] # Margin state self.spacing = None # Vertical spacing state self.style_stack = [] # Other state, e.g. color self.nospace = 1 # Should leading space be suppressed self.softspace = 0 # Should a space be inserted self.para_end = 1 # Just ended a paragraph self.parskip = 0 # Skipped space between paragraphs? 
self.hard_break = 1 # Have a hard break self.have_label = 0 def end_paragraph(self, blankline): if not self.hard_break: self.writer.send_line_break() self.have_label = 0 if self.parskip < blankline and not self.have_label: self.writer.send_paragraph(blankline - self.parskip) self.parskip = blankline self.have_label = 0 self.hard_break = self.nospace = self.para_end = 1 self.softspace = 0 def add_line_break(self): if not (self.hard_break or self.para_end): self.writer.send_line_break() self.have_label = self.parskip = 0 self.hard_break = self.nospace = 1 self.softspace = 0 def add_hor_rule(self, *args, **kw): if not self.hard_break: self.writer.send_line_break() self.writer.send_hor_rule(*args, **kw) self.hard_break = self.nospace = 1 self.have_label = self.para_end = self.softspace = self.parskip = 0 def add_label_data(self, format, counter, blankline = None): if self.have_label or not self.hard_break: self.writer.send_line_break() if not self.para_end: self.writer.send_paragraph((blankline and 1) or 0) if isinstance(format, str): self.writer.send_label_data(self.format_counter(format, counter)) else: self.writer.send_label_data(format) self.nospace = self.have_label = self.hard_break = self.para_end = 1 self.softspace = self.parskip = 0 def format_counter(self, format, counter): label = '' for c in format: if c == '1': label = label + ('%d' % counter) elif c in 'aA': if counter > 0: label = label + self.format_letter(c, counter) elif c in 'iI': if counter > 0: label = label + self.format_roman(c, counter) else: label = label + c return label def format_letter(self, case, counter): label = '' while counter > 0: counter, x = divmod(counter-1, 26) # This makes a strong assumption that lowercase letters # and uppercase letters form two contiguous blocks, with # letters in order! 
s = chr(ord(case) + x) label = s + label return label def format_roman(self, case, counter): ones = ['i', 'x', 'c', 'm'] fives = ['v', 'l', 'd'] label, index = '', 0 # This will die of IndexError when counter is too big while counter > 0: counter, x = divmod(counter, 10) if x == 9: label = ones[index] + ones[index+1] + label elif x == 4: label = ones[index] + fives[index] + label else: if x >= 5: s = fives[index] x = x-5 else: s = '' s = s + ones[index]*x label = s + label index = index + 1 if case == 'I': return label.upper() return label def add_flowing_data(self, data): if not data: return prespace = data[:1].isspace() postspace = data[-1:].isspace() data = " ".join(data.split()) if self.nospace and not data: return elif prespace or self.softspace: if not data: if not self.nospace: self.softspace = 1 self.parskip = 0 return if not self.nospace: data = ' ' + data self.hard_break = self.nospace = self.para_end = \ self.parskip = self.have_label = 0 self.softspace = postspace self.writer.send_flowing_data(data) def add_literal_data(self, data): if not data: return if self.softspace: self.writer.send_flowing_data(" ") self.hard_break = data[-1:] == '\n' self.nospace = self.para_end = self.softspace = \ self.parskip = self.have_label = 0 self.writer.send_literal_data(data) def flush_softspace(self): if self.softspace: self.hard_break = self.para_end = self.parskip = \ self.have_label = self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') def push_alignment(self, align): if align and align != self.align: self.writer.new_alignment(align) self.align = align self.align_stack.append(align) else: self.align_stack.append(self.align) def pop_alignment(self): if self.align_stack: del self.align_stack[-1] if self.align_stack: self.align = align = self.align_stack[-1] self.writer.new_alignment(align) else: self.align = None self.writer.new_alignment(None) def push_font(self, font): size, i, b, tt = font if self.softspace: self.hard_break = self.para_end = 
self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') if self.font_stack: csize, ci, cb, ctt = self.font_stack[-1] if size is AS_IS: size = csize if i is AS_IS: i = ci if b is AS_IS: b = cb if tt is AS_IS: tt = ctt font = (size, i, b, tt) self.font_stack.append(font) self.writer.new_font(font) def pop_font(self): if self.font_stack: del self.font_stack[-1] if self.font_stack: font = self.font_stack[-1] else: font = None self.writer.new_font(font) def push_margin(self, margin): self.margin_stack.append(margin) fstack = [m for m in self.margin_stack if m] if not margin and fstack: margin = fstack[-1] self.writer.new_margin(margin, len(fstack)) def pop_margin(self): if self.margin_stack: del self.margin_stack[-1] fstack = [m for m in self.margin_stack if m] if fstack: margin = fstack[-1] else: margin = None self.writer.new_margin(margin, len(fstack)) def set_spacing(self, spacing): self.spacing = spacing self.writer.new_spacing(spacing) def push_style(self, *styles): if self.softspace: self.hard_break = self.para_end = self.softspace = 0 self.nospace = 1 self.writer.send_flowing_data(' ') for style in styles: self.style_stack.append(style) self.writer.new_styles(tuple(self.style_stack)) def pop_style(self, n=1): del self.style_stack[-n:] self.writer.new_styles(tuple(self.style_stack)) def assert_line_data(self, flag=1): self.nospace = self.hard_break = not flag self.para_end = self.parskip = self.have_label = 0 class NullWriter: """Minimal writer interface to use in testing & inheritance. A writer which only provides the interface definition; no actions are taken on any methods. This should be the base class for all writers which do not need to inherit any implementation methods. 
""" def __init__(self): pass def flush(self): pass def new_alignment(self, align): pass def new_font(self, font): pass def new_margin(self, margin, level): pass def new_spacing(self, spacing): pass def new_styles(self, styles): pass def send_paragraph(self, blankline): pass def send_line_break(self): pass def send_hor_rule(self, *args, **kw): pass def send_label_data(self, data): pass def send_flowing_data(self, data): pass def send_literal_data(self, data): pass class AbstractWriter(NullWriter): """A writer which can be used in debugging formatters, but not much else. Each method simply announces itself by printing its name and arguments on standard output. """ def new_alignment(self, align): print("new_alignment(%r)" % (align,)) def new_font(self, font): print("new_font(%r)" % (font,)) def new_margin(self, margin, level): print("new_margin(%r, %d)" % (margin, level)) def new_spacing(self, spacing): print("new_spacing(%r)" % (spacing,)) def new_styles(self, styles): print("new_styles(%r)" % (styles,)) def send_paragraph(self, blankline): print("send_paragraph(%r)" % (blankline,)) def send_line_break(self): print("send_line_break()") def send_hor_rule(self, *args, **kw): print("send_hor_rule()") def send_label_data(self, data): print("send_label_data(%r)" % (data,)) def send_flowing_data(self, data): print("send_flowing_data(%r)" % (data,)) def send_literal_data(self, data): print("send_literal_data(%r)" % (data,)) class DumbWriter(NullWriter): """Simple writer class which writes output on the file object passed in as the file parameter or, if file is omitted, on standard output. The output is simply word-wrapped to the number of columns specified by the maxcol parameter. This class is suitable for reflowing a sequence of paragraphs. 
""" def __init__(self, file=None, maxcol=72): self.file = file or sys.stdout self.maxcol = maxcol NullWriter.__init__(self) self.reset() def reset(self): self.col = 0 self.atbreak = 0 def send_paragraph(self, blankline): self.file.write('\n'*blankline) self.col = 0 self.atbreak = 0 def send_line_break(self): self.file.write('\n') self.col = 0 self.atbreak = 0 def send_hor_rule(self, *args, **kw): self.file.write('\n') self.file.write('-'*self.maxcol) self.file.write('\n') self.col = 0 self.atbreak = 0 def send_literal_data(self, data): self.file.write(data) i = data.rfind('\n') if i >= 0: self.col = 0 data = data[i+1:] data = data.expandtabs() self.col = self.col + len(data) self.atbreak = 0 def send_flowing_data(self, data): if not data: return atbreak = self.atbreak or data[0].isspace() col = self.col maxcol = self.maxcol write = self.file.write for word in data.split(): if atbreak: if col + len(word) >= maxcol: write('\n') col = 0 else: write(' ') col = col + 1 write(word) col = col + len(word) atbreak = 1 self.col = col self.atbreak = data[-1].isspace() def test(file = None): w = DumbWriter() f = AbstractFormatter(w) if file is not None: fp = open(file) elif sys.argv[1:]: fp = open(sys.argv[1]) else: fp = sys.stdin try: for line in fp: if line == '\n': f.end_paragraph(1) else: f.add_flowing_data(line) finally: if fp is not sys.stdin: fp.close() f.end_paragraph(0) if __name__ == '__main__': test()
bsd-3-clause
b2913cf7e72ad9063fb2aca2739ecc1f
32.502212
77
0.573995
3.906863
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/charmap.py
860
2084
""" Generic Python Character Mapping Codec. Use this codec directly rather than through the automatic conversion mechanisms supplied by unicode() and .encode(). Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY. """#" import codecs ### Codec APIs class Codec(codecs.Codec): # Note: Binding these as C functions will result in the class not # converting them to methods. This is intended. encode = codecs.charmap_encode decode = codecs.charmap_decode class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict', mapping=None): codecs.IncrementalEncoder.__init__(self, errors) self.mapping = mapping def encode(self, input, final=False): return codecs.charmap_encode(input, self.errors, self.mapping)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def __init__(self, errors='strict', mapping=None): codecs.IncrementalDecoder.__init__(self, errors) self.mapping = mapping def decode(self, input, final=False): return codecs.charmap_decode(input, self.errors, self.mapping)[0] class StreamWriter(Codec,codecs.StreamWriter): def __init__(self,stream,errors='strict',mapping=None): codecs.StreamWriter.__init__(self,stream,errors) self.mapping = mapping def encode(self,input,errors='strict'): return Codec.encode(input,errors,self.mapping) class StreamReader(Codec,codecs.StreamReader): def __init__(self,stream,errors='strict',mapping=None): codecs.StreamReader.__init__(self,stream,errors) self.mapping = mapping def decode(self,input,errors='strict'): return Codec.decode(input,errors,self.mapping) ### encodings module API def getregentry(): return codecs.CodecInfo( name='charmap', encode=Codec.encode, decode=Codec.decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamwriter=StreamWriter, streamreader=StreamReader, )
bsd-3-clause
5e7cead3682a10caf1e6a35379723da5
29.202899
73
0.695777
4.323651
false
false
false
false
brython-dev/brython
www/gallery/draw.py
8
7522
from browser import window, document, alert, html container = document['container'] # Game board zone = document['zone'] # Zone where cells move ctx = zone.getContext('2d') tools = document['tools'] # Window dimensions width = window.innerWidth height = window.innerHeight dim = min(width, height) if width>height: tools_pos = 'right' else: tools_pos = 'bottom' d = int(0.03*dim) # Distance of board to top and left of browser window padding = int(dim/25) # Board padding # Adapt container and zone dimensions to browser window dimensions container.top = container.left = d container.style.width = container.style.height = dim-2*d-2*padding container.style.padding = '%spx' %padding zwidth = dim - 2*d - 2*padding zone.height = zwidth zone.width = zwidth # Get position of zone upper left corner relative to window ztop, zleft = zone.abs_top, zone.abs_left # Global variables class Panel: def __init__(self, zone): self.X0 = self.Y0 = None # Initial mouse or finger position self.tool = 'pen' # Current tool self.drawing = False # Current state self.color = color self.bgcolor = bgcolor self.rubber_size = 5 self.line_width = 1 zone.bind('mousedown', self.click) zone.bind('mousemove', self.move) zone.bind('mouseup', self.release) zone.bind('touchstart', self.click) zone.bind('touchmove', self.move) zone.bind('touchend', self.release) def mouse_pos(self, ev): # New mouse / finger position if ev.type[:5] == 'mouse' or ev.type=='click': x, y = ev.pageX, ev.pageY else: touch = ev.targetTouches[0] x, y = touch.pageX, touch.pageY return x, y def click(self, ev): """Handler for mousedown or finger touch""" if ev.type == 'touchstart': if len(ev.targetTouches)>1: return # New mouse / finger position self.X0, self.Y0 = self.mouse_pos(ev) if self.tool == 'pen': ctx.lineWidth = self.line_width ctx.strokeStyle = self.color elif self.tool == 'select': self.store = ctx.getImageData(0, 0, zone.width, zone.height) elif self.tool == 'rubber': ctx.fillStyle = self.bgcolor self.drawing = True def 
release(self, ev): """Handler for mouse or finger release""" if self.tool == 'select': self.tool = 'selected' self.X1, self.Y1 = self.mouse_pos(ev) self.X0, self.X1 = min(self.X0, self.X1), max(self.X0, self.X1) self.Y0, self.Y1 = min(self.Y0, self.Y1), max(self.Y0, self.Y1) zone.style.cursor = 'move' self.drawing = False def move(self, ev): """Handler for mouse or finger move""" if not self.drawing: return # New mouse / finger position X, Y = self.mouse_pos(ev) if self.tool == 'pen': ctx.beginPath() ctx.moveTo(self.X0-zleft, self.Y0-ztop) ctx.lineTo(X-zleft, Y-ztop) ctx.stroke() ctx.closePath() self.X0, self.Y0 = X, Y elif self.tool == 'select': ctx.putImageData(self.store, 0, 0) ctx.strokeRect(self.X0-zleft, self.Y0-ztop, X-self.X0, Y-self.Y0) elif self.tool == 'selected': if X>=self.X0 and X<=self.X1 and Y>=self.Y0 and Y<=self.Y1: zone.style.cursor = 'move' else: zone.style.cursor = 'default' elif self.tool == 'rubber': ctx.strokeStyle = '#808' ctx.rect(X-zleft, Y-ztop, self.rubber_size, self.rubber_size) ctx.fill() def no_sel(ev): ev.preventDefault() ev.stopPropagation() # avoid default behaviour to select text when dragging mouse document.bind('mousedown', no_sel) document.bind('mousemove', no_sel) document.bind('touchmove', no_sel) def pick_rgb(ev, tool): div = ev.target x, y = panel.mouse_pos(ev) current = getattr(panel, tool).strip('#') color_elts = [current[i:i+2] for i in (0, 2, 4)] rgb = int(256*((x-div.abs_left)/div.width)) # move slider slider = div.get(selector='DIV')[0] slider.left = int(div.width*rgb/256) comp = hex(rgb)[2:] if len(comp)==1: comp = '0'+comp color_elts[div.num] = comp new_color = '#'+''.join(color_elts) setattr(panel, tool, new_color) color_buttons[tool].style.backgroundColor = new_color color_panel = None def pick_color(tool): global color_panel if color_panel is not None: print('remove color panel') color_panel.parent.remove(color_panel) color_panel = None return else: print('create color panel') color_panel = 
html.DIV(Class="color_panel") container <= color_panel color_panel.top = zwidth//10 color_panel.left = zwidth//10 color_panel.style.width = int(0.9*zwidth) color_panel.style.height = int(0.9*zwidth) color = getattr(panel, tool) print(color) for i, base_color in enumerate(['#ff0000', '#00ff00', '#0000ff']): div = html.DIV('&nbsp;', style=dict(position="absolute", left = int(0.05*zwidth), top = int((i+1)*0.2*zwidth), width = int(0.8*zwidth), backgroundColor = base_color, lineHeight = int(0.01*zwidth) ) ) div.num = i div.bind('click', lambda ev: pick_rgb(ev, tool)) color_panel <= div slider = html.DIV('&nbsp;', Class='slider') slider.width = zwidth//50 rgb = int(color[1:][2*i:2*i+2], 16) slider.left = int(div.width*rgb/256) div <= slider def select(tool): panel.tool = tool if tool=='pen': zone.style.cursor = 'default' elif tool=='select': zone.style.cursor = 'crosshair' elif tool=='rubber': zone.style.cursor = 'pointer' elif tool in ['color', 'bgcolor']: pick_color(tool) if tools_pos=='right': tools.top = container.top tools.left = container.left+container.offsetWidth+10 tools.style.width = "5em" tools.style.height = container.offsetHeight else: tools.top = container.top + container.offsetHeight+20 tools.left = container.left tools.style.width = container.offsetWidth tools.style.height = "2.5em" btn = html.BUTTON('&#9997;') btn.bind('click', lambda ev, tool='pen':select(tool)) tools <= btn btn = html.BUTTON('&#9744;') btn.bind('click', lambda ev, tool='select':select(tool)) #if tools_pos == 'right': # tools <= html.P() #tools <= btn btn = html.BUTTON('&curren;') btn.bind('click', lambda ev, tool='rubber':select(tool)) if tools_pos == 'right': tools <= html.P() tools <= btn color = '#000000' bgcolor = '#ffffff' btn_color = html.BUTTON('&nbsp;', style=dict(backgroundColor=color)) btn_color.bind('click', lambda ev, tool='color':select(tool)) if tools_pos == 'right': tools <= html.P() tools <= btn_color btn_bgcolor = html.BUTTON('&nbsp;', 
style=dict(backgroundColor=bgcolor)) btn_bgcolor.bind('click', lambda ev, tool='bgcolor':select(tool)) if tools_pos == 'right': tools <= html.P() tools <= btn_bgcolor color_buttons = {'color': btn_color, 'bgcolor': btn_bgcolor} panel = Panel(zone)
bsd-3-clause
e822743d1f8012c526145b3688c2228e
27.60076
75
0.580697
3.362539
false
false
false
false
brython-dev/brython
www/src/Lib/socket.py
1
37160
# Wrapper module for _socket, providing some additional facilities # implemented in Python. """\ This module provides socket operations and some related functions. On Unix, it supports IP (Internet Protocol) and Unix domain sockets. On other systems, it only supports IP. Functions specific for a socket are available as methods of the socket object. Functions: socket() -- create a new socket object socketpair() -- create a pair of new socket objects [*] fromfd() -- create a socket object from an open file descriptor [*] send_fds() -- Send file descriptor to the socket. recv_fds() -- Receive file descriptors from the socket. fromshare() -- create a socket object from data received from socket.share() [*] gethostname() -- return the current hostname gethostbyname() -- map a hostname to its IP number gethostbyaddr() -- map an IP number or hostname to DNS info getservbyname() -- map a service name and a protocol name to a port number getprotobyname() -- map a protocol name (e.g. 'tcp') to a number ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order htons(), htonl() -- convert 16, 32 bit int from host to network byte order inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) socket.getdefaulttimeout() -- get the default timeout value socket.setdefaulttimeout() -- set the default timeout value create_connection() -- connects to an address, with an optional timeout and optional source address. [*] not available on all platforms! 
Special objects: SocketType -- type object for socket objects error -- exception raised for I/O errors has_ipv6 -- boolean value indicating if IPv6 is supported IntEnum constants: AF_INET, AF_UNIX -- socket domains (first argument to socket() call) SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) Integer constants: Many other constants may be defined; these may be used in calls to the setsockopt() and getsockopt() methods. """ import _socket from _socket import * import os, sys, io, selectors from enum import IntEnum, IntFlag try: import errno except ImportError: errno = None EBADF = getattr(errno, 'EBADF', 9) EAGAIN = getattr(errno, 'EAGAIN', 11) EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) __all__ = ["fromfd", "getfqdn", "create_connection", "create_server", "has_dualstack_ipv6", "AddressFamily", "SocketKind"] __all__.extend(os._get_exports_list(_socket)) # Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for # nicer string representations. # Note that _socket only knows about the integer values. The public interface # in this module understands the enums and translates them back from integers # where needed (e.g. .family property of a socket object). IntEnum._convert_( 'AddressFamily', __name__, lambda C: C.isupper() and C.startswith('AF_')) IntEnum._convert_( 'SocketKind', __name__, lambda C: C.isupper() and C.startswith('SOCK_')) IntFlag._convert_( 'MsgFlag', __name__, lambda C: C.isupper() and C.startswith('MSG_')) IntFlag._convert_( 'AddressInfo', __name__, lambda C: C.isupper() and C.startswith('AI_')) _LOCALHOST = '127.0.0.1' _LOCALHOST_V6 = '::1' def _intenum_converter(value, enum_klass): """Convert a numeric family value to an IntEnum member. If it's not a known member, return the numeric value itself. """ try: return enum_klass(value) except ValueError: return value # WSA error codes if sys.platform.lower().startswith("win"): errorTab = {} errorTab[6] = "Specified event object handle is invalid." 
errorTab[8] = "Insufficient memory available." errorTab[87] = "One or more parameters are invalid." errorTab[995] = "Overlapped operation aborted." errorTab[996] = "Overlapped I/O event object not in signaled state." errorTab[997] = "Overlapped operation will complete later." errorTab[10004] = "The operation was interrupted." errorTab[10009] = "A bad file handle was passed." errorTab[10013] = "Permission denied." errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT errorTab[10022] = "An invalid operation was attempted." errorTab[10024] = "Too many open files." errorTab[10035] = "The socket operation would block." errorTab[10036] = "A blocking operation is already in progress." errorTab[10037] = "Operation already in progress." errorTab[10038] = "Socket operation on nonsocket." errorTab[10039] = "Destination address required." errorTab[10040] = "Message too long." errorTab[10041] = "Protocol wrong type for socket." errorTab[10042] = "Bad protocol option." errorTab[10043] = "Protocol not supported." errorTab[10044] = "Socket type not supported." errorTab[10045] = "Operation not supported." errorTab[10046] = "Protocol family not supported." errorTab[10047] = "Address family not supported by protocol family." errorTab[10048] = "The network address is in use." errorTab[10049] = "Cannot assign requested address." errorTab[10050] = "Network is down." errorTab[10051] = "Network is unreachable." errorTab[10052] = "Network dropped connection on reset." errorTab[10053] = "Software caused connection abort." errorTab[10054] = "The connection has been reset." errorTab[10055] = "No buffer space available." errorTab[10056] = "Socket is already connected." errorTab[10057] = "Socket is not connected." errorTab[10058] = "The network has been shut down." errorTab[10059] = "Too many references." errorTab[10060] = "The operation timed out." errorTab[10061] = "Connection refused." errorTab[10062] = "Cannot translate name." errorTab[10063] = "The name is too long." 
errorTab[10064] = "The host is down." errorTab[10065] = "The host is unreachable." errorTab[10066] = "Directory not empty." errorTab[10067] = "Too many processes." errorTab[10068] = "User quota exceeded." errorTab[10069] = "Disk quota exceeded." errorTab[10070] = "Stale file handle reference." errorTab[10071] = "Item is remote." errorTab[10091] = "Network subsystem is unavailable." errorTab[10092] = "Winsock.dll version out of range." errorTab[10093] = "Successful WSAStartup not yet performed." errorTab[10101] = "Graceful shutdown in progress." errorTab[10102] = "No more results from WSALookupServiceNext." errorTab[10103] = "Call has been canceled." errorTab[10104] = "Procedure call table is invalid." errorTab[10105] = "Service provider is invalid." errorTab[10106] = "Service provider failed to initialize." errorTab[10107] = "System call failure." errorTab[10108] = "Service not found." errorTab[10109] = "Class type not found." errorTab[10110] = "No more results from WSALookupServiceNext." errorTab[10111] = "Call was canceled." errorTab[10112] = "Database query was refused." errorTab[11001] = "Host not found." errorTab[11002] = "Nonauthoritative host not found." errorTab[11003] = "This is a nonrecoverable error." errorTab[11004] = "Valid name, no data record requested type." errorTab[11005] = "QoS receivers." errorTab[11006] = "QoS senders." errorTab[11007] = "No QoS senders." errorTab[11008] = "QoS no receivers." errorTab[11009] = "QoS request confirmed." errorTab[11010] = "QoS admission error." errorTab[11011] = "QoS policy failure." errorTab[11012] = "QoS bad style." errorTab[11013] = "QoS bad object." errorTab[11014] = "QoS traffic control error." errorTab[11015] = "QoS generic error." errorTab[11016] = "QoS service type error." errorTab[11017] = "QoS flowspec error." errorTab[11018] = "Invalid QoS provider buffer." errorTab[11019] = "Invalid QoS filter style." errorTab[11020] = "Invalid QoS filter style." errorTab[11021] = "Incorrect QoS filter count." 
errorTab[11022] = "Invalid QoS object length." errorTab[11023] = "Incorrect QoS flow count." errorTab[11024] = "Unrecognized QoS object." errorTab[11025] = "Invalid QoS policy object." errorTab[11026] = "Invalid QoS flow descriptor." errorTab[11027] = "Invalid QoS provider-specific flowspec." errorTab[11028] = "Invalid QoS provider-specific filterspec." errorTab[11029] = "Invalid QoS shape discard mode object." errorTab[11030] = "Invalid QoS shaping rate object." errorTab[11031] = "Reserved policy QoS element type." __all__.append("errorTab") class _GiveupOnSendfile(Exception): pass class socket(_socket.socket): """A subclass of _socket.socket adding the makefile() method.""" __slots__ = ["__weakref__", "_io_refs", "_closed"] def __init__(self, family=-1, type=-1, proto=-1, fileno=None): # For user code address family and type values are IntEnum members, but # for the underlying _socket.socket they're just integers. The # constructor of _socket.socket converts the given argument to an # integer automatically. if fileno is None: if family == -1: family = AF_INET if type == -1: type = SOCK_STREAM if proto == -1: proto = 0 _socket.socket.__init__(self, family, type, proto, fileno) self._io_refs = 0 self._closed = False def __enter__(self): return self def __exit__(self, *args): if not self._closed: self.close() def __repr__(self): """Wrap __repr__() to reveal the real class name and socket address(es). 
""" closed = getattr(self, '_closed', False) s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ % (self.__class__.__module__, self.__class__.__qualname__, " [closed]" if closed else "", self.fileno(), self.family, self.type, self.proto) if not closed: try: laddr = self.getsockname() if laddr: s += ", laddr=%s" % str(laddr) except error: pass try: raddr = self.getpeername() if raddr: s += ", raddr=%s" % str(raddr) except error: pass s += '>' return s def __getstate__(self): raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") def dup(self): """dup() -> socket object Duplicate the socket. Return a new socket object connected to the same system resource. The new socket is non-inheritable. """ fd = dup(self.fileno()) sock = self.__class__(self.family, self.type, self.proto, fileno=fd) sock.settimeout(self.gettimeout()) return sock def accept(self): """accept() -> (socket object, address info) Wait for an incoming connection. Return a new socket representing the connection, and the address of the client. For IP sockets, the address info is a pair (hostaddr, port). """ fd, addr = self._accept() sock = socket(self.family, self.type, self.proto, fileno=fd) # Issue #7995: if no default timeout is set and the listening # socket had a (non-zero) timeout, force the new socket in blocking # mode to override platform-specific socket flags inheritance. if getdefaulttimeout() is None and self.gettimeout(): sock.setblocking(True) return sock, addr def makefile(self, mode="r", buffering=None, *, encoding=None, errors=None, newline=None): """makefile(...) -> an I/O stream connected to the socket The arguments are as for io.open() after the filename, except the only supported mode values are 'r' (default), 'w' and 'b'. """ # XXX refactor to share code? 
if not set(mode) <= {"r", "w", "b"}: raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) writing = "w" in mode reading = "r" in mode or not writing assert reading or writing binary = "b" in mode rawmode = "" if reading: rawmode += "r" if writing: rawmode += "w" raw = SocketIO(self, rawmode) self._io_refs += 1 if buffering is None: buffering = -1 if buffering < 0: buffering = io.DEFAULT_BUFFER_SIZE if buffering == 0: if not binary: raise ValueError("unbuffered streams must be binary") return raw if reading and writing: buffer = io.BufferedRWPair(raw, raw, buffering) elif reading: buffer = io.BufferedReader(raw, buffering) else: assert writing buffer = io.BufferedWriter(raw, buffering) if binary: return buffer encoding = io.text_encoding(encoding) text = io.TextIOWrapper(buffer, encoding, errors, newline) text.mode = mode return text if hasattr(os, 'sendfile'): def _sendfile_use_sendfile(self, file, offset=0, count=None): self._check_sendfile_params(file, offset, count) sockno = self.fileno() try: fileno = file.fileno() except (AttributeError, io.UnsupportedOperation) as err: raise _GiveupOnSendfile(err) # not a regular file try: fsize = os.fstat(fileno).st_size except OSError as err: raise _GiveupOnSendfile(err) # not a regular file if not fsize: return 0 # empty file # Truncate to 1GiB to avoid OverflowError, see bpo-38319. blocksize = min(count or fsize, 2 ** 30) timeout = self.gettimeout() if timeout == 0: raise ValueError("non-blocking sockets are not supported") # poll/select have the advantage of not requiring any # extra file descriptor, contrarily to epoll/kqueue # (also, they require a single syscall). 
if hasattr(selectors, 'PollSelector'): selector = selectors.PollSelector() else: selector = selectors.SelectSelector() selector.register(sockno, selectors.EVENT_WRITE) total_sent = 0 # localize variable access to minimize overhead selector_select = selector.select os_sendfile = os.sendfile try: while True: if timeout and not selector_select(timeout): raise TimeoutError('timed out') if count: blocksize = count - total_sent if blocksize <= 0: break try: sent = os_sendfile(sockno, fileno, offset, blocksize) except BlockingIOError: if not timeout: # Block until the socket is ready to send some # data; avoids hogging CPU resources. selector_select() continue except OSError as err: if total_sent == 0: # We can get here for different reasons, the main # one being 'file' is not a regular mmap(2)-like # file, in which case we'll fall back on using # plain send(). raise _GiveupOnSendfile(err) raise err from None else: if sent == 0: break # EOF offset += sent total_sent += sent return total_sent finally: if total_sent > 0 and hasattr(file, 'seek'): file.seek(offset) else: def _sendfile_use_sendfile(self, file, offset=0, count=None): raise _GiveupOnSendfile( "os.sendfile() not available on this platform") def _sendfile_use_send(self, file, offset=0, count=None): self._check_sendfile_params(file, offset, count) if self.gettimeout() == 0: raise ValueError("non-blocking sockets are not supported") if offset: file.seek(offset) blocksize = min(count, 8192) if count else 8192 total_sent = 0 # localize variable access to minimize overhead file_read = file.read sock_send = self.send try: while True: if count: blocksize = min(count - total_sent, blocksize) if blocksize <= 0: break data = memoryview(file_read(blocksize)) if not data: break # EOF while True: try: sent = sock_send(data) except BlockingIOError: continue else: total_sent += sent if sent < len(data): data = data[sent:] else: break return total_sent finally: if total_sent > 0 and hasattr(file, 'seek'): file.seek(offset + 
total_sent) def _check_sendfile_params(self, file, offset, count): if 'b' not in getattr(file, 'mode', 'b'): raise ValueError("file should be opened in binary mode") if not self.type & SOCK_STREAM: raise ValueError("only SOCK_STREAM type sockets are supported") if count is not None: if not isinstance(count, int): raise TypeError( "count must be a positive integer (got {!r})".format(count)) if count <= 0: raise ValueError( "count must be a positive integer (got {!r})".format(count)) def sendfile(self, file, offset=0, count=None): """sendfile(file[, offset[, count]]) -> sent Send a file until EOF is reached by using high-performance os.sendfile() and return the total number of bytes which were sent. *file* must be a regular file object opened in binary mode. If os.sendfile() is not available (e.g. Windows) or file is not a regular file socket.send() will be used instead. *offset* tells from where to start reading the file. If specified, *count* is the total number of bytes to transmit as opposed to sending the file until EOF is reached. File position is updated on return or also in case of error in which case file.tell() can be used to figure out the number of bytes which were sent. The socket must be of SOCK_STREAM type. Non-blocking sockets are not supported. """ try: return self._sendfile_use_sendfile(file, offset, count) except _GiveupOnSendfile: return self._sendfile_use_send(file, offset, count) def _decref_socketios(self): if self._io_refs > 0: self._io_refs -= 1 if self._closed: self.close() def _real_close(self, _ss=_socket.socket): # This function should not reference any globals. See issue #808164. _ss.close(self) def close(self): # This function should not reference any globals. See issue #808164. self._closed = True if self._io_refs <= 0: self._real_close() def detach(self): """detach() -> file descriptor Close the socket object without closing the underlying file descriptor. 
The object cannot be used after this call, but the file descriptor can be reused for other purposes. The file descriptor is returned. """ self._closed = True return super().detach() @property def family(self): """Read-only access to the address family for this socket. """ return _intenum_converter(super().family, AddressFamily) @property def type(self): """Read-only access to the socket type. """ return _intenum_converter(super().type, SocketKind) if os.name == 'nt': def get_inheritable(self): return os.get_handle_inheritable(self.fileno()) def set_inheritable(self, inheritable): os.set_handle_inheritable(self.fileno(), inheritable) else: def get_inheritable(self): return os.get_inheritable(self.fileno()) def set_inheritable(self, inheritable): os.set_inheritable(self.fileno(), inheritable) get_inheritable.__doc__ = "Get the inheritable flag of the socket" set_inheritable.__doc__ = "Set the inheritable flag of the socket" def fromfd(fd, family, type, proto=0): """ fromfd(fd, family, type[, proto]) -> socket object Create a socket object from a duplicate of the given file descriptor. The remaining arguments are the same as for socket(). """ nfd = dup(fd) return socket(family, type, proto, nfd) if hasattr(_socket.socket, "sendmsg"): import array def send_fds(sock, buffers, fds, flags=0, address=None): """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer Send the list of file descriptors fds over an AF_UNIX socket. """ return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, _socket.SCM_RIGHTS, array.array("i", fds))]) __all__.append("send_fds") if hasattr(_socket.socket, "recvmsg"): import array def recv_fds(sock, bufsize, maxfds, flags=0): """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file descriptors, msg_flags, address) Receive up to maxfds file descriptors returning the message data and a list containing the descriptors. 
""" # Array of ints fds = array.array("i") msg, ancdata, flags, addr = sock.recvmsg(bufsize, _socket.CMSG_LEN(maxfds * fds.itemsize)) for cmsg_level, cmsg_type, cmsg_data in ancdata: if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) return msg, list(fds), flags, addr __all__.append("recv_fds") if hasattr(_socket.socket, "share"): def fromshare(info): """ fromshare(info) -> socket object Create a socket object from the bytes object returned by socket.share(pid). """ return socket(0, 0, 0, info) __all__.append("fromshare") if hasattr(_socket, "socketpair"): def socketpair(family=None, type=SOCK_STREAM, proto=0): """socketpair([family[, type[, proto]]]) -> (socket object, socket object) Create a pair of socket objects from the sockets returned by the platform socketpair() function. The arguments are the same as for socket() except the default family is AF_UNIX if defined on the platform; otherwise, the default is AF_INET. """ if family is None: try: family = AF_UNIX except NameError: family = AF_INET a, b = _socket.socketpair(family, type, proto) a = socket(family, type, proto, a.detach()) b = socket(family, type, proto, b.detach()) return a, b else: # Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. def socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): if family == AF_INET: host = _LOCALHOST elif family == AF_INET6: host = _LOCALHOST_V6 else: raise ValueError("Only AF_INET and AF_INET6 socket address families " "are supported") if type != SOCK_STREAM: raise ValueError("Only SOCK_STREAM socket type is supported") if proto != 0: raise ValueError("Only protocol zero is supported") # We create a connected TCP socket. Note the trick with # setblocking(False) that prevents us from having to create a thread. 
lsock = socket(family, type, proto) try: lsock.bind((host, 0)) lsock.listen() # On IPv6, ignore flow_info and scope_id addr, port = lsock.getsockname()[:2] csock = socket(family, type, proto) try: csock.setblocking(False) try: csock.connect((addr, port)) except (BlockingIOError, InterruptedError): pass csock.setblocking(True) ssock, _ = lsock.accept() except: csock.close() raise finally: lsock.close() return (ssock, csock) __all__.append("socketpair") socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object) Create a pair of socket objects from the sockets returned by the platform socketpair() function. The arguments are the same as for socket() except the default family is AF_UNIX if defined on the platform; otherwise, the default is AF_INET. """ _blocking_errnos = { EAGAIN, EWOULDBLOCK } class SocketIO(io.RawIOBase): """Raw I/O implementation for stream sockets. This class supports the makefile() method on sockets. It provides the raw I/O interface on top of a socket object. """ # One might wonder why not let FileIO do the job instead. There are two # main reasons why FileIO is not adapted: # - it wouldn't work under Windows (where you can't used read() and # write() on a socket handle) # - it wouldn't work with socket timeouts (FileIO would ignore the # timeout and consider the socket non-blocking) # XXX More docs def __init__(self, sock, mode): if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): raise ValueError("invalid mode: %r" % mode) io.RawIOBase.__init__(self) self._sock = sock if "b" not in mode: mode += "b" self._mode = mode self._reading = "r" in mode self._writing = "w" in mode self._timeout_occurred = False def readinto(self, b): """Read up to len(b) bytes into the writable buffer *b* and return the number of bytes read. If the socket is non-blocking and no bytes are available, None is returned. If *b* is non-empty, a 0 return value indicates that the connection was shutdown at the other end. 
""" self._checkClosed() self._checkReadable() if self._timeout_occurred: raise OSError("cannot read from timed out object") while True: try: return self._sock.recv_into(b) except timeout: self._timeout_occurred = True raise except error as e: if e.errno in _blocking_errnos: return None raise def write(self, b): """Write the given bytes or bytearray object *b* to the socket and return the number of bytes written. This can be less than len(b) if not all data could be written. If the socket is non-blocking and no bytes could be written None is returned. """ self._checkClosed() self._checkWritable() try: return self._sock.send(b) except error as e: # XXX what about EINTR? if e.errno in _blocking_errnos: return None raise def readable(self): """True if the SocketIO is open for reading. """ if self.closed: raise ValueError("I/O operation on closed socket.") return self._reading def writable(self): """True if the SocketIO is open for writing. """ if self.closed: raise ValueError("I/O operation on closed socket.") return self._writing def seekable(self): """True if the SocketIO is open for seeking. """ if self.closed: raise ValueError("I/O operation on closed socket.") return super().seekable() def fileno(self): """Return the file descriptor of the underlying socket. """ self._checkClosed() return self._sock.fileno() @property def name(self): if not self.closed: return self.fileno() else: return -1 @property def mode(self): return self._mode def close(self): """Close the SocketIO object. This doesn't close the underlying socket, except if all references to it have disappeared. """ if self.closed: return io.RawIOBase.close(self) self._sock._decref_socketios() self._sock = None def getfqdn(name=''): """Get fully qualified domain name from name. An empty argument is interpreted as meaning the local host. First the hostname returned by gethostbyaddr() is checked, then possibly existing aliases. In case no FQDN is available and `name` was given, it is returned unchanged. 
If `name` was empty or '0.0.0.0', hostname from gethostname() is returned. """ name = name.strip() if not name or name == '0.0.0.0': name = gethostname() try: hostname, aliases, ipaddrs = gethostbyaddr(name) except error: pass else: aliases.insert(0, hostname) for name in aliases: if '.' in name: break else: name = hostname return name _GLOBAL_DEFAULT_TIMEOUT = object() def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None, *, all_errors=False): """Connect to *address* and return the socket object. Convenience function. Connect to *address* (a 2-tuple ``(host, port)``) and return the socket object. Passing the optional *timeout* parameter will set the timeout on the socket instance before attempting to connect. If no *timeout* is supplied, the global default timeout setting returned by :func:`getdefaulttimeout` is used. If *source_address* is set it must be a tuple of (host, port) for the socket to bind as a source address before making the connection. A host of '' or port 0 tells the OS to use the default. When a connection cannot be created, raises the last error if *all_errors* is False, and an ExceptionGroup of all errors if *all_errors* is True. 
""" host, port = address exceptions = [] for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) if source_address: sock.bind(source_address) sock.connect(sa) # Break explicitly a reference cycle exceptions.clear() return sock except error as exc: if not all_errors: exceptions.clear() # raise only the last error exceptions.append(exc) if sock is not None: sock.close() if len(exceptions): try: if not all_errors: raise exceptions[0] raise ExceptionGroup("create_connection failed", exceptions) finally: # Break explicitly a reference cycle exceptions.clear() else: raise error("getaddrinfo returns an empty list") def has_dualstack_ipv6(): """Return True if the platform supports creating a SOCK_STREAM socket which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. """ if not has_ipv6 \ or not hasattr(_socket, 'IPPROTO_IPV6') \ or not hasattr(_socket, 'IPV6_V6ONLY'): return False try: with socket(AF_INET6, SOCK_STREAM) as sock: sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) return True except error: return False def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, dualstack_ipv6=False): """Convenience function which creates a SOCK_STREAM type socket bound to *address* (a 2-tuple (host, port)) and return the socket object. *family* should be either AF_INET or AF_INET6. *backlog* is the queue size passed to socket.listen(). *reuse_port* dictates whether to use the SO_REUSEPORT socket option. *dualstack_ipv6*: if true and the platform supports it, it will create an AF_INET6 socket able to accept both IPv4 or IPv6 connections. When false it will explicitly disable this option on platforms that enable it by default (e.g. Linux). >>> with create_server(('', 8000)) as server: ... while True: ... conn, addr = server.accept() ... 
# handle new connection """ if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): raise ValueError("SO_REUSEPORT not supported on this platform") if dualstack_ipv6: if not has_dualstack_ipv6(): raise ValueError("dualstack_ipv6 not supported on this platform") if family != AF_INET6: raise ValueError("dualstack_ipv6 requires AF_INET6 family") sock = socket(family, SOCK_STREAM) try: # Note about Windows. We don't set SO_REUSEADDR because: # 1) It's unnecessary: bind() will succeed even in case of a # previous closed socket on the same address and still in # TIME_WAIT state. # 2) If set, another socket is free to bind() on the same # address, effectively preventing this one from accepting # connections. Also, it may set the process in a state where # it'll no longer respond to any signals or graceful kills. # See: msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx if os.name not in ('nt', 'cygwin') and \ hasattr(_socket, 'SO_REUSEADDR'): try: sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) except error: # Fail later on bind(), for platforms which may not # support this option. pass if reuse_port: sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) if has_ipv6 and family == AF_INET6: if dualstack_ipv6: sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) elif hasattr(_socket, "IPV6_V6ONLY") and \ hasattr(_socket, "IPPROTO_IPV6"): sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) try: sock.bind(address) except error as err: msg = '%s (while attempting to bind on address %r)' % \ (err.strerror, address) raise error(err.errno, msg) from None if backlog is None: sock.listen() else: sock.listen(backlog) return sock except error: sock.close() raise def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): """Resolve host and port into list of address info entries. Translate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. 
host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. """ # We override this function since we want to translate the numeric family # and socket type values to enum constants. addrlist = [] for res in _socket.getaddrinfo(host, port, family, type, proto, flags): af, socktype, proto, canonname, sa = res addrlist.append((_intenum_converter(af, AddressFamily), _intenum_converter(socktype, SocketKind), proto, canonname, sa)) return addrlist
bsd-3-clause
0213a9f53914dfe8e7f469042dbf5864
37.467909
95
0.595533
4.142236
false
false
false
false
brython-dev/brython
www/src/Lib/email/feedparser.py
30
22780
# Copyright (C) 2004-2006 Python Software Foundation # Authors: Baxter, Wouters and Warsaw # Contact: email-sig@python.org """FeedParser - An email feed parser. The feed parser implements an interface for incrementally parsing an email message, line by line. This has advantages for certain applications, such as those reading email messages off a socket. FeedParser.feed() is the primary interface for pushing new data into the parser. It returns when there's nothing more it can do with the available data. When you have no more data to push into the parser, call .close(). This completes the parsing and returns the root message object. The other advantage of this parser is that it will never raise a parsing exception. Instead, when it finds something unexpected, it adds a 'defect' to the current message. Defects are just instances that live on the message object's .defects attribute. """ __all__ = ['FeedParser', 'BytesFeedParser'] import re from email import errors from email._policybase import compat32 from collections import deque from io import StringIO NLCRE = re.compile(r'\r\n|\r|\n') NLCRE_bol = re.compile(r'(\r\n|\r|\n)') NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z') NLCRE_crack = re.compile(r'(\r\n|\r|\n)') # RFC 2822 $3.6.8 Optional fields. ftext is %d33-57 / %d59-126, Any character # except controls, SP, and ":". headerRE = re.compile(r'^(From |[\041-\071\073-\176]*:|[\t ])') EMPTYSTRING = '' NL = '\n' NeedMoreData = object() class BufferedSubFile(object): """A file-ish object that can have new data loaded into it. You can also push and pop line-matching predicates onto a stack. When the current predicate matches the current line, a false EOF response (i.e. empty string) is returned instead. This lets the parser adhere to a simple abstraction -- it parses until EOF closes the current message. """ def __init__(self): # Text stream of the last partial line pushed into this object. # See issue 22233 for why this is a text stream and not a list. 
self._partial = StringIO(newline='') # A deque of full, pushed lines self._lines = deque() # The stack of false-EOF checking predicates. self._eofstack = [] # A flag indicating whether the file has been closed or not. self._closed = False def push_eof_matcher(self, pred): self._eofstack.append(pred) def pop_eof_matcher(self): return self._eofstack.pop() def close(self): # Don't forget any trailing partial line. self._partial.seek(0) self.pushlines(self._partial.readlines()) self._partial.seek(0) self._partial.truncate() self._closed = True def readline(self): if not self._lines: if self._closed: return '' return NeedMoreData # Pop the line off the stack and see if it matches the current # false-EOF predicate. line = self._lines.popleft() # RFC 2046, section 5.1.2 requires us to recognize outer level # boundaries at any level of inner nesting. Do this, but be sure it's # in the order of most to least nested. for ateof in reversed(self._eofstack): if ateof(line): # We're at the false EOF. But push the last line back first. self._lines.appendleft(line) return '' return line def unreadline(self, line): # Let the consumer push a line back into the buffer. assert line is not NeedMoreData self._lines.appendleft(line) def push(self, data): """Push some new data into this object.""" self._partial.write(data) if '\n' not in data and '\r' not in data: # No new complete lines, wait for more. return # Crack into lines, preserving the linesep characters. self._partial.seek(0) parts = self._partial.readlines() self._partial.seek(0) self._partial.truncate() # If the last element of the list does not end in a newline, then treat # it as a partial line. We only check for '\n' here because a line # ending with '\r' might be a line that was split in the middle of a # '\r\n' sequence (see bugs 1555570 and 1721862). 
if not parts[-1].endswith('\n'): self._partial.write(parts.pop()) self.pushlines(parts) def pushlines(self, lines): self._lines.extend(lines) def __iter__(self): return self def __next__(self): line = self.readline() if line == '': raise StopIteration return line class FeedParser: """A feed-style parser of email.""" def __init__(self, _factory=None, *, policy=compat32): """_factory is called with no arguments to create a new message obj The policy keyword specifies a policy object that controls a number of aspects of the parser's operation. The default policy maintains backward compatibility. """ self.policy = policy self._old_style_factory = False if _factory is None: if policy.message_factory is None: from email.message import Message self._factory = Message else: self._factory = policy.message_factory else: self._factory = _factory try: _factory(policy=self.policy) except TypeError: # Assume this is an old-style factory self._old_style_factory = True self._input = BufferedSubFile() self._msgstack = [] self._parse = self._parsegen().__next__ self._cur = None self._last = None self._headersonly = False # Non-public interface for supporting Parser's headersonly flag def _set_headersonly(self): self._headersonly = True def feed(self, data): """Push more data into the parser.""" self._input.push(data) self._call_parse() def _call_parse(self): try: self._parse() except StopIteration: pass def close(self): """Parse all remaining data and return the root message object.""" self._input.close() self._call_parse() root = self._pop_message() assert not self._msgstack # Look for final set of defects if root.get_content_maintype() == 'multipart' \ and not root.is_multipart(): defect = errors.MultipartInvariantViolationDefect() self.policy.handle_defect(root, defect) return root def _new_message(self): if self._old_style_factory: msg = self._factory() else: msg = self._factory(policy=self.policy) if self._cur and self._cur.get_content_type() == 'multipart/digest': 
msg.set_default_type('message/rfc822') if self._msgstack: self._msgstack[-1].attach(msg) self._msgstack.append(msg) self._cur = msg self._last = msg def _pop_message(self): retval = self._msgstack.pop() if self._msgstack: self._cur = self._msgstack[-1] else: self._cur = None return retval def _parsegen(self): # Create a new message and start by parsing headers. self._new_message() headers = [] # Collect the headers, searching for a line that doesn't match the RFC # 2822 header or continuation pattern (including an empty line). for line in self._input: if line is NeedMoreData: yield NeedMoreData continue if not headerRE.match(line): # If we saw the RFC defined header/body separator # (i.e. newline), just throw it away. Otherwise the line is # part of the body so push it back. if not NLCRE.match(line): defect = errors.MissingHeaderBodySeparatorDefect() self.policy.handle_defect(self._cur, defect) self._input.unreadline(line) break headers.append(line) # Done with the headers, so parse them and figure out what we're # supposed to see in the body of the message. self._parse_headers(headers) # Headers-only parsing is a backwards compatibility hack, which was # necessary in the older parser, which could raise errors. All # remaining lines in the input are thrown into the message body. if self._headersonly: lines = [] while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return if self._cur.get_content_type() == 'message/delivery-status': # message/delivery-status contains blocks of headers separated by # a blank line. We'll represent each header block as a separate # nested message object, but the processing is a bit different # than standard message/* types because there is no body for the # nested messages. A blank line separates the subparts. 
while True: self._input.push_eof_matcher(NLCRE.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break msg = self._pop_message() # We need to pop the EOF matcher in order to tell if we're at # the end of the current file, not the end of the last block # of message headers. self._input.pop_eof_matcher() # The input stream must be sitting at the newline or at the # EOF. We want to see if we're at the end of this subpart, so # first consume the blank line, then test the next line to see # if we're at this subpart's EOF. while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue break while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue break if line == '': break # Not at EOF so this is a line we're going to need. self._input.unreadline(line) return if self._cur.get_content_maintype() == 'message': # The message claims to be a message/* type, then what follows is # another RFC 2822 message. for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break self._pop_message() return if self._cur.get_content_maintype() == 'multipart': boundary = self._cur.get_boundary() if boundary is None: # The message /claims/ to be a multipart but it has not # defined a boundary. That's a problem which we'll handle by # reading everything until the EOF and marking the message as # defective. defect = errors.NoBoundaryInMultipartDefect() self.policy.handle_defect(self._cur, defect) lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) return # Make sure a valid content type was specified per RFC 2045:6.4. 
if (str(self._cur.get('content-transfer-encoding', '8bit')).lower() not in ('7bit', '8bit', 'binary')): defect = errors.InvalidMultipartContentTransferEncodingDefect() self.policy.handle_defect(self._cur, defect) # Create a line match predicate which matches the inter-part # boundary as well as the end-of-multipart boundary. Don't push # this onto the input stream until we've scanned past the # preamble. separator = '--' + boundary boundaryre = re.compile( '(?P<sep>' + re.escape(separator) + r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$') capturing_preamble = True preamble = [] linesep = False close_boundary_seen = False while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue if line == '': break mo = boundaryre.match(line) if mo: # If we're looking at the end boundary, we're done with # this multipart. If there was a newline at the end of # the closing boundary, then we need to initialize the # epilogue with the empty string (see below). if mo.group('end'): close_boundary_seen = True linesep = mo.group('linesep') break # We saw an inter-part boundary. Were we in the preamble? if capturing_preamble: if preamble: # According to RFC 2046, the last newline belongs # to the boundary. lastline = preamble[-1] eolmo = NLCRE_eol.search(lastline) if eolmo: preamble[-1] = lastline[:-len(eolmo.group(0))] self._cur.preamble = EMPTYSTRING.join(preamble) capturing_preamble = False self._input.unreadline(line) continue # We saw a boundary separating two parts. Consume any # multiple boundary lines that may be following. Our # interpretation of RFC 2046 BNF grammar does not produce # body parts within such double boundaries. while True: line = self._input.readline() if line is NeedMoreData: yield NeedMoreData continue mo = boundaryre.match(line) if not mo: self._input.unreadline(line) break # Recurse to parse this subpart; the input stream points # at the subpart's first line. 
self._input.push_eof_matcher(boundaryre.match) for retval in self._parsegen(): if retval is NeedMoreData: yield NeedMoreData continue break # Because of RFC 2046, the newline preceding the boundary # separator actually belongs to the boundary, not the # previous subpart's payload (or epilogue if the previous # part is a multipart). if self._last.get_content_maintype() == 'multipart': epilogue = self._last.epilogue if epilogue == '': self._last.epilogue = None elif epilogue is not None: mo = NLCRE_eol.search(epilogue) if mo: end = len(mo.group(0)) self._last.epilogue = epilogue[:-end] else: payload = self._last._payload if isinstance(payload, str): mo = NLCRE_eol.search(payload) if mo: payload = payload[:-len(mo.group(0))] self._last._payload = payload self._input.pop_eof_matcher() self._pop_message() # Set the multipart up for newline cleansing, which will # happen if we're in a nested multipart. self._last = self._cur else: # I think we must be in the preamble assert capturing_preamble preamble.append(line) # We've seen either the EOF or the end boundary. If we're still # capturing the preamble, we never saw the start boundary. Note # that as a defect and store the captured text as the payload. if capturing_preamble: defect = errors.StartBoundaryNotFoundDefect() self.policy.handle_defect(self._cur, defect) self._cur.set_payload(EMPTYSTRING.join(preamble)) epilogue = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue self._cur.epilogue = EMPTYSTRING.join(epilogue) return # If we're not processing the preamble, then we might have seen # EOF without seeing that end boundary...that is also a defect. if not close_boundary_seen: defect = errors.CloseBoundaryNotFoundDefect() self.policy.handle_defect(self._cur, defect) return # Everything from here to the EOF is epilogue. 
If the end boundary # ended in a newline, we'll need to make sure the epilogue isn't # None if linesep: epilogue = [''] else: epilogue = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue epilogue.append(line) # Any CRLF at the front of the epilogue is not technically part of # the epilogue. Also, watch out for an empty string epilogue, # which means a single newline. if epilogue: firstline = epilogue[0] bolmo = NLCRE_bol.match(firstline) if bolmo: epilogue[0] = firstline[len(bolmo.group(0)):] self._cur.epilogue = EMPTYSTRING.join(epilogue) return # Otherwise, it's some non-multipart type, so the entire rest of the # file contents becomes the payload. lines = [] for line in self._input: if line is NeedMoreData: yield NeedMoreData continue lines.append(line) self._cur.set_payload(EMPTYSTRING.join(lines)) def _parse_headers(self, lines): # Passed a list of lines that make up the headers for the current msg lastheader = '' lastvalue = [] for lineno, line in enumerate(lines): # Check for continuation if line[0] in ' \t': if not lastheader: # The first line of the headers was a continuation. This # is illegal, so let's note the defect, store the illegal # line, and ignore it for purposes of headers. defect = errors.FirstHeaderLineIsContinuationDefect(line) self.policy.handle_defect(self._cur, defect) continue lastvalue.append(line) continue if lastheader: self._cur.set_raw(*self.policy.header_source_parse(lastvalue)) lastheader, lastvalue = '', [] # Check for envelope header, i.e. unix-from if line.startswith('From '): if lineno == 0: # Strip off the trailing newline mo = NLCRE_eol.search(line) if mo: line = line[:-len(mo.group(0))] self._cur.set_unixfrom(line) continue elif lineno == len(lines) - 1: # Something looking like a unix-from at the end - it's # probably the first line of the body, so push back the # line and stop. self._input.unreadline(line) return else: # Weirdly placed unix-from line. Note this as a defect # and ignore it. 
defect = errors.MisplacedEnvelopeHeaderDefect(line) self._cur.defects.append(defect) continue # Split the line on the colon separating field name from value. # There will always be a colon, because if there wasn't the part of # the parser that calls us would have started parsing the body. i = line.find(':') # If the colon is on the start of the line the header is clearly # malformed, but we might be able to salvage the rest of the # message. Track the error but keep going. if i == 0: defect = errors.InvalidHeaderDefect("Missing header name.") self._cur.defects.append(defect) continue assert i>0, "_parse_headers fed line with no : and no leading WS" lastheader = line[:i] lastvalue = [line] # Done with all the lines, so handle the last header. if lastheader: self._cur.set_raw(*self.policy.header_source_parse(lastvalue)) class BytesFeedParser(FeedParser): """Like FeedParser, but feed accepts bytes.""" def feed(self, data): super().feed(data.decode('ascii', 'surrogateescape'))
bsd-3-clause
da9650719b2f8ca3630e1d3b0e336007
41.342007
79
0.53266
4.744845
false
false
false
false
brython-dev/brython
www/src/Lib/importlib/util.py
10
11487
"""Utility code for constructing importers, etc.""" from ._abc import Loader from ._bootstrap import module_from_spec from ._bootstrap import _resolve_name from ._bootstrap import spec_from_loader from ._bootstrap import _find_spec from ._bootstrap_external import MAGIC_NUMBER from ._bootstrap_external import _RAW_MAGIC_NUMBER from ._bootstrap_external import cache_from_source from ._bootstrap_external import decode_source from ._bootstrap_external import source_from_cache from ._bootstrap_external import spec_from_file_location from contextlib import contextmanager import _imp import functools import sys import types import warnings def source_hash(source_bytes): "Return the hash of *source_bytes* as used in hash-based pyc files." return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes) def resolve_name(name, package): """Resolve a relative module name to an absolute one.""" if not name.startswith('.'): return name elif not package: raise ImportError(f'no package specified for {repr(name)} ' '(required for relative module names)') level = 0 for character in name: if character != '.': break level += 1 return _resolve_name(name[level:], package, level) def _find_spec_from_path(name, path=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. Dotted names do not have their parent packages implicitly imported. You will most likely need to explicitly import all parent packages in the proper order for a submodule to get the correct spec. 
""" if name not in sys.modules: return _find_spec(name, path) else: module = sys.modules[name] if module is None: return None try: spec = module.__spec__ except AttributeError: raise ValueError('{}.__spec__ is not set'.format(name)) from None else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec def find_spec(name, package=None): """Return the spec for the specified module. First, sys.modules is checked to see if the module was already imported. If so, then sys.modules[name].__spec__ is returned. If that happens to be set to None, then ValueError is raised. If the module is not in sys.modules, then sys.meta_path is searched for a suitable spec with the value of 'path' given to the finders. None is returned if no spec could be found. If the name is for submodule (contains a dot), the parent module is automatically imported. The name and package arguments work the same as importlib.import_module(). In other words, relative module names (with leading dots) work. 
""" fullname = resolve_name(name, package) if name.startswith('.') else name if fullname not in sys.modules: parent_name = fullname.rpartition('.')[0] if parent_name: parent = __import__(parent_name, fromlist=['__path__']) try: parent_path = parent.__path__ except AttributeError as e: raise ModuleNotFoundError( f"__path__ attribute not found on {parent_name!r} " f"while trying to find {fullname!r}", name=fullname) from e else: parent_path = None return _find_spec(fullname, parent_path) else: module = sys.modules[fullname] if module is None: return None try: spec = module.__spec__ except AttributeError: raise ValueError('{}.__spec__ is not set'.format(name)) from None else: if spec is None: raise ValueError('{}.__spec__ is None'.format(name)) return spec @contextmanager def _module_to_load(name): is_reload = name in sys.modules module = sys.modules.get(name) if not is_reload: # This must be done before open() is called as the 'io' module # implicitly imports 'locale' and would otherwise trigger an # infinite loop. module = type(sys)(name) # This must be done before putting the module in sys.modules # (otherwise an optimization shortcut in import.c becomes wrong) module.__initializing__ = True sys.modules[name] = module try: yield module except Exception: if not is_reload: try: del sys.modules[name] except KeyError: pass finally: module.__initializing__ = False def set_package(fxn): """Set __package__ on the returned module. This function is deprecated. 
""" @functools.wraps(fxn) def set_package_wrapper(*args, **kwargs): warnings.warn('The import system now takes care of this automatically; ' 'this decorator is slated for removal in Python 3.12', DeprecationWarning, stacklevel=2) module = fxn(*args, **kwargs) if getattr(module, '__package__', None) is None: module.__package__ = module.__name__ if not hasattr(module, '__path__'): module.__package__ = module.__package__.rpartition('.')[0] return module return set_package_wrapper def set_loader(fxn): """Set __loader__ on the returned module. This function is deprecated. """ @functools.wraps(fxn) def set_loader_wrapper(self, *args, **kwargs): warnings.warn('The import system now takes care of this automatically; ' 'this decorator is slated for removal in Python 3.12', DeprecationWarning, stacklevel=2) module = fxn(self, *args, **kwargs) if getattr(module, '__loader__', None) is None: module.__loader__ = self return module return set_loader_wrapper def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. 
""" warnings.warn('The import system now takes care of this automatically; ' 'this decorator is slated for removal in Python 3.12', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper class _LazyModule(types.ModuleType): """A subclass of the module type which triggers loading upon attribute access.""" def __getattribute__(self, attr): """Trigger the load of the module and return the attribute.""" # All module metadata must be garnered from __spec__ in order to avoid # using mutated values. # Stop triggering this method. self.__class__ = types.ModuleType # Get the original name to make sure no object substitution occurred # in sys.modules. original_name = self.__spec__.name # Figure out exactly what attributes were mutated between the creation # of the module and now. attrs_then = self.__spec__.loader_state['__dict__'] attrs_now = self.__dict__ attrs_updated = {} for key, value in attrs_now.items(): # Code that set the attribute may have kept a reference to the # assigned object, making identity more important than equality. if key not in attrs_then: attrs_updated[key] = value elif id(attrs_now[key]) != id(attrs_then[key]): attrs_updated[key] = value self.__spec__.loader.exec_module(self) # If exec_module() was used directly there is no guarantee the module # object was put into sys.modules. 
if original_name in sys.modules: if id(self) != id(sys.modules[original_name]): raise ValueError(f"module object for {original_name!r} " "substituted in sys.modules during a lazy " "load") # Update after loading since that's what would happen in an eager # loading situation. self.__dict__.update(attrs_updated) return getattr(self, attr) def __delattr__(self, attr): """Trigger the load and then perform the deletion.""" # To trigger the load and raise an exception if the attribute # doesn't exist. self.__getattribute__(attr) delattr(self, attr) class LazyLoader(Loader): """A loader that creates a module which defers loading until attribute access.""" @staticmethod def __check_eager_loader(loader): if not hasattr(loader, 'exec_module'): raise TypeError('loader must define exec_module()') @classmethod def factory(cls, loader): """Construct a callable which returns the eager loader made lazy.""" cls.__check_eager_loader(loader) return lambda *args, **kwargs: cls(loader(*args, **kwargs)) def __init__(self, loader): self.__check_eager_loader(loader) self.loader = loader def create_module(self, spec): return self.loader.create_module(spec) def exec_module(self, module): """Make the module load lazily.""" module.__spec__.loader = self.loader module.__loader__ = self.loader # Don't need to worry about deep-copying as trying to set an attribute # on an object would have triggered the load, # e.g. ``module.__spec__.loader = None`` would trigger a load from # trying to access module.__spec__. loader_state = {} loader_state['__dict__'] = module.__dict__.copy() loader_state['__class__'] = module.__class__ module.__spec__.loader_state = loader_state module.__class__ = _LazyModule
bsd-3-clause
1f5f4a1931a1d262cec0d952cfe357ad
37.036424
85
0.618961
4.49589
false
false
false
false
brython-dev/brython
www/src/Lib/getpass.py
14
5990
"""Utilities to get a password and/or the current user name. getpass(prompt[, stream]) - Prompt for a password, with echo turned off. getuser() - Get the user name from the environment or password database. GetPassWarning - This UserWarning is issued when getpass() cannot prevent echoing of the password contents while reading. On Windows, the msvcrt module will be used. """ # Authors: Piers Lauder (original) # Guido van Rossum (Windows support and cleanup) # Gregory P. Smith (tty support & GetPassWarning) import contextlib import io import os import sys import warnings __all__ = ["getpass","getuser","GetPassWarning"] class GetPassWarning(UserWarning): pass def unix_getpass(prompt='Password: ', stream=None): """Prompt for a password, with echo turned off. Args: prompt: Written on stream to ask for the input. Default: 'Password: ' stream: A writable file object to display the prompt. Defaults to the tty. If no tty is available defaults to sys.stderr. Returns: The seKr3t input. Raises: EOFError: If our input tty or stdin was closed. GetPassWarning: When we were unable to turn echo off on the input. Always restores terminal settings before returning. """ passwd = None with contextlib.ExitStack() as stack: try: # Always try reading and writing directly on the tty first. fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY) tty = io.FileIO(fd, 'w+') stack.enter_context(tty) input = io.TextIOWrapper(tty) stack.enter_context(input) if not stream: stream = input except OSError: # If that fails, see if stdin can be controlled. 
stack.close() try: fd = sys.stdin.fileno() except (AttributeError, ValueError): fd = None passwd = fallback_getpass(prompt, stream) input = sys.stdin if not stream: stream = sys.stderr if fd is not None: try: old = termios.tcgetattr(fd) # a copy to save new = old[:] new[3] &= ~termios.ECHO # 3 == 'lflags' tcsetattr_flags = termios.TCSAFLUSH if hasattr(termios, 'TCSASOFT'): tcsetattr_flags |= termios.TCSASOFT try: termios.tcsetattr(fd, tcsetattr_flags, new) passwd = _raw_input(prompt, stream, input=input) finally: termios.tcsetattr(fd, tcsetattr_flags, old) stream.flush() # issue7208 except termios.error: if passwd is not None: # _raw_input succeeded. The final tcsetattr failed. Reraise # instead of leaving the terminal in an unknown state. raise # We can't control the tty or stdin. Give up and use normal IO. # fallback_getpass() raises an appropriate warning. if stream is not input: # clean up unused file objects before blocking stack.close() passwd = fallback_getpass(prompt, stream) stream.write('\n') return passwd def win_getpass(prompt='Password: ', stream=None): """Prompt for password with echo off, using Windows getwch().""" if sys.stdin is not sys.__stdin__: return fallback_getpass(prompt, stream) for c in prompt: msvcrt.putwch(c) pw = "" while 1: c = msvcrt.getwch() if c == '\r' or c == '\n': break if c == '\003': raise KeyboardInterrupt if c == '\b': pw = pw[:-1] else: pw = pw + c msvcrt.putwch('\r') msvcrt.putwch('\n') return pw def fallback_getpass(prompt='Password: ', stream=None): warnings.warn("Can not control echo on the terminal.", GetPassWarning, stacklevel=2) if not stream: stream = sys.stderr print("Warning: Password input may be echoed.", file=stream) return _raw_input(prompt, stream) def _raw_input(prompt="", stream=None, input=None): # This doesn't save the string in the GNU readline history. 
if not stream: stream = sys.stderr if not input: input = sys.stdin prompt = str(prompt) if prompt: try: stream.write(prompt) except UnicodeEncodeError: # Use replace error handler to get as much as possible printed. prompt = prompt.encode(stream.encoding, 'replace') prompt = prompt.decode(stream.encoding) stream.write(prompt) stream.flush() # NOTE: The Python C API calls flockfile() (and unlock) during readline. line = input.readline() if not line: raise EOFError if line[-1] == '\n': line = line[:-1] return line def getuser(): """Get the username from the environment or password database. First try various environment variables, then the password database. This works on Windows as long as USERNAME is set. """ for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'): user = os.environ.get(name) if user: return user # If this fails, the exception will "explain" why import pwd return pwd.getpwuid(os.getuid())[0] # Bind the name getpass to the appropriate function try: import termios # it's possible there is an incompatible termios from the # McMillan Installer, make sure we have a UNIX-compatible termios termios.tcgetattr, termios.tcsetattr except (ImportError, AttributeError): try: import msvcrt except ImportError: getpass = fallback_getpass else: getpass = win_getpass else: getpass = unix_getpass
bsd-3-clause
a3d905915be3170570204d977b859af7
31.378378
81
0.58581
4.321789
false
false
false
false
brython-dev/brython
www/src/Lib/html/entities.py
2
75383
"""HTML character entity references.""" __all__ = ['html5', 'name2codepoint', 'codepoint2name', 'entitydefs'] # maps the HTML entity name to the Unicode code point # from https://html.spec.whatwg.org/multipage/named-characters.html name2codepoint = { 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1 'Alpha': 0x0391, # greek capital letter alpha, U+0391 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1 'Beta': 0x0392, # greek capital letter beta, U+0392 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1 'Chi': 0x03a7, # greek capital letter chi, U+03A7 'Dagger': 0x2021, # double dagger, U+2021 ISOpub 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395 'Eta': 0x0397, # greek capital letter eta, U+0397 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1 'Iota': 0x0399, # greek capital letter iota, U+0399 'Iuml': 0x00cf, # latin capital letter I with 
diaeresis, U+00CF ISOlat1 'Kappa': 0x039a, # greek capital letter kappa, U+039A 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3 'Mu': 0x039c, # greek capital letter mu, U+039C 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1 'Nu': 0x039d, # greek capital letter nu, U+039D 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3 'Omicron': 0x039f, # greek capital letter omicron, U+039F 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3 'Rho': 0x03a1, # greek capital letter rho, U+03A1 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1 'Tau': 0x03a4, # greek capital letter tau, U+03A4 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3 'Yacute': 0x00dd, # latin capital letter Y with 
acute, U+00DD ISOlat1 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2 'Zeta': 0x0396, # greek capital letter zeta, U+0396 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3 'amp': 0x0026, # ampersand, U+0026 ISOnum 'and': 0x2227, # logical and = wedge, U+2227 ISOtech 'ang': 0x2220, # angle, U+2220 ISOamso 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub 'cap': 0x2229, # intersection = cap, U+2229 ISOtech 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia 'cent': 0x00a2, # cent sign, U+00A2 ISOnum 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub 'cong': 0x2245, # approximately equal to, U+2245 ISOtech 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW 'cup': 0x222a, # union = cup, U+222A ISOtech 
'curren': 0x00a4, # currency sign, U+00A4 ISOnum 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa 'dagger': 0x2020, # dagger, U+2020 ISOpub 'darr': 0x2193, # downwards arrow, U+2193 ISOnum 'deg': 0x00b0, # degree sign, U+00B0 ISOnum 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3 'diams': 0x2666, # black diamond suit, U+2666 ISOpub 'divide': 0x00f7, # division sign, U+00F7 ISOnum 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso 'emsp': 0x2003, # em space, U+2003 ISOpub 'ensp': 0x2002, # en space, U+2002 ISOpub 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3 'equiv': 0x2261, # identical to, U+2261 ISOtech 'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1 'euro': 0x20ac, # euro sign, U+20AC NEW 'exist': 0x2203, # there exists, U+2203 ISOtech 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech 'forall': 0x2200, # for all, U+2200 ISOtech 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum 'frasl': 0x2044, # fraction slash, U+2044 NEW 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech 'gt': 0x003e, # greater-than sign, U+003E ISOnum 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa 'harr': 0x2194, # left right arrow, U+2194 ISOamsa 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub 'iacute': 
0x00ed, # latin small letter i with acute, U+00ED ISOlat1 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso 'infin': 0x221e, # infinity, U+221E ISOtech 'int': 0x222b, # integral, U+222B ISOtech 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum 'isin': 0x2208, # element of, U+2208 ISOtech 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum 'le': 0x2264, # less-than or equal to, U+2264 ISOtech 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech 'loz': 0x25ca, # lozenge, U+25CA ISOpub 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum 'lt': 0x003c, # less-than sign, U+003C ISOnum 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia 'mdash': 0x2014, # em dash, U+2014 ISOpub 'micro': 0x00b5, # micro sign, U+00B5 ISOnum 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum 'minus': 0x2212, # minus sign, U+2212 ISOtech 'mu': 0x03bc, # greek small letter mu, U+03BC 
ISOgrk3 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum 'ndash': 0x2013, # en dash, U+2013 ISOpub 'ne': 0x2260, # not equal to, U+2260 ISOtech 'ni': 0x220b, # contains as member, U+220B ISOtech 'not': 0x00ac, # not sign, U+00AC ISOnum 'notin': 0x2209, # not an element of, U+2209 ISOtech 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1 'oline': 0x203e, # overline = spacing overscore, U+203E NEW 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb 'or': 0x2228, # logical or = vee, U+2228 ISOtech 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum 'part': 0x2202, # partial differential, U+2202 ISOtech 'permil': 0x2030, # per mille sign, U+2030 ISOtech 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum 
'pound': 0x00a3, # pound sign, U+00A3 ISOnum 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb 'prop': 0x221d, # proportional to, U+221D ISOtech 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech 'radic': 0x221a, # square root = radical sign, U+221A ISOtech 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum 'rfloor': 0x230b, # right floor, U+230B ISOamsc 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb 'sect': 0x00a7, # section sign, U+00A7 ISOnum 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech 'spades': 0x2660, # black spade suit, U+2660 ISOpub 'sub': 0x2282, # subset of, U+2282 ISOtech 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech 'sum': 0x2211, # n-ary summation, U+2211 ISOamsb 'sup': 0x2283, # superset of, U+2283 ISOtech 'sup1': 0x00b9, # 
superscript one = superscript digit one, U+00B9 ISOnum 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3 'there4': 0x2234, # therefore, U+2234 ISOtech 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW 'thinsp': 0x2009, # thin space, U+2009 ISOpub 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1 'tilde': 0x02dc, # small tilde, U+02DC ISOdia 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum 'trade': 0x2122, # trade mark sign, U+2122 ISOnum 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070 } # maps the HTML5 named character references to the equivalent Unicode character(s) 
html5 = { 'Aacute': '\xc1', 'aacute': '\xe1', 'Aacute;': '\xc1', 'aacute;': '\xe1', 'Abreve;': '\u0102', 'abreve;': '\u0103', 'ac;': '\u223e', 'acd;': '\u223f', 'acE;': '\u223e\u0333', 'Acirc': '\xc2', 'acirc': '\xe2', 'Acirc;': '\xc2', 'acirc;': '\xe2', 'acute': '\xb4', 'acute;': '\xb4', 'Acy;': '\u0410', 'acy;': '\u0430', 'AElig': '\xc6', 'aelig': '\xe6', 'AElig;': '\xc6', 'aelig;': '\xe6', 'af;': '\u2061', 'Afr;': '\U0001d504', 'afr;': '\U0001d51e', 'Agrave': '\xc0', 'agrave': '\xe0', 'Agrave;': '\xc0', 'agrave;': '\xe0', 'alefsym;': '\u2135', 'aleph;': '\u2135', 'Alpha;': '\u0391', 'alpha;': '\u03b1', 'Amacr;': '\u0100', 'amacr;': '\u0101', 'amalg;': '\u2a3f', 'AMP': '&', 'amp': '&', 'AMP;': '&', 'amp;': '&', 'And;': '\u2a53', 'and;': '\u2227', 'andand;': '\u2a55', 'andd;': '\u2a5c', 'andslope;': '\u2a58', 'andv;': '\u2a5a', 'ang;': '\u2220', 'ange;': '\u29a4', 'angle;': '\u2220', 'angmsd;': '\u2221', 'angmsdaa;': '\u29a8', 'angmsdab;': '\u29a9', 'angmsdac;': '\u29aa', 'angmsdad;': '\u29ab', 'angmsdae;': '\u29ac', 'angmsdaf;': '\u29ad', 'angmsdag;': '\u29ae', 'angmsdah;': '\u29af', 'angrt;': '\u221f', 'angrtvb;': '\u22be', 'angrtvbd;': '\u299d', 'angsph;': '\u2222', 'angst;': '\xc5', 'angzarr;': '\u237c', 'Aogon;': '\u0104', 'aogon;': '\u0105', 'Aopf;': '\U0001d538', 'aopf;': '\U0001d552', 'ap;': '\u2248', 'apacir;': '\u2a6f', 'apE;': '\u2a70', 'ape;': '\u224a', 'apid;': '\u224b', 'apos;': "'", 'ApplyFunction;': '\u2061', 'approx;': '\u2248', 'approxeq;': '\u224a', 'Aring': '\xc5', 'aring': '\xe5', 'Aring;': '\xc5', 'aring;': '\xe5', 'Ascr;': '\U0001d49c', 'ascr;': '\U0001d4b6', 'Assign;': '\u2254', 'ast;': '*', 'asymp;': '\u2248', 'asympeq;': '\u224d', 'Atilde': '\xc3', 'atilde': '\xe3', 'Atilde;': '\xc3', 'atilde;': '\xe3', 'Auml': '\xc4', 'auml': '\xe4', 'Auml;': '\xc4', 'auml;': '\xe4', 'awconint;': '\u2233', 'awint;': '\u2a11', 'backcong;': '\u224c', 'backepsilon;': '\u03f6', 'backprime;': '\u2035', 'backsim;': '\u223d', 'backsimeq;': '\u22cd', 
'Backslash;': '\u2216', 'Barv;': '\u2ae7', 'barvee;': '\u22bd', 'Barwed;': '\u2306', 'barwed;': '\u2305', 'barwedge;': '\u2305', 'bbrk;': '\u23b5', 'bbrktbrk;': '\u23b6', 'bcong;': '\u224c', 'Bcy;': '\u0411', 'bcy;': '\u0431', 'bdquo;': '\u201e', 'becaus;': '\u2235', 'Because;': '\u2235', 'because;': '\u2235', 'bemptyv;': '\u29b0', 'bepsi;': '\u03f6', 'bernou;': '\u212c', 'Bernoullis;': '\u212c', 'Beta;': '\u0392', 'beta;': '\u03b2', 'beth;': '\u2136', 'between;': '\u226c', 'Bfr;': '\U0001d505', 'bfr;': '\U0001d51f', 'bigcap;': '\u22c2', 'bigcirc;': '\u25ef', 'bigcup;': '\u22c3', 'bigodot;': '\u2a00', 'bigoplus;': '\u2a01', 'bigotimes;': '\u2a02', 'bigsqcup;': '\u2a06', 'bigstar;': '\u2605', 'bigtriangledown;': '\u25bd', 'bigtriangleup;': '\u25b3', 'biguplus;': '\u2a04', 'bigvee;': '\u22c1', 'bigwedge;': '\u22c0', 'bkarow;': '\u290d', 'blacklozenge;': '\u29eb', 'blacksquare;': '\u25aa', 'blacktriangle;': '\u25b4', 'blacktriangledown;': '\u25be', 'blacktriangleleft;': '\u25c2', 'blacktriangleright;': '\u25b8', 'blank;': '\u2423', 'blk12;': '\u2592', 'blk14;': '\u2591', 'blk34;': '\u2593', 'block;': '\u2588', 'bne;': '=\u20e5', 'bnequiv;': '\u2261\u20e5', 'bNot;': '\u2aed', 'bnot;': '\u2310', 'Bopf;': '\U0001d539', 'bopf;': '\U0001d553', 'bot;': '\u22a5', 'bottom;': '\u22a5', 'bowtie;': '\u22c8', 'boxbox;': '\u29c9', 'boxDL;': '\u2557', 'boxDl;': '\u2556', 'boxdL;': '\u2555', 'boxdl;': '\u2510', 'boxDR;': '\u2554', 'boxDr;': '\u2553', 'boxdR;': '\u2552', 'boxdr;': '\u250c', 'boxH;': '\u2550', 'boxh;': '\u2500', 'boxHD;': '\u2566', 'boxHd;': '\u2564', 'boxhD;': '\u2565', 'boxhd;': '\u252c', 'boxHU;': '\u2569', 'boxHu;': '\u2567', 'boxhU;': '\u2568', 'boxhu;': '\u2534', 'boxminus;': '\u229f', 'boxplus;': '\u229e', 'boxtimes;': '\u22a0', 'boxUL;': '\u255d', 'boxUl;': '\u255c', 'boxuL;': '\u255b', 'boxul;': '\u2518', 'boxUR;': '\u255a', 'boxUr;': '\u2559', 'boxuR;': '\u2558', 'boxur;': '\u2514', 'boxV;': '\u2551', 'boxv;': '\u2502', 'boxVH;': '\u256c', 'boxVh;': 
'\u256b', 'boxvH;': '\u256a', 'boxvh;': '\u253c', 'boxVL;': '\u2563', 'boxVl;': '\u2562', 'boxvL;': '\u2561', 'boxvl;': '\u2524', 'boxVR;': '\u2560', 'boxVr;': '\u255f', 'boxvR;': '\u255e', 'boxvr;': '\u251c', 'bprime;': '\u2035', 'Breve;': '\u02d8', 'breve;': '\u02d8', 'brvbar': '\xa6', 'brvbar;': '\xa6', 'Bscr;': '\u212c', 'bscr;': '\U0001d4b7', 'bsemi;': '\u204f', 'bsim;': '\u223d', 'bsime;': '\u22cd', 'bsol;': '\\', 'bsolb;': '\u29c5', 'bsolhsub;': '\u27c8', 'bull;': '\u2022', 'bullet;': '\u2022', 'bump;': '\u224e', 'bumpE;': '\u2aae', 'bumpe;': '\u224f', 'Bumpeq;': '\u224e', 'bumpeq;': '\u224f', 'Cacute;': '\u0106', 'cacute;': '\u0107', 'Cap;': '\u22d2', 'cap;': '\u2229', 'capand;': '\u2a44', 'capbrcup;': '\u2a49', 'capcap;': '\u2a4b', 'capcup;': '\u2a47', 'capdot;': '\u2a40', 'CapitalDifferentialD;': '\u2145', 'caps;': '\u2229\ufe00', 'caret;': '\u2041', 'caron;': '\u02c7', 'Cayleys;': '\u212d', 'ccaps;': '\u2a4d', 'Ccaron;': '\u010c', 'ccaron;': '\u010d', 'Ccedil': '\xc7', 'ccedil': '\xe7', 'Ccedil;': '\xc7', 'ccedil;': '\xe7', 'Ccirc;': '\u0108', 'ccirc;': '\u0109', 'Cconint;': '\u2230', 'ccups;': '\u2a4c', 'ccupssm;': '\u2a50', 'Cdot;': '\u010a', 'cdot;': '\u010b', 'cedil': '\xb8', 'cedil;': '\xb8', 'Cedilla;': '\xb8', 'cemptyv;': '\u29b2', 'cent': '\xa2', 'cent;': '\xa2', 'CenterDot;': '\xb7', 'centerdot;': '\xb7', 'Cfr;': '\u212d', 'cfr;': '\U0001d520', 'CHcy;': '\u0427', 'chcy;': '\u0447', 'check;': '\u2713', 'checkmark;': '\u2713', 'Chi;': '\u03a7', 'chi;': '\u03c7', 'cir;': '\u25cb', 'circ;': '\u02c6', 'circeq;': '\u2257', 'circlearrowleft;': '\u21ba', 'circlearrowright;': '\u21bb', 'circledast;': '\u229b', 'circledcirc;': '\u229a', 'circleddash;': '\u229d', 'CircleDot;': '\u2299', 'circledR;': '\xae', 'circledS;': '\u24c8', 'CircleMinus;': '\u2296', 'CirclePlus;': '\u2295', 'CircleTimes;': '\u2297', 'cirE;': '\u29c3', 'cire;': '\u2257', 'cirfnint;': '\u2a10', 'cirmid;': '\u2aef', 'cirscir;': '\u29c2', 'ClockwiseContourIntegral;': '\u2232', 
'CloseCurlyDoubleQuote;': '\u201d', 'CloseCurlyQuote;': '\u2019', 'clubs;': '\u2663', 'clubsuit;': '\u2663', 'Colon;': '\u2237', 'colon;': ':', 'Colone;': '\u2a74', 'colone;': '\u2254', 'coloneq;': '\u2254', 'comma;': ',', 'commat;': '@', 'comp;': '\u2201', 'compfn;': '\u2218', 'complement;': '\u2201', 'complexes;': '\u2102', 'cong;': '\u2245', 'congdot;': '\u2a6d', 'Congruent;': '\u2261', 'Conint;': '\u222f', 'conint;': '\u222e', 'ContourIntegral;': '\u222e', 'Copf;': '\u2102', 'copf;': '\U0001d554', 'coprod;': '\u2210', 'Coproduct;': '\u2210', 'COPY': '\xa9', 'copy': '\xa9', 'COPY;': '\xa9', 'copy;': '\xa9', 'copysr;': '\u2117', 'CounterClockwiseContourIntegral;': '\u2233', 'crarr;': '\u21b5', 'Cross;': '\u2a2f', 'cross;': '\u2717', 'Cscr;': '\U0001d49e', 'cscr;': '\U0001d4b8', 'csub;': '\u2acf', 'csube;': '\u2ad1', 'csup;': '\u2ad0', 'csupe;': '\u2ad2', 'ctdot;': '\u22ef', 'cudarrl;': '\u2938', 'cudarrr;': '\u2935', 'cuepr;': '\u22de', 'cuesc;': '\u22df', 'cularr;': '\u21b6', 'cularrp;': '\u293d', 'Cup;': '\u22d3', 'cup;': '\u222a', 'cupbrcap;': '\u2a48', 'CupCap;': '\u224d', 'cupcap;': '\u2a46', 'cupcup;': '\u2a4a', 'cupdot;': '\u228d', 'cupor;': '\u2a45', 'cups;': '\u222a\ufe00', 'curarr;': '\u21b7', 'curarrm;': '\u293c', 'curlyeqprec;': '\u22de', 'curlyeqsucc;': '\u22df', 'curlyvee;': '\u22ce', 'curlywedge;': '\u22cf', 'curren': '\xa4', 'curren;': '\xa4', 'curvearrowleft;': '\u21b6', 'curvearrowright;': '\u21b7', 'cuvee;': '\u22ce', 'cuwed;': '\u22cf', 'cwconint;': '\u2232', 'cwint;': '\u2231', 'cylcty;': '\u232d', 'Dagger;': '\u2021', 'dagger;': '\u2020', 'daleth;': '\u2138', 'Darr;': '\u21a1', 'dArr;': '\u21d3', 'darr;': '\u2193', 'dash;': '\u2010', 'Dashv;': '\u2ae4', 'dashv;': '\u22a3', 'dbkarow;': '\u290f', 'dblac;': '\u02dd', 'Dcaron;': '\u010e', 'dcaron;': '\u010f', 'Dcy;': '\u0414', 'dcy;': '\u0434', 'DD;': '\u2145', 'dd;': '\u2146', 'ddagger;': '\u2021', 'ddarr;': '\u21ca', 'DDotrahd;': '\u2911', 'ddotseq;': '\u2a77', 'deg': '\xb0', 'deg;': '\xb0', 
'Del;': '\u2207', 'Delta;': '\u0394', 'delta;': '\u03b4', 'demptyv;': '\u29b1', 'dfisht;': '\u297f', 'Dfr;': '\U0001d507', 'dfr;': '\U0001d521', 'dHar;': '\u2965', 'dharl;': '\u21c3', 'dharr;': '\u21c2', 'DiacriticalAcute;': '\xb4', 'DiacriticalDot;': '\u02d9', 'DiacriticalDoubleAcute;': '\u02dd', 'DiacriticalGrave;': '`', 'DiacriticalTilde;': '\u02dc', 'diam;': '\u22c4', 'Diamond;': '\u22c4', 'diamond;': '\u22c4', 'diamondsuit;': '\u2666', 'diams;': '\u2666', 'die;': '\xa8', 'DifferentialD;': '\u2146', 'digamma;': '\u03dd', 'disin;': '\u22f2', 'div;': '\xf7', 'divide': '\xf7', 'divide;': '\xf7', 'divideontimes;': '\u22c7', 'divonx;': '\u22c7', 'DJcy;': '\u0402', 'djcy;': '\u0452', 'dlcorn;': '\u231e', 'dlcrop;': '\u230d', 'dollar;': '$', 'Dopf;': '\U0001d53b', 'dopf;': '\U0001d555', 'Dot;': '\xa8', 'dot;': '\u02d9', 'DotDot;': '\u20dc', 'doteq;': '\u2250', 'doteqdot;': '\u2251', 'DotEqual;': '\u2250', 'dotminus;': '\u2238', 'dotplus;': '\u2214', 'dotsquare;': '\u22a1', 'doublebarwedge;': '\u2306', 'DoubleContourIntegral;': '\u222f', 'DoubleDot;': '\xa8', 'DoubleDownArrow;': '\u21d3', 'DoubleLeftArrow;': '\u21d0', 'DoubleLeftRightArrow;': '\u21d4', 'DoubleLeftTee;': '\u2ae4', 'DoubleLongLeftArrow;': '\u27f8', 'DoubleLongLeftRightArrow;': '\u27fa', 'DoubleLongRightArrow;': '\u27f9', 'DoubleRightArrow;': '\u21d2', 'DoubleRightTee;': '\u22a8', 'DoubleUpArrow;': '\u21d1', 'DoubleUpDownArrow;': '\u21d5', 'DoubleVerticalBar;': '\u2225', 'DownArrow;': '\u2193', 'Downarrow;': '\u21d3', 'downarrow;': '\u2193', 'DownArrowBar;': '\u2913', 'DownArrowUpArrow;': '\u21f5', 'DownBreve;': '\u0311', 'downdownarrows;': '\u21ca', 'downharpoonleft;': '\u21c3', 'downharpoonright;': '\u21c2', 'DownLeftRightVector;': '\u2950', 'DownLeftTeeVector;': '\u295e', 'DownLeftVector;': '\u21bd', 'DownLeftVectorBar;': '\u2956', 'DownRightTeeVector;': '\u295f', 'DownRightVector;': '\u21c1', 'DownRightVectorBar;': '\u2957', 'DownTee;': '\u22a4', 'DownTeeArrow;': '\u21a7', 'drbkarow;': '\u2910', 
'drcorn;': '\u231f', 'drcrop;': '\u230c', 'Dscr;': '\U0001d49f', 'dscr;': '\U0001d4b9', 'DScy;': '\u0405', 'dscy;': '\u0455', 'dsol;': '\u29f6', 'Dstrok;': '\u0110', 'dstrok;': '\u0111', 'dtdot;': '\u22f1', 'dtri;': '\u25bf', 'dtrif;': '\u25be', 'duarr;': '\u21f5', 'duhar;': '\u296f', 'dwangle;': '\u29a6', 'DZcy;': '\u040f', 'dzcy;': '\u045f', 'dzigrarr;': '\u27ff', 'Eacute': '\xc9', 'eacute': '\xe9', 'Eacute;': '\xc9', 'eacute;': '\xe9', 'easter;': '\u2a6e', 'Ecaron;': '\u011a', 'ecaron;': '\u011b', 'ecir;': '\u2256', 'Ecirc': '\xca', 'ecirc': '\xea', 'Ecirc;': '\xca', 'ecirc;': '\xea', 'ecolon;': '\u2255', 'Ecy;': '\u042d', 'ecy;': '\u044d', 'eDDot;': '\u2a77', 'Edot;': '\u0116', 'eDot;': '\u2251', 'edot;': '\u0117', 'ee;': '\u2147', 'efDot;': '\u2252', 'Efr;': '\U0001d508', 'efr;': '\U0001d522', 'eg;': '\u2a9a', 'Egrave': '\xc8', 'egrave': '\xe8', 'Egrave;': '\xc8', 'egrave;': '\xe8', 'egs;': '\u2a96', 'egsdot;': '\u2a98', 'el;': '\u2a99', 'Element;': '\u2208', 'elinters;': '\u23e7', 'ell;': '\u2113', 'els;': '\u2a95', 'elsdot;': '\u2a97', 'Emacr;': '\u0112', 'emacr;': '\u0113', 'empty;': '\u2205', 'emptyset;': '\u2205', 'EmptySmallSquare;': '\u25fb', 'emptyv;': '\u2205', 'EmptyVerySmallSquare;': '\u25ab', 'emsp13;': '\u2004', 'emsp14;': '\u2005', 'emsp;': '\u2003', 'ENG;': '\u014a', 'eng;': '\u014b', 'ensp;': '\u2002', 'Eogon;': '\u0118', 'eogon;': '\u0119', 'Eopf;': '\U0001d53c', 'eopf;': '\U0001d556', 'epar;': '\u22d5', 'eparsl;': '\u29e3', 'eplus;': '\u2a71', 'epsi;': '\u03b5', 'Epsilon;': '\u0395', 'epsilon;': '\u03b5', 'epsiv;': '\u03f5', 'eqcirc;': '\u2256', 'eqcolon;': '\u2255', 'eqsim;': '\u2242', 'eqslantgtr;': '\u2a96', 'eqslantless;': '\u2a95', 'Equal;': '\u2a75', 'equals;': '=', 'EqualTilde;': '\u2242', 'equest;': '\u225f', 'Equilibrium;': '\u21cc', 'equiv;': '\u2261', 'equivDD;': '\u2a78', 'eqvparsl;': '\u29e5', 'erarr;': '\u2971', 'erDot;': '\u2253', 'Escr;': '\u2130', 'escr;': '\u212f', 'esdot;': '\u2250', 'Esim;': '\u2a73', 'esim;': '\u2242', 
'Eta;': '\u0397', 'eta;': '\u03b7', 'ETH': '\xd0', 'eth': '\xf0', 'ETH;': '\xd0', 'eth;': '\xf0', 'Euml': '\xcb', 'euml': '\xeb', 'Euml;': '\xcb', 'euml;': '\xeb', 'euro;': '\u20ac', 'excl;': '!', 'exist;': '\u2203', 'Exists;': '\u2203', 'expectation;': '\u2130', 'ExponentialE;': '\u2147', 'exponentiale;': '\u2147', 'fallingdotseq;': '\u2252', 'Fcy;': '\u0424', 'fcy;': '\u0444', 'female;': '\u2640', 'ffilig;': '\ufb03', 'fflig;': '\ufb00', 'ffllig;': '\ufb04', 'Ffr;': '\U0001d509', 'ffr;': '\U0001d523', 'filig;': '\ufb01', 'FilledSmallSquare;': '\u25fc', 'FilledVerySmallSquare;': '\u25aa', 'fjlig;': 'fj', 'flat;': '\u266d', 'fllig;': '\ufb02', 'fltns;': '\u25b1', 'fnof;': '\u0192', 'Fopf;': '\U0001d53d', 'fopf;': '\U0001d557', 'ForAll;': '\u2200', 'forall;': '\u2200', 'fork;': '\u22d4', 'forkv;': '\u2ad9', 'Fouriertrf;': '\u2131', 'fpartint;': '\u2a0d', 'frac12': '\xbd', 'frac12;': '\xbd', 'frac13;': '\u2153', 'frac14': '\xbc', 'frac14;': '\xbc', 'frac15;': '\u2155', 'frac16;': '\u2159', 'frac18;': '\u215b', 'frac23;': '\u2154', 'frac25;': '\u2156', 'frac34': '\xbe', 'frac34;': '\xbe', 'frac35;': '\u2157', 'frac38;': '\u215c', 'frac45;': '\u2158', 'frac56;': '\u215a', 'frac58;': '\u215d', 'frac78;': '\u215e', 'frasl;': '\u2044', 'frown;': '\u2322', 'Fscr;': '\u2131', 'fscr;': '\U0001d4bb', 'gacute;': '\u01f5', 'Gamma;': '\u0393', 'gamma;': '\u03b3', 'Gammad;': '\u03dc', 'gammad;': '\u03dd', 'gap;': '\u2a86', 'Gbreve;': '\u011e', 'gbreve;': '\u011f', 'Gcedil;': '\u0122', 'Gcirc;': '\u011c', 'gcirc;': '\u011d', 'Gcy;': '\u0413', 'gcy;': '\u0433', 'Gdot;': '\u0120', 'gdot;': '\u0121', 'gE;': '\u2267', 'ge;': '\u2265', 'gEl;': '\u2a8c', 'gel;': '\u22db', 'geq;': '\u2265', 'geqq;': '\u2267', 'geqslant;': '\u2a7e', 'ges;': '\u2a7e', 'gescc;': '\u2aa9', 'gesdot;': '\u2a80', 'gesdoto;': '\u2a82', 'gesdotol;': '\u2a84', 'gesl;': '\u22db\ufe00', 'gesles;': '\u2a94', 'Gfr;': '\U0001d50a', 'gfr;': '\U0001d524', 'Gg;': '\u22d9', 'gg;': '\u226b', 'ggg;': '\u22d9', 'gimel;': 
'\u2137', 'GJcy;': '\u0403', 'gjcy;': '\u0453', 'gl;': '\u2277', 'gla;': '\u2aa5', 'glE;': '\u2a92', 'glj;': '\u2aa4', 'gnap;': '\u2a8a', 'gnapprox;': '\u2a8a', 'gnE;': '\u2269', 'gne;': '\u2a88', 'gneq;': '\u2a88', 'gneqq;': '\u2269', 'gnsim;': '\u22e7', 'Gopf;': '\U0001d53e', 'gopf;': '\U0001d558', 'grave;': '`', 'GreaterEqual;': '\u2265', 'GreaterEqualLess;': '\u22db', 'GreaterFullEqual;': '\u2267', 'GreaterGreater;': '\u2aa2', 'GreaterLess;': '\u2277', 'GreaterSlantEqual;': '\u2a7e', 'GreaterTilde;': '\u2273', 'Gscr;': '\U0001d4a2', 'gscr;': '\u210a', 'gsim;': '\u2273', 'gsime;': '\u2a8e', 'gsiml;': '\u2a90', 'GT': '>', 'gt': '>', 'GT;': '>', 'Gt;': '\u226b', 'gt;': '>', 'gtcc;': '\u2aa7', 'gtcir;': '\u2a7a', 'gtdot;': '\u22d7', 'gtlPar;': '\u2995', 'gtquest;': '\u2a7c', 'gtrapprox;': '\u2a86', 'gtrarr;': '\u2978', 'gtrdot;': '\u22d7', 'gtreqless;': '\u22db', 'gtreqqless;': '\u2a8c', 'gtrless;': '\u2277', 'gtrsim;': '\u2273', 'gvertneqq;': '\u2269\ufe00', 'gvnE;': '\u2269\ufe00', 'Hacek;': '\u02c7', 'hairsp;': '\u200a', 'half;': '\xbd', 'hamilt;': '\u210b', 'HARDcy;': '\u042a', 'hardcy;': '\u044a', 'hArr;': '\u21d4', 'harr;': '\u2194', 'harrcir;': '\u2948', 'harrw;': '\u21ad', 'Hat;': '^', 'hbar;': '\u210f', 'Hcirc;': '\u0124', 'hcirc;': '\u0125', 'hearts;': '\u2665', 'heartsuit;': '\u2665', 'hellip;': '\u2026', 'hercon;': '\u22b9', 'Hfr;': '\u210c', 'hfr;': '\U0001d525', 'HilbertSpace;': '\u210b', 'hksearow;': '\u2925', 'hkswarow;': '\u2926', 'hoarr;': '\u21ff', 'homtht;': '\u223b', 'hookleftarrow;': '\u21a9', 'hookrightarrow;': '\u21aa', 'Hopf;': '\u210d', 'hopf;': '\U0001d559', 'horbar;': '\u2015', 'HorizontalLine;': '\u2500', 'Hscr;': '\u210b', 'hscr;': '\U0001d4bd', 'hslash;': '\u210f', 'Hstrok;': '\u0126', 'hstrok;': '\u0127', 'HumpDownHump;': '\u224e', 'HumpEqual;': '\u224f', 'hybull;': '\u2043', 'hyphen;': '\u2010', 'Iacute': '\xcd', 'iacute': '\xed', 'Iacute;': '\xcd', 'iacute;': '\xed', 'ic;': '\u2063', 'Icirc': '\xce', 'icirc': '\xee', 'Icirc;': 
'\xce', 'icirc;': '\xee', 'Icy;': '\u0418', 'icy;': '\u0438', 'Idot;': '\u0130', 'IEcy;': '\u0415', 'iecy;': '\u0435', 'iexcl': '\xa1', 'iexcl;': '\xa1', 'iff;': '\u21d4', 'Ifr;': '\u2111', 'ifr;': '\U0001d526', 'Igrave': '\xcc', 'igrave': '\xec', 'Igrave;': '\xcc', 'igrave;': '\xec', 'ii;': '\u2148', 'iiiint;': '\u2a0c', 'iiint;': '\u222d', 'iinfin;': '\u29dc', 'iiota;': '\u2129', 'IJlig;': '\u0132', 'ijlig;': '\u0133', 'Im;': '\u2111', 'Imacr;': '\u012a', 'imacr;': '\u012b', 'image;': '\u2111', 'ImaginaryI;': '\u2148', 'imagline;': '\u2110', 'imagpart;': '\u2111', 'imath;': '\u0131', 'imof;': '\u22b7', 'imped;': '\u01b5', 'Implies;': '\u21d2', 'in;': '\u2208', 'incare;': '\u2105', 'infin;': '\u221e', 'infintie;': '\u29dd', 'inodot;': '\u0131', 'Int;': '\u222c', 'int;': '\u222b', 'intcal;': '\u22ba', 'integers;': '\u2124', 'Integral;': '\u222b', 'intercal;': '\u22ba', 'Intersection;': '\u22c2', 'intlarhk;': '\u2a17', 'intprod;': '\u2a3c', 'InvisibleComma;': '\u2063', 'InvisibleTimes;': '\u2062', 'IOcy;': '\u0401', 'iocy;': '\u0451', 'Iogon;': '\u012e', 'iogon;': '\u012f', 'Iopf;': '\U0001d540', 'iopf;': '\U0001d55a', 'Iota;': '\u0399', 'iota;': '\u03b9', 'iprod;': '\u2a3c', 'iquest': '\xbf', 'iquest;': '\xbf', 'Iscr;': '\u2110', 'iscr;': '\U0001d4be', 'isin;': '\u2208', 'isindot;': '\u22f5', 'isinE;': '\u22f9', 'isins;': '\u22f4', 'isinsv;': '\u22f3', 'isinv;': '\u2208', 'it;': '\u2062', 'Itilde;': '\u0128', 'itilde;': '\u0129', 'Iukcy;': '\u0406', 'iukcy;': '\u0456', 'Iuml': '\xcf', 'iuml': '\xef', 'Iuml;': '\xcf', 'iuml;': '\xef', 'Jcirc;': '\u0134', 'jcirc;': '\u0135', 'Jcy;': '\u0419', 'jcy;': '\u0439', 'Jfr;': '\U0001d50d', 'jfr;': '\U0001d527', 'jmath;': '\u0237', 'Jopf;': '\U0001d541', 'jopf;': '\U0001d55b', 'Jscr;': '\U0001d4a5', 'jscr;': '\U0001d4bf', 'Jsercy;': '\u0408', 'jsercy;': '\u0458', 'Jukcy;': '\u0404', 'jukcy;': '\u0454', 'Kappa;': '\u039a', 'kappa;': '\u03ba', 'kappav;': '\u03f0', 'Kcedil;': '\u0136', 'kcedil;': '\u0137', 'Kcy;': '\u041a', 
'kcy;': '\u043a', 'Kfr;': '\U0001d50e', 'kfr;': '\U0001d528', 'kgreen;': '\u0138', 'KHcy;': '\u0425', 'khcy;': '\u0445', 'KJcy;': '\u040c', 'kjcy;': '\u045c', 'Kopf;': '\U0001d542', 'kopf;': '\U0001d55c', 'Kscr;': '\U0001d4a6', 'kscr;': '\U0001d4c0', 'lAarr;': '\u21da', 'Lacute;': '\u0139', 'lacute;': '\u013a', 'laemptyv;': '\u29b4', 'lagran;': '\u2112', 'Lambda;': '\u039b', 'lambda;': '\u03bb', 'Lang;': '\u27ea', 'lang;': '\u27e8', 'langd;': '\u2991', 'langle;': '\u27e8', 'lap;': '\u2a85', 'Laplacetrf;': '\u2112', 'laquo': '\xab', 'laquo;': '\xab', 'Larr;': '\u219e', 'lArr;': '\u21d0', 'larr;': '\u2190', 'larrb;': '\u21e4', 'larrbfs;': '\u291f', 'larrfs;': '\u291d', 'larrhk;': '\u21a9', 'larrlp;': '\u21ab', 'larrpl;': '\u2939', 'larrsim;': '\u2973', 'larrtl;': '\u21a2', 'lat;': '\u2aab', 'lAtail;': '\u291b', 'latail;': '\u2919', 'late;': '\u2aad', 'lates;': '\u2aad\ufe00', 'lBarr;': '\u290e', 'lbarr;': '\u290c', 'lbbrk;': '\u2772', 'lbrace;': '{', 'lbrack;': '[', 'lbrke;': '\u298b', 'lbrksld;': '\u298f', 'lbrkslu;': '\u298d', 'Lcaron;': '\u013d', 'lcaron;': '\u013e', 'Lcedil;': '\u013b', 'lcedil;': '\u013c', 'lceil;': '\u2308', 'lcub;': '{', 'Lcy;': '\u041b', 'lcy;': '\u043b', 'ldca;': '\u2936', 'ldquo;': '\u201c', 'ldquor;': '\u201e', 'ldrdhar;': '\u2967', 'ldrushar;': '\u294b', 'ldsh;': '\u21b2', 'lE;': '\u2266', 'le;': '\u2264', 'LeftAngleBracket;': '\u27e8', 'LeftArrow;': '\u2190', 'Leftarrow;': '\u21d0', 'leftarrow;': '\u2190', 'LeftArrowBar;': '\u21e4', 'LeftArrowRightArrow;': '\u21c6', 'leftarrowtail;': '\u21a2', 'LeftCeiling;': '\u2308', 'LeftDoubleBracket;': '\u27e6', 'LeftDownTeeVector;': '\u2961', 'LeftDownVector;': '\u21c3', 'LeftDownVectorBar;': '\u2959', 'LeftFloor;': '\u230a', 'leftharpoondown;': '\u21bd', 'leftharpoonup;': '\u21bc', 'leftleftarrows;': '\u21c7', 'LeftRightArrow;': '\u2194', 'Leftrightarrow;': '\u21d4', 'leftrightarrow;': '\u2194', 'leftrightarrows;': '\u21c6', 'leftrightharpoons;': '\u21cb', 'leftrightsquigarrow;': '\u21ad', 
'LeftRightVector;': '\u294e', 'LeftTee;': '\u22a3', 'LeftTeeArrow;': '\u21a4', 'LeftTeeVector;': '\u295a', 'leftthreetimes;': '\u22cb', 'LeftTriangle;': '\u22b2', 'LeftTriangleBar;': '\u29cf', 'LeftTriangleEqual;': '\u22b4', 'LeftUpDownVector;': '\u2951', 'LeftUpTeeVector;': '\u2960', 'LeftUpVector;': '\u21bf', 'LeftUpVectorBar;': '\u2958', 'LeftVector;': '\u21bc', 'LeftVectorBar;': '\u2952', 'lEg;': '\u2a8b', 'leg;': '\u22da', 'leq;': '\u2264', 'leqq;': '\u2266', 'leqslant;': '\u2a7d', 'les;': '\u2a7d', 'lescc;': '\u2aa8', 'lesdot;': '\u2a7f', 'lesdoto;': '\u2a81', 'lesdotor;': '\u2a83', 'lesg;': '\u22da\ufe00', 'lesges;': '\u2a93', 'lessapprox;': '\u2a85', 'lessdot;': '\u22d6', 'lesseqgtr;': '\u22da', 'lesseqqgtr;': '\u2a8b', 'LessEqualGreater;': '\u22da', 'LessFullEqual;': '\u2266', 'LessGreater;': '\u2276', 'lessgtr;': '\u2276', 'LessLess;': '\u2aa1', 'lesssim;': '\u2272', 'LessSlantEqual;': '\u2a7d', 'LessTilde;': '\u2272', 'lfisht;': '\u297c', 'lfloor;': '\u230a', 'Lfr;': '\U0001d50f', 'lfr;': '\U0001d529', 'lg;': '\u2276', 'lgE;': '\u2a91', 'lHar;': '\u2962', 'lhard;': '\u21bd', 'lharu;': '\u21bc', 'lharul;': '\u296a', 'lhblk;': '\u2584', 'LJcy;': '\u0409', 'ljcy;': '\u0459', 'Ll;': '\u22d8', 'll;': '\u226a', 'llarr;': '\u21c7', 'llcorner;': '\u231e', 'Lleftarrow;': '\u21da', 'llhard;': '\u296b', 'lltri;': '\u25fa', 'Lmidot;': '\u013f', 'lmidot;': '\u0140', 'lmoust;': '\u23b0', 'lmoustache;': '\u23b0', 'lnap;': '\u2a89', 'lnapprox;': '\u2a89', 'lnE;': '\u2268', 'lne;': '\u2a87', 'lneq;': '\u2a87', 'lneqq;': '\u2268', 'lnsim;': '\u22e6', 'loang;': '\u27ec', 'loarr;': '\u21fd', 'lobrk;': '\u27e6', 'LongLeftArrow;': '\u27f5', 'Longleftarrow;': '\u27f8', 'longleftarrow;': '\u27f5', 'LongLeftRightArrow;': '\u27f7', 'Longleftrightarrow;': '\u27fa', 'longleftrightarrow;': '\u27f7', 'longmapsto;': '\u27fc', 'LongRightArrow;': '\u27f6', 'Longrightarrow;': '\u27f9', 'longrightarrow;': '\u27f6', 'looparrowleft;': '\u21ab', 'looparrowright;': '\u21ac', 'lopar;': 
'\u2985', 'Lopf;': '\U0001d543', 'lopf;': '\U0001d55d', 'loplus;': '\u2a2d', 'lotimes;': '\u2a34', 'lowast;': '\u2217', 'lowbar;': '_', 'LowerLeftArrow;': '\u2199', 'LowerRightArrow;': '\u2198', 'loz;': '\u25ca', 'lozenge;': '\u25ca', 'lozf;': '\u29eb', 'lpar;': '(', 'lparlt;': '\u2993', 'lrarr;': '\u21c6', 'lrcorner;': '\u231f', 'lrhar;': '\u21cb', 'lrhard;': '\u296d', 'lrm;': '\u200e', 'lrtri;': '\u22bf', 'lsaquo;': '\u2039', 'Lscr;': '\u2112', 'lscr;': '\U0001d4c1', 'Lsh;': '\u21b0', 'lsh;': '\u21b0', 'lsim;': '\u2272', 'lsime;': '\u2a8d', 'lsimg;': '\u2a8f', 'lsqb;': '[', 'lsquo;': '\u2018', 'lsquor;': '\u201a', 'Lstrok;': '\u0141', 'lstrok;': '\u0142', 'LT': '<', 'lt': '<', 'LT;': '<', 'Lt;': '\u226a', 'lt;': '<', 'ltcc;': '\u2aa6', 'ltcir;': '\u2a79', 'ltdot;': '\u22d6', 'lthree;': '\u22cb', 'ltimes;': '\u22c9', 'ltlarr;': '\u2976', 'ltquest;': '\u2a7b', 'ltri;': '\u25c3', 'ltrie;': '\u22b4', 'ltrif;': '\u25c2', 'ltrPar;': '\u2996', 'lurdshar;': '\u294a', 'luruhar;': '\u2966', 'lvertneqq;': '\u2268\ufe00', 'lvnE;': '\u2268\ufe00', 'macr': '\xaf', 'macr;': '\xaf', 'male;': '\u2642', 'malt;': '\u2720', 'maltese;': '\u2720', 'Map;': '\u2905', 'map;': '\u21a6', 'mapsto;': '\u21a6', 'mapstodown;': '\u21a7', 'mapstoleft;': '\u21a4', 'mapstoup;': '\u21a5', 'marker;': '\u25ae', 'mcomma;': '\u2a29', 'Mcy;': '\u041c', 'mcy;': '\u043c', 'mdash;': '\u2014', 'mDDot;': '\u223a', 'measuredangle;': '\u2221', 'MediumSpace;': '\u205f', 'Mellintrf;': '\u2133', 'Mfr;': '\U0001d510', 'mfr;': '\U0001d52a', 'mho;': '\u2127', 'micro': '\xb5', 'micro;': '\xb5', 'mid;': '\u2223', 'midast;': '*', 'midcir;': '\u2af0', 'middot': '\xb7', 'middot;': '\xb7', 'minus;': '\u2212', 'minusb;': '\u229f', 'minusd;': '\u2238', 'minusdu;': '\u2a2a', 'MinusPlus;': '\u2213', 'mlcp;': '\u2adb', 'mldr;': '\u2026', 'mnplus;': '\u2213', 'models;': '\u22a7', 'Mopf;': '\U0001d544', 'mopf;': '\U0001d55e', 'mp;': '\u2213', 'Mscr;': '\u2133', 'mscr;': '\U0001d4c2', 'mstpos;': '\u223e', 'Mu;': '\u039c', 'mu;': 
'\u03bc', 'multimap;': '\u22b8', 'mumap;': '\u22b8', 'nabla;': '\u2207', 'Nacute;': '\u0143', 'nacute;': '\u0144', 'nang;': '\u2220\u20d2', 'nap;': '\u2249', 'napE;': '\u2a70\u0338', 'napid;': '\u224b\u0338', 'napos;': '\u0149', 'napprox;': '\u2249', 'natur;': '\u266e', 'natural;': '\u266e', 'naturals;': '\u2115', 'nbsp': '\xa0', 'nbsp;': '\xa0', 'nbump;': '\u224e\u0338', 'nbumpe;': '\u224f\u0338', 'ncap;': '\u2a43', 'Ncaron;': '\u0147', 'ncaron;': '\u0148', 'Ncedil;': '\u0145', 'ncedil;': '\u0146', 'ncong;': '\u2247', 'ncongdot;': '\u2a6d\u0338', 'ncup;': '\u2a42', 'Ncy;': '\u041d', 'ncy;': '\u043d', 'ndash;': '\u2013', 'ne;': '\u2260', 'nearhk;': '\u2924', 'neArr;': '\u21d7', 'nearr;': '\u2197', 'nearrow;': '\u2197', 'nedot;': '\u2250\u0338', 'NegativeMediumSpace;': '\u200b', 'NegativeThickSpace;': '\u200b', 'NegativeThinSpace;': '\u200b', 'NegativeVeryThinSpace;': '\u200b', 'nequiv;': '\u2262', 'nesear;': '\u2928', 'nesim;': '\u2242\u0338', 'NestedGreaterGreater;': '\u226b', 'NestedLessLess;': '\u226a', 'NewLine;': '\n', 'nexist;': '\u2204', 'nexists;': '\u2204', 'Nfr;': '\U0001d511', 'nfr;': '\U0001d52b', 'ngE;': '\u2267\u0338', 'nge;': '\u2271', 'ngeq;': '\u2271', 'ngeqq;': '\u2267\u0338', 'ngeqslant;': '\u2a7e\u0338', 'nges;': '\u2a7e\u0338', 'nGg;': '\u22d9\u0338', 'ngsim;': '\u2275', 'nGt;': '\u226b\u20d2', 'ngt;': '\u226f', 'ngtr;': '\u226f', 'nGtv;': '\u226b\u0338', 'nhArr;': '\u21ce', 'nharr;': '\u21ae', 'nhpar;': '\u2af2', 'ni;': '\u220b', 'nis;': '\u22fc', 'nisd;': '\u22fa', 'niv;': '\u220b', 'NJcy;': '\u040a', 'njcy;': '\u045a', 'nlArr;': '\u21cd', 'nlarr;': '\u219a', 'nldr;': '\u2025', 'nlE;': '\u2266\u0338', 'nle;': '\u2270', 'nLeftarrow;': '\u21cd', 'nleftarrow;': '\u219a', 'nLeftrightarrow;': '\u21ce', 'nleftrightarrow;': '\u21ae', 'nleq;': '\u2270', 'nleqq;': '\u2266\u0338', 'nleqslant;': '\u2a7d\u0338', 'nles;': '\u2a7d\u0338', 'nless;': '\u226e', 'nLl;': '\u22d8\u0338', 'nlsim;': '\u2274', 'nLt;': '\u226a\u20d2', 'nlt;': '\u226e', 'nltri;': 
'\u22ea', 'nltrie;': '\u22ec', 'nLtv;': '\u226a\u0338', 'nmid;': '\u2224', 'NoBreak;': '\u2060', 'NonBreakingSpace;': '\xa0', 'Nopf;': '\u2115', 'nopf;': '\U0001d55f', 'not': '\xac', 'Not;': '\u2aec', 'not;': '\xac', 'NotCongruent;': '\u2262', 'NotCupCap;': '\u226d', 'NotDoubleVerticalBar;': '\u2226', 'NotElement;': '\u2209', 'NotEqual;': '\u2260', 'NotEqualTilde;': '\u2242\u0338', 'NotExists;': '\u2204', 'NotGreater;': '\u226f', 'NotGreaterEqual;': '\u2271', 'NotGreaterFullEqual;': '\u2267\u0338', 'NotGreaterGreater;': '\u226b\u0338', 'NotGreaterLess;': '\u2279', 'NotGreaterSlantEqual;': '\u2a7e\u0338', 'NotGreaterTilde;': '\u2275', 'NotHumpDownHump;': '\u224e\u0338', 'NotHumpEqual;': '\u224f\u0338', 'notin;': '\u2209', 'notindot;': '\u22f5\u0338', 'notinE;': '\u22f9\u0338', 'notinva;': '\u2209', 'notinvb;': '\u22f7', 'notinvc;': '\u22f6', 'NotLeftTriangle;': '\u22ea', 'NotLeftTriangleBar;': '\u29cf\u0338', 'NotLeftTriangleEqual;': '\u22ec', 'NotLess;': '\u226e', 'NotLessEqual;': '\u2270', 'NotLessGreater;': '\u2278', 'NotLessLess;': '\u226a\u0338', 'NotLessSlantEqual;': '\u2a7d\u0338', 'NotLessTilde;': '\u2274', 'NotNestedGreaterGreater;': '\u2aa2\u0338', 'NotNestedLessLess;': '\u2aa1\u0338', 'notni;': '\u220c', 'notniva;': '\u220c', 'notnivb;': '\u22fe', 'notnivc;': '\u22fd', 'NotPrecedes;': '\u2280', 'NotPrecedesEqual;': '\u2aaf\u0338', 'NotPrecedesSlantEqual;': '\u22e0', 'NotReverseElement;': '\u220c', 'NotRightTriangle;': '\u22eb', 'NotRightTriangleBar;': '\u29d0\u0338', 'NotRightTriangleEqual;': '\u22ed', 'NotSquareSubset;': '\u228f\u0338', 'NotSquareSubsetEqual;': '\u22e2', 'NotSquareSuperset;': '\u2290\u0338', 'NotSquareSupersetEqual;': '\u22e3', 'NotSubset;': '\u2282\u20d2', 'NotSubsetEqual;': '\u2288', 'NotSucceeds;': '\u2281', 'NotSucceedsEqual;': '\u2ab0\u0338', 'NotSucceedsSlantEqual;': '\u22e1', 'NotSucceedsTilde;': '\u227f\u0338', 'NotSuperset;': '\u2283\u20d2', 'NotSupersetEqual;': '\u2289', 'NotTilde;': '\u2241', 'NotTildeEqual;': '\u2244', 
'NotTildeFullEqual;': '\u2247', 'NotTildeTilde;': '\u2249', 'NotVerticalBar;': '\u2224', 'npar;': '\u2226', 'nparallel;': '\u2226', 'nparsl;': '\u2afd\u20e5', 'npart;': '\u2202\u0338', 'npolint;': '\u2a14', 'npr;': '\u2280', 'nprcue;': '\u22e0', 'npre;': '\u2aaf\u0338', 'nprec;': '\u2280', 'npreceq;': '\u2aaf\u0338', 'nrArr;': '\u21cf', 'nrarr;': '\u219b', 'nrarrc;': '\u2933\u0338', 'nrarrw;': '\u219d\u0338', 'nRightarrow;': '\u21cf', 'nrightarrow;': '\u219b', 'nrtri;': '\u22eb', 'nrtrie;': '\u22ed', 'nsc;': '\u2281', 'nsccue;': '\u22e1', 'nsce;': '\u2ab0\u0338', 'Nscr;': '\U0001d4a9', 'nscr;': '\U0001d4c3', 'nshortmid;': '\u2224', 'nshortparallel;': '\u2226', 'nsim;': '\u2241', 'nsime;': '\u2244', 'nsimeq;': '\u2244', 'nsmid;': '\u2224', 'nspar;': '\u2226', 'nsqsube;': '\u22e2', 'nsqsupe;': '\u22e3', 'nsub;': '\u2284', 'nsubE;': '\u2ac5\u0338', 'nsube;': '\u2288', 'nsubset;': '\u2282\u20d2', 'nsubseteq;': '\u2288', 'nsubseteqq;': '\u2ac5\u0338', 'nsucc;': '\u2281', 'nsucceq;': '\u2ab0\u0338', 'nsup;': '\u2285', 'nsupE;': '\u2ac6\u0338', 'nsupe;': '\u2289', 'nsupset;': '\u2283\u20d2', 'nsupseteq;': '\u2289', 'nsupseteqq;': '\u2ac6\u0338', 'ntgl;': '\u2279', 'Ntilde': '\xd1', 'ntilde': '\xf1', 'Ntilde;': '\xd1', 'ntilde;': '\xf1', 'ntlg;': '\u2278', 'ntriangleleft;': '\u22ea', 'ntrianglelefteq;': '\u22ec', 'ntriangleright;': '\u22eb', 'ntrianglerighteq;': '\u22ed', 'Nu;': '\u039d', 'nu;': '\u03bd', 'num;': '#', 'numero;': '\u2116', 'numsp;': '\u2007', 'nvap;': '\u224d\u20d2', 'nVDash;': '\u22af', 'nVdash;': '\u22ae', 'nvDash;': '\u22ad', 'nvdash;': '\u22ac', 'nvge;': '\u2265\u20d2', 'nvgt;': '>\u20d2', 'nvHarr;': '\u2904', 'nvinfin;': '\u29de', 'nvlArr;': '\u2902', 'nvle;': '\u2264\u20d2', 'nvlt;': '<\u20d2', 'nvltrie;': '\u22b4\u20d2', 'nvrArr;': '\u2903', 'nvrtrie;': '\u22b5\u20d2', 'nvsim;': '\u223c\u20d2', 'nwarhk;': '\u2923', 'nwArr;': '\u21d6', 'nwarr;': '\u2196', 'nwarrow;': '\u2196', 'nwnear;': '\u2927', 'Oacute': '\xd3', 'oacute': '\xf3', 'Oacute;': '\xd3', 
'oacute;': '\xf3', 'oast;': '\u229b', 'ocir;': '\u229a', 'Ocirc': '\xd4', 'ocirc': '\xf4', 'Ocirc;': '\xd4', 'ocirc;': '\xf4', 'Ocy;': '\u041e', 'ocy;': '\u043e', 'odash;': '\u229d', 'Odblac;': '\u0150', 'odblac;': '\u0151', 'odiv;': '\u2a38', 'odot;': '\u2299', 'odsold;': '\u29bc', 'OElig;': '\u0152', 'oelig;': '\u0153', 'ofcir;': '\u29bf', 'Ofr;': '\U0001d512', 'ofr;': '\U0001d52c', 'ogon;': '\u02db', 'Ograve': '\xd2', 'ograve': '\xf2', 'Ograve;': '\xd2', 'ograve;': '\xf2', 'ogt;': '\u29c1', 'ohbar;': '\u29b5', 'ohm;': '\u03a9', 'oint;': '\u222e', 'olarr;': '\u21ba', 'olcir;': '\u29be', 'olcross;': '\u29bb', 'oline;': '\u203e', 'olt;': '\u29c0', 'Omacr;': '\u014c', 'omacr;': '\u014d', 'Omega;': '\u03a9', 'omega;': '\u03c9', 'Omicron;': '\u039f', 'omicron;': '\u03bf', 'omid;': '\u29b6', 'ominus;': '\u2296', 'Oopf;': '\U0001d546', 'oopf;': '\U0001d560', 'opar;': '\u29b7', 'OpenCurlyDoubleQuote;': '\u201c', 'OpenCurlyQuote;': '\u2018', 'operp;': '\u29b9', 'oplus;': '\u2295', 'Or;': '\u2a54', 'or;': '\u2228', 'orarr;': '\u21bb', 'ord;': '\u2a5d', 'order;': '\u2134', 'orderof;': '\u2134', 'ordf': '\xaa', 'ordf;': '\xaa', 'ordm': '\xba', 'ordm;': '\xba', 'origof;': '\u22b6', 'oror;': '\u2a56', 'orslope;': '\u2a57', 'orv;': '\u2a5b', 'oS;': '\u24c8', 'Oscr;': '\U0001d4aa', 'oscr;': '\u2134', 'Oslash': '\xd8', 'oslash': '\xf8', 'Oslash;': '\xd8', 'oslash;': '\xf8', 'osol;': '\u2298', 'Otilde': '\xd5', 'otilde': '\xf5', 'Otilde;': '\xd5', 'otilde;': '\xf5', 'Otimes;': '\u2a37', 'otimes;': '\u2297', 'otimesas;': '\u2a36', 'Ouml': '\xd6', 'ouml': '\xf6', 'Ouml;': '\xd6', 'ouml;': '\xf6', 'ovbar;': '\u233d', 'OverBar;': '\u203e', 'OverBrace;': '\u23de', 'OverBracket;': '\u23b4', 'OverParenthesis;': '\u23dc', 'par;': '\u2225', 'para': '\xb6', 'para;': '\xb6', 'parallel;': '\u2225', 'parsim;': '\u2af3', 'parsl;': '\u2afd', 'part;': '\u2202', 'PartialD;': '\u2202', 'Pcy;': '\u041f', 'pcy;': '\u043f', 'percnt;': '%', 'period;': '.', 'permil;': '\u2030', 'perp;': '\u22a5', 
'pertenk;': '\u2031', 'Pfr;': '\U0001d513', 'pfr;': '\U0001d52d', 'Phi;': '\u03a6', 'phi;': '\u03c6', 'phiv;': '\u03d5', 'phmmat;': '\u2133', 'phone;': '\u260e', 'Pi;': '\u03a0', 'pi;': '\u03c0', 'pitchfork;': '\u22d4', 'piv;': '\u03d6', 'planck;': '\u210f', 'planckh;': '\u210e', 'plankv;': '\u210f', 'plus;': '+', 'plusacir;': '\u2a23', 'plusb;': '\u229e', 'pluscir;': '\u2a22', 'plusdo;': '\u2214', 'plusdu;': '\u2a25', 'pluse;': '\u2a72', 'PlusMinus;': '\xb1', 'plusmn': '\xb1', 'plusmn;': '\xb1', 'plussim;': '\u2a26', 'plustwo;': '\u2a27', 'pm;': '\xb1', 'Poincareplane;': '\u210c', 'pointint;': '\u2a15', 'Popf;': '\u2119', 'popf;': '\U0001d561', 'pound': '\xa3', 'pound;': '\xa3', 'Pr;': '\u2abb', 'pr;': '\u227a', 'prap;': '\u2ab7', 'prcue;': '\u227c', 'prE;': '\u2ab3', 'pre;': '\u2aaf', 'prec;': '\u227a', 'precapprox;': '\u2ab7', 'preccurlyeq;': '\u227c', 'Precedes;': '\u227a', 'PrecedesEqual;': '\u2aaf', 'PrecedesSlantEqual;': '\u227c', 'PrecedesTilde;': '\u227e', 'preceq;': '\u2aaf', 'precnapprox;': '\u2ab9', 'precneqq;': '\u2ab5', 'precnsim;': '\u22e8', 'precsim;': '\u227e', 'Prime;': '\u2033', 'prime;': '\u2032', 'primes;': '\u2119', 'prnap;': '\u2ab9', 'prnE;': '\u2ab5', 'prnsim;': '\u22e8', 'prod;': '\u220f', 'Product;': '\u220f', 'profalar;': '\u232e', 'profline;': '\u2312', 'profsurf;': '\u2313', 'prop;': '\u221d', 'Proportion;': '\u2237', 'Proportional;': '\u221d', 'propto;': '\u221d', 'prsim;': '\u227e', 'prurel;': '\u22b0', 'Pscr;': '\U0001d4ab', 'pscr;': '\U0001d4c5', 'Psi;': '\u03a8', 'psi;': '\u03c8', 'puncsp;': '\u2008', 'Qfr;': '\U0001d514', 'qfr;': '\U0001d52e', 'qint;': '\u2a0c', 'Qopf;': '\u211a', 'qopf;': '\U0001d562', 'qprime;': '\u2057', 'Qscr;': '\U0001d4ac', 'qscr;': '\U0001d4c6', 'quaternions;': '\u210d', 'quatint;': '\u2a16', 'quest;': '?', 'questeq;': '\u225f', 'QUOT': '"', 'quot': '"', 'QUOT;': '"', 'quot;': '"', 'rAarr;': '\u21db', 'race;': '\u223d\u0331', 'Racute;': '\u0154', 'racute;': '\u0155', 'radic;': '\u221a', 'raemptyv;': 
'\u29b3', 'Rang;': '\u27eb', 'rang;': '\u27e9', 'rangd;': '\u2992', 'range;': '\u29a5', 'rangle;': '\u27e9', 'raquo': '\xbb', 'raquo;': '\xbb', 'Rarr;': '\u21a0', 'rArr;': '\u21d2', 'rarr;': '\u2192', 'rarrap;': '\u2975', 'rarrb;': '\u21e5', 'rarrbfs;': '\u2920', 'rarrc;': '\u2933', 'rarrfs;': '\u291e', 'rarrhk;': '\u21aa', 'rarrlp;': '\u21ac', 'rarrpl;': '\u2945', 'rarrsim;': '\u2974', 'Rarrtl;': '\u2916', 'rarrtl;': '\u21a3', 'rarrw;': '\u219d', 'rAtail;': '\u291c', 'ratail;': '\u291a', 'ratio;': '\u2236', 'rationals;': '\u211a', 'RBarr;': '\u2910', 'rBarr;': '\u290f', 'rbarr;': '\u290d', 'rbbrk;': '\u2773', 'rbrace;': '}', 'rbrack;': ']', 'rbrke;': '\u298c', 'rbrksld;': '\u298e', 'rbrkslu;': '\u2990', 'Rcaron;': '\u0158', 'rcaron;': '\u0159', 'Rcedil;': '\u0156', 'rcedil;': '\u0157', 'rceil;': '\u2309', 'rcub;': '}', 'Rcy;': '\u0420', 'rcy;': '\u0440', 'rdca;': '\u2937', 'rdldhar;': '\u2969', 'rdquo;': '\u201d', 'rdquor;': '\u201d', 'rdsh;': '\u21b3', 'Re;': '\u211c', 'real;': '\u211c', 'realine;': '\u211b', 'realpart;': '\u211c', 'reals;': '\u211d', 'rect;': '\u25ad', 'REG': '\xae', 'reg': '\xae', 'REG;': '\xae', 'reg;': '\xae', 'ReverseElement;': '\u220b', 'ReverseEquilibrium;': '\u21cb', 'ReverseUpEquilibrium;': '\u296f', 'rfisht;': '\u297d', 'rfloor;': '\u230b', 'Rfr;': '\u211c', 'rfr;': '\U0001d52f', 'rHar;': '\u2964', 'rhard;': '\u21c1', 'rharu;': '\u21c0', 'rharul;': '\u296c', 'Rho;': '\u03a1', 'rho;': '\u03c1', 'rhov;': '\u03f1', 'RightAngleBracket;': '\u27e9', 'RightArrow;': '\u2192', 'Rightarrow;': '\u21d2', 'rightarrow;': '\u2192', 'RightArrowBar;': '\u21e5', 'RightArrowLeftArrow;': '\u21c4', 'rightarrowtail;': '\u21a3', 'RightCeiling;': '\u2309', 'RightDoubleBracket;': '\u27e7', 'RightDownTeeVector;': '\u295d', 'RightDownVector;': '\u21c2', 'RightDownVectorBar;': '\u2955', 'RightFloor;': '\u230b', 'rightharpoondown;': '\u21c1', 'rightharpoonup;': '\u21c0', 'rightleftarrows;': '\u21c4', 'rightleftharpoons;': '\u21cc', 'rightrightarrows;': '\u21c9', 
'rightsquigarrow;': '\u219d', 'RightTee;': '\u22a2', 'RightTeeArrow;': '\u21a6', 'RightTeeVector;': '\u295b', 'rightthreetimes;': '\u22cc', 'RightTriangle;': '\u22b3', 'RightTriangleBar;': '\u29d0', 'RightTriangleEqual;': '\u22b5', 'RightUpDownVector;': '\u294f', 'RightUpTeeVector;': '\u295c', 'RightUpVector;': '\u21be', 'RightUpVectorBar;': '\u2954', 'RightVector;': '\u21c0', 'RightVectorBar;': '\u2953', 'ring;': '\u02da', 'risingdotseq;': '\u2253', 'rlarr;': '\u21c4', 'rlhar;': '\u21cc', 'rlm;': '\u200f', 'rmoust;': '\u23b1', 'rmoustache;': '\u23b1', 'rnmid;': '\u2aee', 'roang;': '\u27ed', 'roarr;': '\u21fe', 'robrk;': '\u27e7', 'ropar;': '\u2986', 'Ropf;': '\u211d', 'ropf;': '\U0001d563', 'roplus;': '\u2a2e', 'rotimes;': '\u2a35', 'RoundImplies;': '\u2970', 'rpar;': ')', 'rpargt;': '\u2994', 'rppolint;': '\u2a12', 'rrarr;': '\u21c9', 'Rrightarrow;': '\u21db', 'rsaquo;': '\u203a', 'Rscr;': '\u211b', 'rscr;': '\U0001d4c7', 'Rsh;': '\u21b1', 'rsh;': '\u21b1', 'rsqb;': ']', 'rsquo;': '\u2019', 'rsquor;': '\u2019', 'rthree;': '\u22cc', 'rtimes;': '\u22ca', 'rtri;': '\u25b9', 'rtrie;': '\u22b5', 'rtrif;': '\u25b8', 'rtriltri;': '\u29ce', 'RuleDelayed;': '\u29f4', 'ruluhar;': '\u2968', 'rx;': '\u211e', 'Sacute;': '\u015a', 'sacute;': '\u015b', 'sbquo;': '\u201a', 'Sc;': '\u2abc', 'sc;': '\u227b', 'scap;': '\u2ab8', 'Scaron;': '\u0160', 'scaron;': '\u0161', 'sccue;': '\u227d', 'scE;': '\u2ab4', 'sce;': '\u2ab0', 'Scedil;': '\u015e', 'scedil;': '\u015f', 'Scirc;': '\u015c', 'scirc;': '\u015d', 'scnap;': '\u2aba', 'scnE;': '\u2ab6', 'scnsim;': '\u22e9', 'scpolint;': '\u2a13', 'scsim;': '\u227f', 'Scy;': '\u0421', 'scy;': '\u0441', 'sdot;': '\u22c5', 'sdotb;': '\u22a1', 'sdote;': '\u2a66', 'searhk;': '\u2925', 'seArr;': '\u21d8', 'searr;': '\u2198', 'searrow;': '\u2198', 'sect': '\xa7', 'sect;': '\xa7', 'semi;': ';', 'seswar;': '\u2929', 'setminus;': '\u2216', 'setmn;': '\u2216', 'sext;': '\u2736', 'Sfr;': '\U0001d516', 'sfr;': '\U0001d530', 'sfrown;': '\u2322', 'sharp;': 
'\u266f', 'SHCHcy;': '\u0429', 'shchcy;': '\u0449', 'SHcy;': '\u0428', 'shcy;': '\u0448', 'ShortDownArrow;': '\u2193', 'ShortLeftArrow;': '\u2190', 'shortmid;': '\u2223', 'shortparallel;': '\u2225', 'ShortRightArrow;': '\u2192', 'ShortUpArrow;': '\u2191', 'shy': '\xad', 'shy;': '\xad', 'Sigma;': '\u03a3', 'sigma;': '\u03c3', 'sigmaf;': '\u03c2', 'sigmav;': '\u03c2', 'sim;': '\u223c', 'simdot;': '\u2a6a', 'sime;': '\u2243', 'simeq;': '\u2243', 'simg;': '\u2a9e', 'simgE;': '\u2aa0', 'siml;': '\u2a9d', 'simlE;': '\u2a9f', 'simne;': '\u2246', 'simplus;': '\u2a24', 'simrarr;': '\u2972', 'slarr;': '\u2190', 'SmallCircle;': '\u2218', 'smallsetminus;': '\u2216', 'smashp;': '\u2a33', 'smeparsl;': '\u29e4', 'smid;': '\u2223', 'smile;': '\u2323', 'smt;': '\u2aaa', 'smte;': '\u2aac', 'smtes;': '\u2aac\ufe00', 'SOFTcy;': '\u042c', 'softcy;': '\u044c', 'sol;': '/', 'solb;': '\u29c4', 'solbar;': '\u233f', 'Sopf;': '\U0001d54a', 'sopf;': '\U0001d564', 'spades;': '\u2660', 'spadesuit;': '\u2660', 'spar;': '\u2225', 'sqcap;': '\u2293', 'sqcaps;': '\u2293\ufe00', 'sqcup;': '\u2294', 'sqcups;': '\u2294\ufe00', 'Sqrt;': '\u221a', 'sqsub;': '\u228f', 'sqsube;': '\u2291', 'sqsubset;': '\u228f', 'sqsubseteq;': '\u2291', 'sqsup;': '\u2290', 'sqsupe;': '\u2292', 'sqsupset;': '\u2290', 'sqsupseteq;': '\u2292', 'squ;': '\u25a1', 'Square;': '\u25a1', 'square;': '\u25a1', 'SquareIntersection;': '\u2293', 'SquareSubset;': '\u228f', 'SquareSubsetEqual;': '\u2291', 'SquareSuperset;': '\u2290', 'SquareSupersetEqual;': '\u2292', 'SquareUnion;': '\u2294', 'squarf;': '\u25aa', 'squf;': '\u25aa', 'srarr;': '\u2192', 'Sscr;': '\U0001d4ae', 'sscr;': '\U0001d4c8', 'ssetmn;': '\u2216', 'ssmile;': '\u2323', 'sstarf;': '\u22c6', 'Star;': '\u22c6', 'star;': '\u2606', 'starf;': '\u2605', 'straightepsilon;': '\u03f5', 'straightphi;': '\u03d5', 'strns;': '\xaf', 'Sub;': '\u22d0', 'sub;': '\u2282', 'subdot;': '\u2abd', 'subE;': '\u2ac5', 'sube;': '\u2286', 'subedot;': '\u2ac3', 'submult;': '\u2ac1', 'subnE;': 
'\u2acb', 'subne;': '\u228a', 'subplus;': '\u2abf', 'subrarr;': '\u2979', 'Subset;': '\u22d0', 'subset;': '\u2282', 'subseteq;': '\u2286', 'subseteqq;': '\u2ac5', 'SubsetEqual;': '\u2286', 'subsetneq;': '\u228a', 'subsetneqq;': '\u2acb', 'subsim;': '\u2ac7', 'subsub;': '\u2ad5', 'subsup;': '\u2ad3', 'succ;': '\u227b', 'succapprox;': '\u2ab8', 'succcurlyeq;': '\u227d', 'Succeeds;': '\u227b', 'SucceedsEqual;': '\u2ab0', 'SucceedsSlantEqual;': '\u227d', 'SucceedsTilde;': '\u227f', 'succeq;': '\u2ab0', 'succnapprox;': '\u2aba', 'succneqq;': '\u2ab6', 'succnsim;': '\u22e9', 'succsim;': '\u227f', 'SuchThat;': '\u220b', 'Sum;': '\u2211', 'sum;': '\u2211', 'sung;': '\u266a', 'sup1': '\xb9', 'sup1;': '\xb9', 'sup2': '\xb2', 'sup2;': '\xb2', 'sup3': '\xb3', 'sup3;': '\xb3', 'Sup;': '\u22d1', 'sup;': '\u2283', 'supdot;': '\u2abe', 'supdsub;': '\u2ad8', 'supE;': '\u2ac6', 'supe;': '\u2287', 'supedot;': '\u2ac4', 'Superset;': '\u2283', 'SupersetEqual;': '\u2287', 'suphsol;': '\u27c9', 'suphsub;': '\u2ad7', 'suplarr;': '\u297b', 'supmult;': '\u2ac2', 'supnE;': '\u2acc', 'supne;': '\u228b', 'supplus;': '\u2ac0', 'Supset;': '\u22d1', 'supset;': '\u2283', 'supseteq;': '\u2287', 'supseteqq;': '\u2ac6', 'supsetneq;': '\u228b', 'supsetneqq;': '\u2acc', 'supsim;': '\u2ac8', 'supsub;': '\u2ad4', 'supsup;': '\u2ad6', 'swarhk;': '\u2926', 'swArr;': '\u21d9', 'swarr;': '\u2199', 'swarrow;': '\u2199', 'swnwar;': '\u292a', 'szlig': '\xdf', 'szlig;': '\xdf', 'Tab;': '\t', 'target;': '\u2316', 'Tau;': '\u03a4', 'tau;': '\u03c4', 'tbrk;': '\u23b4', 'Tcaron;': '\u0164', 'tcaron;': '\u0165', 'Tcedil;': '\u0162', 'tcedil;': '\u0163', 'Tcy;': '\u0422', 'tcy;': '\u0442', 'tdot;': '\u20db', 'telrec;': '\u2315', 'Tfr;': '\U0001d517', 'tfr;': '\U0001d531', 'there4;': '\u2234', 'Therefore;': '\u2234', 'therefore;': '\u2234', 'Theta;': '\u0398', 'theta;': '\u03b8', 'thetasym;': '\u03d1', 'thetav;': '\u03d1', 'thickapprox;': '\u2248', 'thicksim;': '\u223c', 'ThickSpace;': '\u205f\u200a', 'thinsp;': 
'\u2009', 'ThinSpace;': '\u2009', 'thkap;': '\u2248', 'thksim;': '\u223c', 'THORN': '\xde', 'thorn': '\xfe', 'THORN;': '\xde', 'thorn;': '\xfe', 'Tilde;': '\u223c', 'tilde;': '\u02dc', 'TildeEqual;': '\u2243', 'TildeFullEqual;': '\u2245', 'TildeTilde;': '\u2248', 'times': '\xd7', 'times;': '\xd7', 'timesb;': '\u22a0', 'timesbar;': '\u2a31', 'timesd;': '\u2a30', 'tint;': '\u222d', 'toea;': '\u2928', 'top;': '\u22a4', 'topbot;': '\u2336', 'topcir;': '\u2af1', 'Topf;': '\U0001d54b', 'topf;': '\U0001d565', 'topfork;': '\u2ada', 'tosa;': '\u2929', 'tprime;': '\u2034', 'TRADE;': '\u2122', 'trade;': '\u2122', 'triangle;': '\u25b5', 'triangledown;': '\u25bf', 'triangleleft;': '\u25c3', 'trianglelefteq;': '\u22b4', 'triangleq;': '\u225c', 'triangleright;': '\u25b9', 'trianglerighteq;': '\u22b5', 'tridot;': '\u25ec', 'trie;': '\u225c', 'triminus;': '\u2a3a', 'TripleDot;': '\u20db', 'triplus;': '\u2a39', 'trisb;': '\u29cd', 'tritime;': '\u2a3b', 'trpezium;': '\u23e2', 'Tscr;': '\U0001d4af', 'tscr;': '\U0001d4c9', 'TScy;': '\u0426', 'tscy;': '\u0446', 'TSHcy;': '\u040b', 'tshcy;': '\u045b', 'Tstrok;': '\u0166', 'tstrok;': '\u0167', 'twixt;': '\u226c', 'twoheadleftarrow;': '\u219e', 'twoheadrightarrow;': '\u21a0', 'Uacute': '\xda', 'uacute': '\xfa', 'Uacute;': '\xda', 'uacute;': '\xfa', 'Uarr;': '\u219f', 'uArr;': '\u21d1', 'uarr;': '\u2191', 'Uarrocir;': '\u2949', 'Ubrcy;': '\u040e', 'ubrcy;': '\u045e', 'Ubreve;': '\u016c', 'ubreve;': '\u016d', 'Ucirc': '\xdb', 'ucirc': '\xfb', 'Ucirc;': '\xdb', 'ucirc;': '\xfb', 'Ucy;': '\u0423', 'ucy;': '\u0443', 'udarr;': '\u21c5', 'Udblac;': '\u0170', 'udblac;': '\u0171', 'udhar;': '\u296e', 'ufisht;': '\u297e', 'Ufr;': '\U0001d518', 'ufr;': '\U0001d532', 'Ugrave': '\xd9', 'ugrave': '\xf9', 'Ugrave;': '\xd9', 'ugrave;': '\xf9', 'uHar;': '\u2963', 'uharl;': '\u21bf', 'uharr;': '\u21be', 'uhblk;': '\u2580', 'ulcorn;': '\u231c', 'ulcorner;': '\u231c', 'ulcrop;': '\u230f', 'ultri;': '\u25f8', 'Umacr;': '\u016a', 'umacr;': '\u016b', 'uml': 
'\xa8', 'uml;': '\xa8', 'UnderBar;': '_', 'UnderBrace;': '\u23df', 'UnderBracket;': '\u23b5', 'UnderParenthesis;': '\u23dd', 'Union;': '\u22c3', 'UnionPlus;': '\u228e', 'Uogon;': '\u0172', 'uogon;': '\u0173', 'Uopf;': '\U0001d54c', 'uopf;': '\U0001d566', 'UpArrow;': '\u2191', 'Uparrow;': '\u21d1', 'uparrow;': '\u2191', 'UpArrowBar;': '\u2912', 'UpArrowDownArrow;': '\u21c5', 'UpDownArrow;': '\u2195', 'Updownarrow;': '\u21d5', 'updownarrow;': '\u2195', 'UpEquilibrium;': '\u296e', 'upharpoonleft;': '\u21bf', 'upharpoonright;': '\u21be', 'uplus;': '\u228e', 'UpperLeftArrow;': '\u2196', 'UpperRightArrow;': '\u2197', 'Upsi;': '\u03d2', 'upsi;': '\u03c5', 'upsih;': '\u03d2', 'Upsilon;': '\u03a5', 'upsilon;': '\u03c5', 'UpTee;': '\u22a5', 'UpTeeArrow;': '\u21a5', 'upuparrows;': '\u21c8', 'urcorn;': '\u231d', 'urcorner;': '\u231d', 'urcrop;': '\u230e', 'Uring;': '\u016e', 'uring;': '\u016f', 'urtri;': '\u25f9', 'Uscr;': '\U0001d4b0', 'uscr;': '\U0001d4ca', 'utdot;': '\u22f0', 'Utilde;': '\u0168', 'utilde;': '\u0169', 'utri;': '\u25b5', 'utrif;': '\u25b4', 'uuarr;': '\u21c8', 'Uuml': '\xdc', 'uuml': '\xfc', 'Uuml;': '\xdc', 'uuml;': '\xfc', 'uwangle;': '\u29a7', 'vangrt;': '\u299c', 'varepsilon;': '\u03f5', 'varkappa;': '\u03f0', 'varnothing;': '\u2205', 'varphi;': '\u03d5', 'varpi;': '\u03d6', 'varpropto;': '\u221d', 'vArr;': '\u21d5', 'varr;': '\u2195', 'varrho;': '\u03f1', 'varsigma;': '\u03c2', 'varsubsetneq;': '\u228a\ufe00', 'varsubsetneqq;': '\u2acb\ufe00', 'varsupsetneq;': '\u228b\ufe00', 'varsupsetneqq;': '\u2acc\ufe00', 'vartheta;': '\u03d1', 'vartriangleleft;': '\u22b2', 'vartriangleright;': '\u22b3', 'Vbar;': '\u2aeb', 'vBar;': '\u2ae8', 'vBarv;': '\u2ae9', 'Vcy;': '\u0412', 'vcy;': '\u0432', 'VDash;': '\u22ab', 'Vdash;': '\u22a9', 'vDash;': '\u22a8', 'vdash;': '\u22a2', 'Vdashl;': '\u2ae6', 'Vee;': '\u22c1', 'vee;': '\u2228', 'veebar;': '\u22bb', 'veeeq;': '\u225a', 'vellip;': '\u22ee', 'Verbar;': '\u2016', 'verbar;': '|', 'Vert;': '\u2016', 'vert;': '|', 
'VerticalBar;': '\u2223', 'VerticalLine;': '|', 'VerticalSeparator;': '\u2758', 'VerticalTilde;': '\u2240', 'VeryThinSpace;': '\u200a', 'Vfr;': '\U0001d519', 'vfr;': '\U0001d533', 'vltri;': '\u22b2', 'vnsub;': '\u2282\u20d2', 'vnsup;': '\u2283\u20d2', 'Vopf;': '\U0001d54d', 'vopf;': '\U0001d567', 'vprop;': '\u221d', 'vrtri;': '\u22b3', 'Vscr;': '\U0001d4b1', 'vscr;': '\U0001d4cb', 'vsubnE;': '\u2acb\ufe00', 'vsubne;': '\u228a\ufe00', 'vsupnE;': '\u2acc\ufe00', 'vsupne;': '\u228b\ufe00', 'Vvdash;': '\u22aa', 'vzigzag;': '\u299a', 'Wcirc;': '\u0174', 'wcirc;': '\u0175', 'wedbar;': '\u2a5f', 'Wedge;': '\u22c0', 'wedge;': '\u2227', 'wedgeq;': '\u2259', 'weierp;': '\u2118', 'Wfr;': '\U0001d51a', 'wfr;': '\U0001d534', 'Wopf;': '\U0001d54e', 'wopf;': '\U0001d568', 'wp;': '\u2118', 'wr;': '\u2240', 'wreath;': '\u2240', 'Wscr;': '\U0001d4b2', 'wscr;': '\U0001d4cc', 'xcap;': '\u22c2', 'xcirc;': '\u25ef', 'xcup;': '\u22c3', 'xdtri;': '\u25bd', 'Xfr;': '\U0001d51b', 'xfr;': '\U0001d535', 'xhArr;': '\u27fa', 'xharr;': '\u27f7', 'Xi;': '\u039e', 'xi;': '\u03be', 'xlArr;': '\u27f8', 'xlarr;': '\u27f5', 'xmap;': '\u27fc', 'xnis;': '\u22fb', 'xodot;': '\u2a00', 'Xopf;': '\U0001d54f', 'xopf;': '\U0001d569', 'xoplus;': '\u2a01', 'xotime;': '\u2a02', 'xrArr;': '\u27f9', 'xrarr;': '\u27f6', 'Xscr;': '\U0001d4b3', 'xscr;': '\U0001d4cd', 'xsqcup;': '\u2a06', 'xuplus;': '\u2a04', 'xutri;': '\u25b3', 'xvee;': '\u22c1', 'xwedge;': '\u22c0', 'Yacute': '\xdd', 'yacute': '\xfd', 'Yacute;': '\xdd', 'yacute;': '\xfd', 'YAcy;': '\u042f', 'yacy;': '\u044f', 'Ycirc;': '\u0176', 'ycirc;': '\u0177', 'Ycy;': '\u042b', 'ycy;': '\u044b', 'yen': '\xa5', 'yen;': '\xa5', 'Yfr;': '\U0001d51c', 'yfr;': '\U0001d536', 'YIcy;': '\u0407', 'yicy;': '\u0457', 'Yopf;': '\U0001d550', 'yopf;': '\U0001d56a', 'Yscr;': '\U0001d4b4', 'yscr;': '\U0001d4ce', 'YUcy;': '\u042e', 'yucy;': '\u044e', 'yuml': '\xff', 'Yuml;': '\u0178', 'yuml;': '\xff', 'Zacute;': '\u0179', 'zacute;': '\u017a', 'Zcaron;': '\u017d', 'zcaron;': 
'\u017e', 'Zcy;': '\u0417', 'zcy;': '\u0437', 'Zdot;': '\u017b', 'zdot;': '\u017c', 'zeetrf;': '\u2128', 'ZeroWidthSpace;': '\u200b', 'Zeta;': '\u0396', 'zeta;': '\u03b6', 'Zfr;': '\u2128', 'zfr;': '\U0001d537', 'ZHcy;': '\u0416', 'zhcy;': '\u0436', 'zigrarr;': '\u21dd', 'Zopf;': '\u2124', 'zopf;': '\U0001d56b', 'Zscr;': '\U0001d4b5', 'zscr;': '\U0001d4cf', 'zwj;': '\u200d', 'zwnj;': '\u200c', } # maps the Unicode code point to the HTML entity name codepoint2name = {} # maps the HTML entity name to the character # (or a character reference if the character is outside the Latin-1 range) entitydefs = {} for (name, codepoint) in name2codepoint.items(): codepoint2name[codepoint] = name entitydefs[name] = chr(codepoint) del name, codepoint
bsd-3-clause
7c66396250826830c0e8794fa95a9899
29.033068
110
0.501824
2.479704
false
false
false
false
brython-dev/brython
www/tests/dom.py
1
3367
from browser import window, document, html, svg assert window.empty_list() == [] assert window.list1() == [1, 2, 'a', ['b']] assert window.jsobj().to_dict() == {'a':1} c = window.subscriptable('abracadabra') assert len(c) == 11 assert c[2] == 'r' Foo = window.get_constructor().new assert Foo().foo == 'hi' # test dynamic constructor creation Constructor = window.base_class.extend().new assert Constructor().name == 'base' assert Constructor().extra == 'extra' class A(html.DIV): def __init__(self, mybool): self.mybool = mybool x = A(True) assert x.mybool is True x.mybool = False assert x.mybool is False y = A(False) assert y.mybool == False # test setting a callback function f_called = False def f(*args, **kwargs): global f_called f_called = True element = document.getElementById('dom-test-element-id') # test passing an additional argument after the callback f element.addEventListener('click', f, True) element.click() assert f_called # issue 829 # HTML attributes are case-insensitive class A(html.DIV): def __init__(self): self.uV = 5 self.f = 0.5 self.Xyz = "mystring" self.zd = {"a": 3} self.long_int = 18446744073709552000 p = A() assert not hasattr(p, "XYZ") assert p.Xyz == "mystring" assert p.uV == 5 assert p.f == 0.5 assert not hasattr(p, "uv") assert p.zd == {"a": 3} assert p.long_int == 18446744073709552000, p.long_int # SVG attributes are case-sensitive class B(svg.circle): def __init__(self): self.svg_uV = 6 self.Abc = "anotherstring" q = B() assert q.svg_uV == 6 assert q.Abc == "anotherstring" try: print('q.abc', q.abc) raise Exception("should have raised AttributeError") except AttributeError: pass # issue 888 import javascript assert window.jsReturnsUndefined() is javascript.UNDEFINED # issue 1327 num = document['banner_row'].attrs.get('test', 10) assert num == 10 # issue 1384 class A(html.DIV): def __init__(self, s): self.myattr = s x = A("Andy") assert x.myattr == "Andy" del x.myattr assert not hasattr(x, "myattr") try: del x.attrs raise Exception("should 
have raised AttributeError") except AttributeError: pass class B(svg.line): def __init__(self, s): self.myattr = s y = B("Didier") assert y.myattr == "Didier" del y.myattr assert not hasattr(y, "myattr") # chained insertions html.P() <= html.B() <= html.I("coucou") # Brython-specific attributes document <= (s := html.SPAN(style=dict(position="absolute", height="10px"))) for attr in ['abs_left', 'abs_top', 'top', 'width', 'height', 'width', 'scrolled_left', 'scrolled_top']: assert isinstance(getattr(s, attr), int), getattr(s, attr) assert s.inside(document) # issue 1647 style={"background-color":"yellow", "display": "none"} d = html.DIV("Hello world", style=style, id="mydiv") document <= d assert dict(d.attrs.items()) == { 'style': 'background-color: yellow; display: none;', 'id': 'mydiv' } assert set(d.attrs) == {"style", "id"} assert set(d.attrs.keys()) == {"style", "id"} assert set(d.attrs.values()) == { 'background-color: yellow; display: none;', 'mydiv' } assert "id" in d.attrs # issue 2014 d.attrs["height"] = 4.5 assert d.attrs["height"] == "4.5", d.attrs["height"] # set function as attribute def func(): pass element.foo = func assert element.foo == func
bsd-3-clause
81f386f25e34c8f9e20f6e81fddd9e5b
22.227586
76
0.650431
3.027878
false
false
false
false
brython-dev/brython
www/src/Lib/test/test_sort.py
3
13917
from test import support import random import unittest from functools import cmp_to_key verbose = support.verbose nerrors = 0 def check(tag, expected, raw, compare=None): global nerrors if verbose: print(" checking", tag) orig = raw[:] # save input in case of error if compare: raw.sort(key=cmp_to_key(compare)) else: raw.sort() if len(expected) != len(raw): print("error in", tag) print("length mismatch;", len(expected), len(raw)) print(expected) print(orig) print(raw) nerrors += 1 return for i, good in enumerate(expected): maybe = raw[i] if good is not maybe: print("error in", tag) print("out of order at index", i, good, maybe) print(expected) print(orig) print(raw) nerrors += 1 return class TestBase(unittest.TestCase): def testStressfully(self): # Try a variety of sizes at and around powers of 2, and at powers of 10. sizes = [0] for power in range(1, 10): n = 2 ** power sizes.extend(range(n-1, n+2)) sizes.extend([10, 100, 1000]) class Complains(object): maybe_complain = True def __init__(self, i): self.i = i def __lt__(self, other): if Complains.maybe_complain and random.random() < 0.001: if verbose: print(" complaining at", self, other) raise RuntimeError return self.i < other.i def __repr__(self): return "Complains(%d)" % self.i class Stable(object): def __init__(self, key, i): self.key = key self.index = i def __lt__(self, other): return self.key < other.key def __repr__(self): return "Stable(%d, %d)" % (self.key, self.index) for n in sizes: x = list(range(n)) if verbose: print("Testing size", n) s = x[:] check("identity", x, s) s = x[:] s.reverse() check("reversed", x, s) s = x[:] random.shuffle(s) check("random permutation", x, s) y = x[:] y.reverse() s = x[:] check("reversed via function", y, s, lambda a, b: (b>a)-(b<a)) if verbose: print(" Checking against an insane comparison function.") print(" If the implementation isn't careful, this may segfault.") s = x[:] s.sort(key=cmp_to_key(lambda a, b: int(random.random() * 3) - 1)) check("an insane function left 
some permutation", x, s) if len(x) >= 2: def bad_key(x): raise RuntimeError s = x[:] self.assertRaises(RuntimeError, s.sort, key=bad_key) x = [Complains(i) for i in x] s = x[:] random.shuffle(s) Complains.maybe_complain = True it_complained = False try: s.sort() except RuntimeError: it_complained = True if it_complained: Complains.maybe_complain = False check("exception during sort left some permutation", x, s) s = [Stable(random.randrange(10), i) for i in range(n)] augmented = [(e, e.index) for e in s] augmented.sort() # forced stable because ties broken by index x = [e for e, i in augmented] # a stable sort of s check("stability", x, s) #============================================================================== class TestBugs(unittest.TestCase): def test_bug453523(self): # bug 453523 -- list.sort() crasher. # If this fails, the most likely outcome is a core dump. # Mutations during a list sort should raise a ValueError. class C: def __lt__(self, other): if L and random.random() < 0.75: L.pop() else: L.append(3) return random.random() < 0.5 L = [C() for i in range(50)] self.assertRaises(ValueError, L.sort) def test_undetected_mutation(self): # Python 2.4a1 did not always detect mutation memorywaster = [] for i in range(20): def mutating_cmp(x, y): L.append(3) L.pop() return (x > y) - (x < y) L = [1,2] self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp)) def mutating_cmp(x, y): L.append(3) del L[:] return (x > y) - (x < y) self.assertRaises(ValueError, L.sort, key=cmp_to_key(mutating_cmp)) memorywaster = [memorywaster] #============================================================================== class TestDecorateSortUndecorate(unittest.TestCase): def test_decorated(self): data = 'The quick Brown fox Jumped over The lazy Dog'.split() copy = data[:] random.shuffle(data) data.sort(key=str.lower) def my_cmp(x, y): xlower, ylower = x.lower(), y.lower() return (xlower > ylower) - (xlower < ylower) copy.sort(key=cmp_to_key(my_cmp)) def 
test_baddecorator(self): data = 'The quick Brown fox Jumped over The lazy Dog'.split() self.assertRaises(TypeError, data.sort, key=lambda x,y: 0) def test_stability(self): data = [(random.randrange(100), i) for i in range(200)] copy = data[:] data.sort(key=lambda t: t[0]) # sort on the random first field copy.sort() # sort using both fields self.assertEqual(data, copy) # should get the same result def test_key_with_exception(self): # Verify that the wrapper has been removed data = list(range(-2, 2)) dup = data[:] self.assertRaises(ZeroDivisionError, data.sort, key=lambda x: 1/x) self.assertEqual(data, dup) def test_key_with_mutation(self): data = list(range(10)) def k(x): del data[:] data[:] = range(20) return x self.assertRaises(ValueError, data.sort, key=k) def test_key_with_mutating_del(self): data = list(range(10)) class SortKiller(object): def __init__(self, x): pass def __del__(self): del data[:] data[:] = range(20) def __lt__(self, other): return id(self) < id(other) self.assertRaises(ValueError, data.sort, key=SortKiller) def test_key_with_mutating_del_and_exception(self): data = list(range(10)) ## dup = data[:] class SortKiller(object): def __init__(self, x): if x > 2: raise RuntimeError def __del__(self): del data[:] data[:] = list(range(20)) self.assertRaises(RuntimeError, data.sort, key=SortKiller) ## major honking subtlety: we *can't* do: ## ## self.assertEqual(data, dup) ## ## because there is a reference to a SortKiller in the ## traceback and by the time it dies we're outside the call to ## .sort() and so the list protection gimmicks are out of ## date (this cost some brain cells to figure out...). 
def test_reverse(self): data = list(range(100)) random.shuffle(data) data.sort(reverse=True) self.assertEqual(data, list(range(99,-1,-1))) def test_reverse_stability(self): data = [(random.randrange(100), i) for i in range(200)] copy1 = data[:] copy2 = data[:] def my_cmp(x, y): x0, y0 = x[0], y[0] return (x0 > y0) - (x0 < y0) def my_cmp_reversed(x, y): x0, y0 = x[0], y[0] return (y0 > x0) - (y0 < x0) data.sort(key=cmp_to_key(my_cmp), reverse=True) copy1.sort(key=cmp_to_key(my_cmp_reversed)) self.assertEqual(data, copy1) copy2.sort(key=lambda x: x[0], reverse=True) self.assertEqual(data, copy2) #============================================================================== def check_against_PyObject_RichCompareBool(self, L): ## The idea here is to exploit the fact that unsafe_tuple_compare uses ## PyObject_RichCompareBool for the second elements of tuples. So we have, ## for (most) L, sorted(L) == [y[1] for y in sorted([(0,x) for x in L])] ## This will work as long as __eq__ => not __lt__ for all the objects in L, ## which holds for all the types used below. ## ## Testing this way ensures that the optimized implementation remains consistent ## with the naive implementation, even if changes are made to any of the ## richcompares. ## ## This function tests sorting for three lists (it randomly shuffles each one): ## 1. L ## 2. [(x,) for x in L] ## 3. [((x,),) for x in L] random.seed(0) random.shuffle(L) L_1 = L[:] L_2 = [(x,) for x in L] L_3 = [((x,),) for x in L] for L in [L_1, L_2, L_3]: optimized = sorted(L) reference = [y[1] for y in sorted([(0,x) for x in L])] for (opt, ref) in zip(optimized, reference): self.assertIs(opt, ref) #note: not assertEqual! We want to ensure *identical* behavior. 
class TestOptimizedCompares(unittest.TestCase): def test_safe_object_compare(self): heterogeneous_lists = [[0, 'foo'], [0.0, 'foo'], [('foo',), 'foo']] for L in heterogeneous_lists: self.assertRaises(TypeError, L.sort) self.assertRaises(TypeError, [(x,) for x in L].sort) self.assertRaises(TypeError, [((x,),) for x in L].sort) float_int_lists = [[1,1.1], [1<<70,1.1], [1.1,1], [1.1,1<<70]] for L in float_int_lists: check_against_PyObject_RichCompareBool(self, L) def test_unsafe_object_compare(self): # This test is by ppperry. It ensures that unsafe_object_compare is # verifying ms->key_richcompare == tp->richcompare before comparing. class WackyComparator(int): def __lt__(self, other): elem.__class__ = WackyList2 return int.__lt__(self, other) class WackyList1(list): pass class WackyList2(list): def __lt__(self, other): raise ValueError L = [WackyList1([WackyComparator(i), i]) for i in range(10)] elem = L[-1] with self.assertRaises(ValueError): L.sort() L = [WackyList1([WackyComparator(i), i]) for i in range(10)] elem = L[-1] with self.assertRaises(ValueError): [(x,) for x in L].sort() # The following test is also by ppperry. It ensures that # unsafe_object_compare handles Py_NotImplemented appropriately. 
class PointlessComparator: def __lt__(self, other): return NotImplemented L = [PointlessComparator(), PointlessComparator()] self.assertRaises(TypeError, L.sort) self.assertRaises(TypeError, [(x,) for x in L].sort) # The following tests go through various types that would trigger # ms->key_compare = unsafe_object_compare lists = [list(range(100)) + [(1<<70)], [str(x) for x in range(100)] + ['\uffff'], [bytes(x) for x in range(100)], [cmp_to_key(lambda x,y: x<y)(x) for x in range(100)]] for L in lists: check_against_PyObject_RichCompareBool(self, L) def test_unsafe_latin_compare(self): check_against_PyObject_RichCompareBool(self, [str(x) for x in range(100)]) def test_unsafe_long_compare(self): check_against_PyObject_RichCompareBool(self, [x for x in range(100)]) def test_unsafe_float_compare(self): check_against_PyObject_RichCompareBool(self, [float(x) for x in range(100)]) def test_unsafe_tuple_compare(self): # This test was suggested by Tim Peters. It verifies that the tuple # comparison respects the current tuple compare semantics, which do not # guarantee that x < x <=> (x,) < (x,) # # Note that we don't have to put anything in tuples here, because # the check function does a tuple test automatically. check_against_PyObject_RichCompareBool(self, [float('nan')]*100) check_against_PyObject_RichCompareBool(self, [float('nan') for _ in range(100)]) def test_not_all_tuples(self): self.assertRaises(TypeError, [(1.0, 1.0), (False, "A"), 6].sort) self.assertRaises(TypeError, [('a', 1), (1, 'a')].sort) self.assertRaises(TypeError, [(1, 'a'), ('a', 1)].sort) def test_none_in_tuples(self): expected = [(None, 1), (None, 2)] actual = sorted([(None, 2), (None, 1)]) self.assertEqual(actual, expected) #============================================================================== if __name__ == "__main__": unittest.main()
bsd-3-clause
a996ef948de6bafb31dfa59c0bbc9bee
34.684615
88
0.507078
3.948085
false
true
false
false
brython-dev/brython
www/src/Lib/external_import.py
1
3004
import os import sys from browser import doc import urllib.request ## this module is able to download modules that are external to ## localhost/src ## so we could download from any URL class ModuleFinder: def __init__(self, path_entry): print("external_import here..") #print(path_entry) self._module=None if path_entry.startswith('http://'): self.path_entry=path_entry else: raise ImportError() def __str__(self): return '<%s for "%s">' % (self.__class__.__name__, self.path_entry) def find_module(self, fullname, path=None): path = path or self.path_entry #print('looking for "%s" in %s ...' % (fullname, path)) for _ext in ['js', 'pyj', 'py']: _fp,_url,_headers=urllib.request.urlopen(path + '/' + '%s.%s' % (fullname, _ext)) self._module=_fp.read() _fp.close() if self._module is not None: print("module found at %s:%s" % (path, fullname)) return ModuleLoader(path, fullname, self._module) print('module %s not found' % fullname) raise ImportError() return None class ModuleLoader: """Load source for modules""" def __init__(self, filepath, name, module_source): self._filepath=filepath self._name=name self._module_source=module_source def get_source(self): return self._module_source def is_package(self): return '.' 
in self._name def load_module(self): if self._name in sys.modules: #print('reusing existing module from previous import of "%s"' % fullname) mod = sys.modules[self._name] return mod _src = self.get_source() if self._filepath.endswith('.js'): mod = JSObject(import_js_module(_src, self._filepath, self._name)) elif self._filepath.endswith('.py'): mod = JSObject(import_py_module(_src, self._filepath, self._name)) elif self._filepath.endswith('.pyj'): mod = JSObject(import_pyj_module(_src, self._filepath, self._name)) else: raise ImportError('Invalid Module: %s' % self._filepath) # Set a few properties required by PEP 302 mod.__file__ = self._filepath mod.__name__ = self._name mod.__path__ = os.path.abspath(self._filepath) mod.__loader__ = self mod.__package__ = '.'.join(self._name.split('.')[:-1]) if self.is_package(): print('adding path for package') # Set __path__ for packages # so we can find the sub-modules. mod.__path__ = [ self._filepath ] else: print('imported as regular module') print('creating a new module object for "%s"' % self._name) sys.modules.setdefault(self._name, mod) JSObject(__BRYTHON__.imported)[self._name]=mod return mod
bsd-3-clause
8dc520df395d313b158e50e67345674b
33.528736
93
0.554594
4.092643
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/mac_cyrillic.py
35
13761
""" Python Character Mapping Codec mac_cyrillic generated from 'MAPPINGS/VENDORS/APPLE/CYRILLIC.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='mac-cyrillic', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL CHARACTER '\x19' # 0x19 -> CONTROL 
CHARACTER '\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER 
W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\u0410' # 0x80 -> CYRILLIC CAPITAL LETTER A '\u0411' # 0x81 -> CYRILLIC CAPITAL LETTER BE '\u0412' # 0x82 -> CYRILLIC CAPITAL LETTER VE '\u0413' # 0x83 -> CYRILLIC CAPITAL LETTER GHE '\u0414' # 0x84 -> CYRILLIC CAPITAL LETTER DE '\u0415' # 0x85 -> CYRILLIC CAPITAL LETTER IE '\u0416' # 0x86 -> CYRILLIC CAPITAL LETTER ZHE '\u0417' # 0x87 -> CYRILLIC CAPITAL LETTER ZE '\u0418' # 0x88 -> CYRILLIC CAPITAL LETTER I '\u0419' # 0x89 -> CYRILLIC CAPITAL LETTER SHORT I '\u041a' # 0x8A -> CYRILLIC CAPITAL LETTER KA '\u041b' # 0x8B -> CYRILLIC CAPITAL LETTER EL '\u041c' # 0x8C -> CYRILLIC CAPITAL LETTER EM '\u041d' # 0x8D -> CYRILLIC CAPITAL LETTER EN '\u041e' # 0x8E -> CYRILLIC CAPITAL LETTER O '\u041f' # 0x8F -> CYRILLIC CAPITAL LETTER PE '\u0420' # 0x90 -> CYRILLIC CAPITAL LETTER ER '\u0421' # 0x91 -> CYRILLIC CAPITAL LETTER ES '\u0422' # 0x92 -> CYRILLIC CAPITAL LETTER TE '\u0423' # 0x93 -> CYRILLIC CAPITAL LETTER U '\u0424' # 0x94 -> CYRILLIC CAPITAL LETTER EF '\u0425' # 0x95 -> CYRILLIC CAPITAL LETTER HA '\u0426' # 0x96 -> CYRILLIC CAPITAL LETTER TSE '\u0427' # 0x97 -> CYRILLIC CAPITAL LETTER CHE '\u0428' # 0x98 -> CYRILLIC CAPITAL LETTER SHA '\u0429' # 0x99 -> CYRILLIC CAPITAL LETTER SHCHA '\u042a' # 0x9A -> CYRILLIC CAPITAL LETTER HARD SIGN '\u042b' # 0x9B -> CYRILLIC CAPITAL LETTER YERU '\u042c' # 0x9C -> CYRILLIC CAPITAL LETTER SOFT SIGN '\u042d' # 0x9D -> CYRILLIC CAPITAL LETTER E '\u042e' # 0x9E -> CYRILLIC CAPITAL LETTER YU '\u042f' # 0x9F -> CYRILLIC CAPITAL LETTER YA '\u2020' # 0xA0 -> DAGGER '\xb0' # 0xA1 -> DEGREE SIGN '\u0490' # 0xA2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN '\xa3' # 0xA3 -> POUND SIGN '\xa7' # 0xA4 -> SECTION SIGN '\u2022' # 0xA5 -> BULLET '\xb6' # 0xA6 -> PILCROW SIGN '\u0406' # 0xA7 -> 
CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I '\xae' # 0xA8 -> REGISTERED SIGN '\xa9' # 0xA9 -> COPYRIGHT SIGN '\u2122' # 0xAA -> TRADE MARK SIGN '\u0402' # 0xAB -> CYRILLIC CAPITAL LETTER DJE '\u0452' # 0xAC -> CYRILLIC SMALL LETTER DJE '\u2260' # 0xAD -> NOT EQUAL TO '\u0403' # 0xAE -> CYRILLIC CAPITAL LETTER GJE '\u0453' # 0xAF -> CYRILLIC SMALL LETTER GJE '\u221e' # 0xB0 -> INFINITY '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO '\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO '\u0456' # 0xB4 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I '\xb5' # 0xB5 -> MICRO SIGN '\u0491' # 0xB6 -> CYRILLIC SMALL LETTER GHE WITH UPTURN '\u0408' # 0xB7 -> CYRILLIC CAPITAL LETTER JE '\u0404' # 0xB8 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE '\u0454' # 0xB9 -> CYRILLIC SMALL LETTER UKRAINIAN IE '\u0407' # 0xBA -> CYRILLIC CAPITAL LETTER YI '\u0457' # 0xBB -> CYRILLIC SMALL LETTER YI '\u0409' # 0xBC -> CYRILLIC CAPITAL LETTER LJE '\u0459' # 0xBD -> CYRILLIC SMALL LETTER LJE '\u040a' # 0xBE -> CYRILLIC CAPITAL LETTER NJE '\u045a' # 0xBF -> CYRILLIC SMALL LETTER NJE '\u0458' # 0xC0 -> CYRILLIC SMALL LETTER JE '\u0405' # 0xC1 -> CYRILLIC CAPITAL LETTER DZE '\xac' # 0xC2 -> NOT SIGN '\u221a' # 0xC3 -> SQUARE ROOT '\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK '\u2248' # 0xC5 -> ALMOST EQUAL TO '\u2206' # 0xC6 -> INCREMENT '\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS '\xa0' # 0xCA -> NO-BREAK SPACE '\u040b' # 0xCB -> CYRILLIC CAPITAL LETTER TSHE '\u045b' # 0xCC -> CYRILLIC SMALL LETTER TSHE '\u040c' # 0xCD -> CYRILLIC CAPITAL LETTER KJE '\u045c' # 0xCE -> CYRILLIC SMALL LETTER KJE '\u0455' # 0xCF -> CYRILLIC SMALL LETTER DZE '\u2013' # 0xD0 -> EN DASH '\u2014' # 0xD1 -> EM DASH '\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK '\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK '\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK '\u2019' # 0xD5 -> RIGHT SINGLE 
QUOTATION MARK '\xf7' # 0xD6 -> DIVISION SIGN '\u201e' # 0xD7 -> DOUBLE LOW-9 QUOTATION MARK '\u040e' # 0xD8 -> CYRILLIC CAPITAL LETTER SHORT U '\u045e' # 0xD9 -> CYRILLIC SMALL LETTER SHORT U '\u040f' # 0xDA -> CYRILLIC CAPITAL LETTER DZHE '\u045f' # 0xDB -> CYRILLIC SMALL LETTER DZHE '\u2116' # 0xDC -> NUMERO SIGN '\u0401' # 0xDD -> CYRILLIC CAPITAL LETTER IO '\u0451' # 0xDE -> CYRILLIC SMALL LETTER IO '\u044f' # 0xDF -> CYRILLIC SMALL LETTER YA '\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A '\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE '\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE '\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE '\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE '\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE '\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE '\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE '\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I '\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I '\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA '\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL '\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM '\u043d' # 0xED -> CYRILLIC SMALL LETTER EN '\u043e' # 0xEE -> CYRILLIC SMALL LETTER O '\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE '\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER '\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES '\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE '\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U '\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF '\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA '\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE '\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE '\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA '\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA '\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN '\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU '\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN '\u044d' # 0xFD -> CYRILLIC SMALL LETTER E '\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU '\u20ac' # 0xFF -> EURO SIGN ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
24dd4e47234006975dc25fce6c484d54
42.824104
118
0.53608
3.17806
false
false
false
false
brython-dev/brython
www/src/Lib/symtable.py
3
10368
"""Interface to the compiler's internal symbol tables""" import _symtable from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM, DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE, LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL) import weakref __all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"] def symtable(code, filename, compile_type): """ Return the toplevel *SymbolTable* for the source code. *filename* is the name of the file with the code and *compile_type* is the *compile()* mode argument. """ top = _symtable.symtable(code, filename, compile_type) return _newSymbolTable(top, filename) class SymbolTableFactory: def __init__(self): self.__memo = weakref.WeakValueDictionary() def new(self, table, filename): if table.type == _symtable.TYPE_FUNCTION: return Function(table, filename) if table.type == _symtable.TYPE_CLASS: return Class(table, filename) return SymbolTable(table, filename) def __call__(self, table, filename): key = table, filename obj = self.__memo.get(key, None) if obj is None: obj = self.__memo[key] = self.new(table, filename) return obj _newSymbolTable = SymbolTableFactory() class SymbolTable: def __init__(self, raw_table, filename): self._table = raw_table self._filename = filename self._symbols = {} def __repr__(self): if self.__class__ == SymbolTable: kind = "" else: kind = "%s " % self.__class__.__name__ if self._table.name == "top": return "<{0}SymbolTable for module {1}>".format(kind, self._filename) else: return "<{0}SymbolTable for {1} in {2}>".format(kind, self._table.name, self._filename) def get_type(self): """Return the type of the symbol table. The values returned are 'class', 'module' and 'function'. 
""" if self._table.type == _symtable.TYPE_MODULE: return "module" if self._table.type == _symtable.TYPE_FUNCTION: return "function" if self._table.type == _symtable.TYPE_CLASS: return "class" assert self._table.type in (1, 2, 3), \ "unexpected type: {0}".format(self._table.type) def get_id(self): """Return an identifier for the table. """ return self._table.id def get_name(self): """Return the table's name. This corresponds to the name of the class, function or 'top' if the table is for a class, function or global respectively. """ return self._table.name def get_lineno(self): """Return the number of the first line in the block for the table. """ return self._table.lineno def is_optimized(self): """Return *True* if the locals in the table are optimizable. """ return bool(self._table.type == _symtable.TYPE_FUNCTION) def is_nested(self): """Return *True* if the block is a nested class or function.""" return bool(self._table.nested) def has_children(self): """Return *True* if the block has nested namespaces. """ return bool(self._table.children) def get_identifiers(self): """Return a view object containing the names of symbols in the table. """ return self._table.symbols.keys() def lookup(self, name): """Lookup a *name* in the table. Returns a *Symbol* instance. """ sym = self._symbols.get(name) if sym is None: flags = self._table.symbols[name] namespaces = self.__check_children(name) module_scope = (self._table.name == "top") sym = self._symbols[name] = Symbol(name, flags, namespaces, module_scope=module_scope) return sym def get_symbols(self): """Return a list of *Symbol* instances for names in the table. """ return [self.lookup(ident) for ident in self.get_identifiers()] def __check_children(self, name): return [_newSymbolTable(st, self._filename) for st in self._table.children if st.name == name] def get_children(self): """Return a list of the nested symbol tables. 
""" return [_newSymbolTable(st, self._filename) for st in self._table.children] class Function(SymbolTable): # Default values for instance variables __params = None __locals = None __frees = None __globals = None __nonlocals = None def __idents_matching(self, test_func): return tuple(ident for ident in self.get_identifiers() if test_func(self._table.symbols[ident])) def get_parameters(self): """Return a tuple of parameters to the function. """ if self.__params is None: self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) return self.__params def get_locals(self): """Return a tuple of locals in the function. """ if self.__locals is None: locs = (LOCAL, CELL) test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs self.__locals = self.__idents_matching(test) return self.__locals def get_globals(self): """Return a tuple of globals in the function. """ if self.__globals is None: glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob self.__globals = self.__idents_matching(test) return self.__globals def get_nonlocals(self): """Return a tuple of nonlocals in the function. """ if self.__nonlocals is None: self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL) return self.__nonlocals def get_frees(self): """Return a tuple of free variables in the function. """ if self.__frees is None: is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE self.__frees = self.__idents_matching(is_free) return self.__frees class Class(SymbolTable): __methods = None def get_methods(self): """Return a tuple of methods declared in the class. 
""" if self.__methods is None: d = {} for st in self._table.children: d[st.name] = 1 self.__methods = tuple(d) return self.__methods class Symbol: def __init__(self, name, flags, namespaces=None, *, module_scope=False): self.__name = name self.__flags = flags self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope() self.__namespaces = namespaces or () self.__module_scope = module_scope def __repr__(self): return "<symbol {0!r}>".format(self.__name) def get_name(self): """Return a name of a symbol. """ return self.__name def is_referenced(self): """Return *True* if the symbol is used in its block. """ return bool(self.__flags & _symtable.USE) def is_parameter(self): """Return *True* if the symbol is a parameter. """ return bool(self.__flags & DEF_PARAM) def is_global(self): """Return *True* if the symbol is global. """ return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) or (self.__module_scope and self.__flags & DEF_BOUND)) def is_nonlocal(self): """Return *True* if the symbol is nonlocal.""" return bool(self.__flags & DEF_NONLOCAL) def is_declared_global(self): """Return *True* if the symbol is declared global with a global statement.""" return bool(self.__scope == GLOBAL_EXPLICIT) def is_local(self): """Return *True* if the symbol is local. """ return bool(self.__scope in (LOCAL, CELL) or (self.__module_scope and self.__flags & DEF_BOUND)) def is_annotated(self): """Return *True* if the symbol is annotated. """ return bool(self.__flags & DEF_ANNOT) def is_free(self): """Return *True* if a referenced symbol is not assigned to. """ return bool(self.__scope == FREE) def is_imported(self): """Return *True* if the symbol is created from an import statement. """ return bool(self.__flags & DEF_IMPORT) def is_assigned(self): """Return *True* if a symbol is assigned to.""" return bool(self.__flags & DEF_LOCAL) def is_namespace(self): """Returns *True* if name binding introduces new namespace. 
If the name is used as the target of a function or class statement, this will be true. Note that a single name can be bound to multiple objects. If is_namespace() is true, the name may also be bound to other objects, like an int or list, that does not introduce a new namespace. """ return bool(self.__namespaces) def get_namespaces(self): """Return a list of namespaces bound to this name""" return self.__namespaces def get_namespace(self): """Return the single namespace bound to this name. Raises ValueError if the name is bound to multiple namespaces or no namespace. """ if len(self.__namespaces) == 0: raise ValueError("name is not bound to any namespaces") elif len(self.__namespaces) > 1: raise ValueError("name is bound to multiple namespaces") else: return self.__namespaces[0] if __name__ == "__main__": import os, sys with open(sys.argv[0]) as f: src = f.read() mod = symtable(src, os.path.split(sys.argv[0])[1], "exec") for ident in mod.get_identifiers(): info = mod.lookup(ident) print(info, info.is_local(), info.is_namespace())
bsd-3-clause
fd0f139d606a8a94d7817a78b6d5c18f
30.803681
81
0.563754
4.101266
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/cp1140.py
35
13412
""" Python Character Mapping Codec cp1140 generated from 'python-mappings/CP1140.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1140', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x9c' # 0x04 -> CONTROL '\t' # 0x05 -> HORIZONTAL TABULATION '\x86' # 0x06 -> CONTROL '\x7f' # 0x07 -> DELETE '\x97' # 0x08 -> CONTROL '\x8d' # 0x09 -> CONTROL '\x8e' # 0x0A -> CONTROL '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x9d' # 0x14 -> CONTROL '\x85' # 0x15 -> CONTROL '\x08' # 0x16 -> BACKSPACE '\x87' # 0x17 -> CONTROL '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x92' # 0x1A -> CONTROL '\x8f' # 0x1B -> CONTROL '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> 
UNIT SEPARATOR '\x80' # 0x20 -> CONTROL '\x81' # 0x21 -> CONTROL '\x82' # 0x22 -> CONTROL '\x83' # 0x23 -> CONTROL '\x84' # 0x24 -> CONTROL '\n' # 0x25 -> LINE FEED '\x17' # 0x26 -> END OF TRANSMISSION BLOCK '\x1b' # 0x27 -> ESCAPE '\x88' # 0x28 -> CONTROL '\x89' # 0x29 -> CONTROL '\x8a' # 0x2A -> CONTROL '\x8b' # 0x2B -> CONTROL '\x8c' # 0x2C -> CONTROL '\x05' # 0x2D -> ENQUIRY '\x06' # 0x2E -> ACKNOWLEDGE '\x07' # 0x2F -> BELL '\x90' # 0x30 -> CONTROL '\x91' # 0x31 -> CONTROL '\x16' # 0x32 -> SYNCHRONOUS IDLE '\x93' # 0x33 -> CONTROL '\x94' # 0x34 -> CONTROL '\x95' # 0x35 -> CONTROL '\x96' # 0x36 -> CONTROL '\x04' # 0x37 -> END OF TRANSMISSION '\x98' # 0x38 -> CONTROL '\x99' # 0x39 -> CONTROL '\x9a' # 0x3A -> CONTROL '\x9b' # 0x3B -> CONTROL '\x14' # 0x3C -> DEVICE CONTROL FOUR '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE '\x9e' # 0x3E -> CONTROL '\x1a' # 0x3F -> SUBSTITUTE ' ' # 0x40 -> SPACE '\xa0' # 0x41 -> NO-BREAK SPACE '\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE '\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE '\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE '\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA '\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE '\xa2' # 0x4A -> CENT SIGN '.' # 0x4B -> FULL STOP '<' # 0x4C -> LESS-THAN SIGN '(' # 0x4D -> LEFT PARENTHESIS '+' # 0x4E -> PLUS SIGN '|' # 0x4F -> VERTICAL LINE '&' # 0x50 -> AMPERSAND '\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE '\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE '\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS '\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE '\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN) '!' 
# 0x5A -> EXCLAMATION MARK '$' # 0x5B -> DOLLAR SIGN '*' # 0x5C -> ASTERISK ')' # 0x5D -> RIGHT PARENTHESIS ';' # 0x5E -> SEMICOLON '\xac' # 0x5F -> NOT SIGN '-' # 0x60 -> HYPHEN-MINUS '/' # 0x61 -> SOLIDUS '\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE '\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE '\xa6' # 0x6A -> BROKEN BAR ',' # 0x6B -> COMMA '%' # 0x6C -> PERCENT SIGN '_' # 0x6D -> LOW LINE '>' # 0x6E -> GREATER-THAN SIGN '?' # 0x6F -> QUESTION MARK '\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE '\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE '\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE '\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE '`' # 0x79 -> GRAVE ACCENT ':' # 0x7A -> COLON '#' # 0x7B -> NUMBER SIGN '@' # 0x7C -> COMMERCIAL AT "'" # 0x7D -> APOSTROPHE '=' # 0x7E -> EQUALS SIGN '"' # 0x7F -> QUOTATION MARK '\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE 'a' # 0x81 -> LATIN SMALL LETTER A 'b' # 0x82 -> LATIN SMALL LETTER B 'c' # 0x83 -> LATIN SMALL LETTER C 'd' # 0x84 -> LATIN SMALL LETTER D 'e' # 0x85 -> LATIN SMALL LETTER E 'f' # 0x86 -> LATIN SMALL LETTER F 'g' # 0x87 -> LATIN SMALL LETTER G 'h' # 0x88 -> LATIN SMALL LETTER H 'i' # 0x89 -> LATIN SMALL LETTER I '\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC) '\xfd' # 0x8D 
-> LATIN SMALL LETTER Y WITH ACUTE '\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC) '\xb1' # 0x8F -> PLUS-MINUS SIGN '\xb0' # 0x90 -> DEGREE SIGN 'j' # 0x91 -> LATIN SMALL LETTER J 'k' # 0x92 -> LATIN SMALL LETTER K 'l' # 0x93 -> LATIN SMALL LETTER L 'm' # 0x94 -> LATIN SMALL LETTER M 'n' # 0x95 -> LATIN SMALL LETTER N 'o' # 0x96 -> LATIN SMALL LETTER O 'p' # 0x97 -> LATIN SMALL LETTER P 'q' # 0x98 -> LATIN SMALL LETTER Q 'r' # 0x99 -> LATIN SMALL LETTER R '\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR '\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR '\xe6' # 0x9C -> LATIN SMALL LIGATURE AE '\xb8' # 0x9D -> CEDILLA '\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE '\u20ac' # 0x9F -> EURO SIGN '\xb5' # 0xA0 -> MICRO SIGN '~' # 0xA1 -> TILDE 's' # 0xA2 -> LATIN SMALL LETTER S 't' # 0xA3 -> LATIN SMALL LETTER T 'u' # 0xA4 -> LATIN SMALL LETTER U 'v' # 0xA5 -> LATIN SMALL LETTER V 'w' # 0xA6 -> LATIN SMALL LETTER W 'x' # 0xA7 -> LATIN SMALL LETTER X 'y' # 0xA8 -> LATIN SMALL LETTER Y 'z' # 0xA9 -> LATIN SMALL LETTER Z '\xa1' # 0xAA -> INVERTED EXCLAMATION MARK '\xbf' # 0xAB -> INVERTED QUESTION MARK '\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC) '\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE '\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC) '\xae' # 0xAF -> REGISTERED SIGN '^' # 0xB0 -> CIRCUMFLEX ACCENT '\xa3' # 0xB1 -> POUND SIGN '\xa5' # 0xB2 -> YEN SIGN '\xb7' # 0xB3 -> MIDDLE DOT '\xa9' # 0xB4 -> COPYRIGHT SIGN '\xa7' # 0xB5 -> SECTION SIGN '\xb6' # 0xB6 -> PILCROW SIGN '\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF '\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS '[' # 0xBA -> LEFT SQUARE BRACKET ']' # 0xBB -> RIGHT SQUARE BRACKET '\xaf' # 0xBC -> MACRON '\xa8' # 0xBD -> DIAERESIS '\xb4' # 0xBE -> ACUTE ACCENT '\xd7' # 0xBF -> MULTIPLICATION SIGN '{' # 0xC0 -> LEFT CURLY BRACKET 'A' # 0xC1 -> LATIN CAPITAL LETTER A 'B' # 0xC2 -> LATIN CAPITAL LETTER B 'C' # 0xC3 -> LATIN CAPITAL LETTER C 'D' # 0xC4 -> LATIN CAPITAL 
LETTER D 'E' # 0xC5 -> LATIN CAPITAL LETTER E 'F' # 0xC6 -> LATIN CAPITAL LETTER F 'G' # 0xC7 -> LATIN CAPITAL LETTER G 'H' # 0xC8 -> LATIN CAPITAL LETTER H 'I' # 0xC9 -> LATIN CAPITAL LETTER I '\xad' # 0xCA -> SOFT HYPHEN '\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS '\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE '\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE '\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE '}' # 0xD0 -> RIGHT CURLY BRACKET 'J' # 0xD1 -> LATIN CAPITAL LETTER J 'K' # 0xD2 -> LATIN CAPITAL LETTER K 'L' # 0xD3 -> LATIN CAPITAL LETTER L 'M' # 0xD4 -> LATIN CAPITAL LETTER M 'N' # 0xD5 -> LATIN CAPITAL LETTER N 'O' # 0xD6 -> LATIN CAPITAL LETTER O 'P' # 0xD7 -> LATIN CAPITAL LETTER P 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q 'R' # 0xD9 -> LATIN CAPITAL LETTER R '\xb9' # 0xDA -> SUPERSCRIPT ONE '\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS '\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE '\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE '\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS '\\' # 0xE0 -> REVERSE SOLIDUS '\xf7' # 0xE1 -> DIVISION SIGN 'S' # 0xE2 -> LATIN CAPITAL LETTER S 'T' # 0xE3 -> LATIN CAPITAL LETTER T 'U' # 0xE4 -> LATIN CAPITAL LETTER U 'V' # 0xE5 -> LATIN CAPITAL LETTER V 'W' # 0xE6 -> LATIN CAPITAL LETTER W 'X' # 0xE7 -> LATIN CAPITAL LETTER X 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z '\xb2' # 0xEA -> SUPERSCRIPT TWO '\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE '\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE '\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE '0' # 0xF0 -> DIGIT ZERO '1' # 0xF1 -> DIGIT ONE '2' # 0xF2 -> DIGIT TWO '3' # 0xF3 -> DIGIT THREE '4' # 0xF4 -> DIGIT FOUR '5' # 0xF5 -> DIGIT FIVE '6' # 0xF6 -> DIGIT SIX '7' # 0xF7 -> DIGIT SEVEN '8' # 0xF8 
-> DIGIT EIGHT '9' # 0xF9 -> DIGIT NINE '\xb3' # 0xFA -> SUPERSCRIPT THREE '\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE '\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE '\x9f' # 0xFF -> CONTROL ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
2bac69bb324acaf92e7a5b0d3e868993
41.687296
103
0.506934
3.219395
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/iso8859_1.py
35
13483
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-1', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 
0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET 
'^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> 
'\xa0' # 0xA0 -> NO-BREAK SPACE '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK '\xa2' # 0xA2 -> CENT SIGN '\xa3' # 0xA3 -> POUND SIGN '\xa4' # 0xA4 -> CURRENCY SIGN '\xa5' # 0xA5 -> YEN SIGN '\xa6' # 0xA6 -> BROKEN BAR '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\xa9' # 0xA9 -> COPYRIGHT SIGN '\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xac' # 0xAC -> NOT SIGN '\xad' # 0xAD -> SOFT HYPHEN '\xae' # 0xAE -> REGISTERED SIGN '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\xb2' # 0xB2 -> SUPERSCRIPT TWO '\xb3' # 0xB3 -> SUPERSCRIPT THREE '\xb4' # 0xB4 -> ACUTE ACCENT '\xb5' # 0xB5 -> MICRO SIGN '\xb6' # 0xB6 -> PILCROW SIGN '\xb7' # 0xB7 -> MIDDLE DOT '\xb8' # 0xB8 -> CEDILLA '\xb9' # 0xB9 -> SUPERSCRIPT ONE '\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS '\xbf' # 0xBF -> INVERTED QUESTION MARK '\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE '\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE '\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE '\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE '\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE '\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX '\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS '\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE '\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS '\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic) 
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE '\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE '\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE '\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE '\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xd7' # 0xD7 -> MULTIPLICATION SIGN '\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE '\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE '\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE '\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX '\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE '\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic) '\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German) '\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE '\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE '\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE '\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe6' # 0xE6 -> LATIN SMALL LETTER AE '\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA '\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE '\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE '\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS '\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE '\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE '\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS '\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic) '\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE '\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE '\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE '\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE '\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS '\xf7' # 0xF7 -> DIVISION SIGN '\xf8' # 0xF8 -> LATIN SMALL 
LETTER O WITH STROKE '\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE '\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE '\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS '\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE '\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic) '\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
e433b7cebabf884e11cacfe2c119de10
41.918567
107
0.504413
3.185964
false
false
false
false
brython-dev/brython
www/src/Lib/base64.py
1
21028
#! /usr/bin/env python3 """Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" # Modified 04-Oct-1995 by Jack Jansen to use binascii module # Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support # Modified 22-May-2007 by Guido van Rossum to use bytes everywhere import re import struct import binascii __all__ = [ # Legacy interface exports traditional RFC 2045 Base64 encodings 'encode', 'decode', 'encodebytes', 'decodebytes', # Generalized interface for other encodings 'b64encode', 'b64decode', 'b32encode', 'b32decode', 'b32hexencode', 'b32hexdecode', 'b16encode', 'b16decode', # Base85 and Ascii85 encodings 'b85encode', 'b85decode', 'a85encode', 'a85decode', # Standard Base64 encoding 'standard_b64encode', 'standard_b64decode', # Some common Base64 alternatives. As referenced by RFC 3458, see thread # starting at: # # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html 'urlsafe_b64encode', 'urlsafe_b64decode', ] bytes_types = (bytes, bytearray) # Types acceptable as binary data def _bytes_from_decode_data(s): if isinstance(s, str): try: return s.encode('ascii') except UnicodeEncodeError: raise ValueError('string argument should contain only ASCII characters') if isinstance(s, bytes_types): return s try: return memoryview(s).tobytes() except TypeError: raise TypeError("argument should be a bytes-like object or ASCII " "string, not %r" % s.__class__.__name__) from None # Base64 encoding/decoding uses binascii def b64encode(s, altchars=None): """Encode the bytes-like object s using Base64 and return a bytes object. Optional altchars should be a byte string of length 2 which specifies an alternative alphabet for the '+' and '/' characters. This allows an application to e.g. generate url or filesystem safe Base64 strings. 
""" encoded = binascii.b2a_base64(s, newline=False) if altchars is not None: assert len(altchars) == 2, repr(altchars) return encoded.translate(bytes.maketrans(b'+/', altchars)) return encoded def b64decode(s, altchars=None, validate=False): """Decode the Base64 encoded bytes-like object or ASCII string s. Optional altchars must be a bytes-like object or ASCII string of length 2 which specifies the alternative alphabet used instead of the '+' and '/' characters. The result is returned as a bytes object. A binascii.Error is raised if s is incorrectly padded. If validate is False (the default), characters that are neither in the normal base-64 alphabet nor the alternative alphabet are discarded prior to the padding check. If validate is True, these non-alphabet characters in the input result in a binascii.Error. For more information about the strict base64 check, see: https://docs.python.org/3.11/library/binascii.html#binascii.a2b_base64 """ s = _bytes_from_decode_data(s) if altchars is not None: altchars = _bytes_from_decode_data(altchars) assert len(altchars) == 2, repr(altchars) s = s.translate(bytes.maketrans(altchars, b'+/')) return binascii.a2b_base64(s, strict_mode=validate) def standard_b64encode(s): """Encode bytes-like object s using the standard Base64 alphabet. The result is returned as a bytes object. """ return b64encode(s) def standard_b64decode(s): """Decode bytes encoded with the standard Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the standard alphabet are discarded prior to the padding check. """ return b64decode(s) _urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') _urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') def urlsafe_b64encode(s): """Encode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object to encode. 
The result is returned as a bytes object. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ return b64encode(s).translate(_urlsafe_encode_translation) def urlsafe_b64decode(s): """Decode bytes using the URL- and filesystem-safe Base64 alphabet. Argument s is a bytes-like object or ASCII string to decode. The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded. Characters that are not in the URL-safe base-64 alphabet, and are not a plus '+' or slash '/', are discarded prior to the padding check. The alphabet uses '-' instead of '+' and '_' instead of '/'. """ s = _bytes_from_decode_data(s) s = s.translate(_urlsafe_decode_translation) return b64decode(s) # Base32 encoding/decoding must be done in Python _B32_ENCODE_DOCSTRING = ''' Encode the bytes-like objects using {encoding} and return a bytes object. ''' _B32_DECODE_DOCSTRING = ''' Decode the {encoding} encoded bytes-like object or ASCII string s. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. For security purposes, the default is False. {extra_args} The result is returned as a bytes object. A binascii.Error is raised if the input is incorrectly padded or if there are non-alphabet characters present in the input. ''' _B32_DECODE_MAP01_DOCSTRING = ''' RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O (oh), and for optional mapping of the digit 1 (one) to either the letter I (eye) or letter L (el). The optional argument map01 when not None, specifies which letter the digit 1 should be mapped to (when map01 is not None, the digit 0 is always mapped to the letter O). For security purposes the default is None, so that 0 and 1 are not allowed in the input. 
''' _b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567' _b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV' _b32tab2 = {} _b32rev = {} def _b32encode(alphabet, s): global _b32tab2 # Delay the initialization of the table to not waste memory # if the function is never called if alphabet not in _b32tab2: b32tab = [bytes((i,)) for i in alphabet] _b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab] b32tab = None if not isinstance(s, bytes_types): s = memoryview(s).tobytes() leftover = len(s) % 5 # Pad the last quantum with zero bits if necessary if leftover: s = s + b'\0' * (5 - leftover) # Don't use += ! encoded = bytearray() from_bytes = int.from_bytes b32tab2 = _b32tab2[alphabet] for i in range(0, len(s), 5): c = from_bytes(s[i: i + 5]) # big endian encoded += (b32tab2[c >> 30] + # bits 1 - 10 b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20 b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30 b32tab2[c & 0x3ff] # bits 31 - 40 ) # Adjust for any leftover partial quanta if leftover == 1: encoded[-6:] = b'======' elif leftover == 2: encoded[-4:] = b'====' elif leftover == 3: encoded[-3:] = b'===' elif leftover == 4: encoded[-1:] = b'=' return bytes(encoded) def _b32decode(alphabet, s, casefold=False, map01=None): global _b32rev # Delay the initialization of the table to not waste memory # if the function is never called if alphabet not in _b32rev: _b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)} s = _bytes_from_decode_data(s) if len(s) % 8: raise binascii.Error('Incorrect padding') # Handle section 2.4 zero and one mapping. The flag map01 will be either # False, or the character to map the digit 1 (one) to. It should be # either L (el) or I (eye). if map01 is not None: map01 = _bytes_from_decode_data(map01) assert len(map01) == 1, repr(map01) s = s.translate(bytes.maketrans(b'01', b'O' + map01)) if casefold: s = s.upper() # Strip off pad characters from the right. 
We need to count the pad # characters because this will tell us how many null bytes to remove from # the end of the decoded string. l = len(s) s = s.rstrip(b'=') padchars = l - len(s) # Now decode the full quanta decoded = bytearray() b32rev = _b32rev[alphabet] for i in range(0, len(s), 8): quanta = s[i: i + 8] acc = 0 try: for c in quanta: acc = (acc << 5) + b32rev[c] except KeyError: raise binascii.Error('Non-base32 digit found') from None decoded += acc.to_bytes(5) # big endian # Process the last, partial quanta if l % 8 or padchars not in {0, 1, 3, 4, 6}: raise binascii.Error('Incorrect padding') if padchars and decoded: acc <<= 5 * padchars last = acc.to_bytes(5) # big endian leftover = (43 - 5 * padchars) // 8 # 1: 4, 3: 3, 4: 2, 6: 1 decoded[-5:] = last[:leftover] return bytes(decoded) def b32encode(s): return _b32encode(_b32alphabet, s) b32encode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32') def b32decode(s, casefold=False, map01=None): return _b32decode(_b32alphabet, s, casefold, map01) b32decode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32', extra_args=_B32_DECODE_MAP01_DOCSTRING) def b32hexencode(s): return _b32encode(_b32hexalphabet, s) b32hexencode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32hex') def b32hexdecode(s, casefold=False): # base32hex does not have the 01 mapping return _b32decode(_b32hexalphabet, s, casefold) b32hexdecode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32hex', extra_args='') # RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns # lowercase. The RFC also recommends against accepting input case # insensitively. def b16encode(s): """Encode the bytes-like object s using Base16 and return a bytes object. """ return binascii.hexlify(s).upper() def b16decode(s, casefold=False): """Decode the Base16 encoded bytes-like object or ASCII string s. Optional casefold is a flag specifying whether a lowercase alphabet is acceptable as input. 
For security purposes, the default is False. The result is returned as a bytes object. A binascii.Error is raised if s is incorrectly padded or if there are non-alphabet characters present in the input. """ s = _bytes_from_decode_data(s) if casefold: s = s.upper() if re.search(b'[^0-9A-F]', s): raise binascii.Error('Non-base16 digit found') return binascii.unhexlify(s) # # Ascii85 encoding/decoding # _a85chars = None _a85chars2 = None _A85START = b"<~" _A85END = b"~>" def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False): # Helper function for a85encode and b85encode if not isinstance(b, bytes_types): b = memoryview(b).tobytes() padding = (-len(b)) % 4 if padding: b = b + b'\0' * padding words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b) chunks = [b'z' if foldnuls and not word else b'y' if foldspaces and word == 0x20202020 else (chars2[word // 614125] + chars2[word // 85 % 7225] + chars[word % 85]) for word in words] if padding and not pad: if chunks[-1] == b'z': chunks[-1] = chars[0] * 5 chunks[-1] = chunks[-1][:-padding] return b''.join(chunks) def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): """Encode bytes-like object b using Ascii85 and return a bytes object. foldspaces is an optional flag that uses the special short sequence 'y' instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This feature is not supported by the "standard" Adobe encoding. wrapcol controls whether the output should have newline (b'\\n') characters added to it. If this is non-zero, each output line will be at most this many characters long. pad controls whether the input is padded to a multiple of 4 before encoding. Note that the btoa implementation always pads. adobe controls whether the encoded byte sequence is framed with <~ and ~>, which is used by the Adobe implementation. 
""" global _a85chars, _a85chars2 # Delay the initialization of tables to not waste memory # if the function is never called if _a85chars2 is None: _a85chars = [bytes((i,)) for i in range(33, 118)] _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars] result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces) if adobe: result = _A85START + result if wrapcol: wrapcol = max(2 if adobe else 1, wrapcol) chunks = [result[i: i + wrapcol] for i in range(0, len(result), wrapcol)] if adobe: if len(chunks[-1]) + 2 > wrapcol: chunks.append(b'') result = b'\n'.join(chunks) if adobe: result += _A85END return result def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): """Decode the Ascii85 encoded bytes-like object or ASCII string b. foldspaces is a flag that specifies whether the 'y' short sequence should be accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is not supported by the "standard" Adobe encoding. adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. is framed with <~ and ~>). ignorechars should be a byte string containing characters to ignore from the input. This should only contain whitespace characters, and by default contains all whitespace characters in ASCII. The result is returned as a bytes object. 
""" b = _bytes_from_decode_data(b) if adobe: if not b.endswith(_A85END): raise ValueError( "Ascii85 encoded byte sequences must end " "with {!r}".format(_A85END) ) if b.startswith(_A85START): b = b[2:-2] # Strip off start/end markers else: b = b[:-2] # # We have to go through this stepwise, so as to ignore spaces and handle # special short sequences # packI = struct.Struct('!I').pack decoded = [] decoded_append = decoded.append curr = [] curr_append = curr.append curr_clear = curr.clear for x in b + b'u' * 4: if b'!'[0] <= x <= b'u'[0]: curr_append(x) if len(curr) == 5: acc = 0 for x in curr: acc = 85 * acc + (x - 33) try: decoded_append(packI(acc)) except struct.error: raise ValueError('Ascii85 overflow') from None curr_clear() elif x == b'z'[0]: if curr: raise ValueError('z inside Ascii85 5-tuple') decoded_append(b'\0\0\0\0') elif foldspaces and x == b'y'[0]: if curr: raise ValueError('y inside Ascii85 5-tuple') decoded_append(b'\x20\x20\x20\x20') elif x in ignorechars: # Skip whitespace continue else: raise ValueError('Non-Ascii85 digit found: %c' % x) result = b''.join(decoded) padding = 4 - len(curr) if padding: # Throw away the extra padding result = result[:-padding] return result # The following code is originally taken (with permission) from Mercurial _b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") _b85chars = None _b85chars2 = None _b85dec = None def b85encode(b, pad=False): """Encode bytes-like object b in base85 format and return a bytes object. If pad is true, the input is padded with b'\\0' so its length is a multiple of 4 bytes before encoding. 
""" global _b85chars, _b85chars2 # Delay the initialization of tables to not waste memory # if the function is never called if _b85chars2 is None: _b85chars = [bytes((i,)) for i in _b85alphabet] _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] return _85encode(b, _b85chars, _b85chars2, pad) def b85decode(b): """Decode the base85-encoded bytes-like object or ASCII string b The result is returned as a bytes object. """ global _b85dec # Delay the initialization of tables to not waste memory # if the function is never called if _b85dec is None: _b85dec = [None] * 256 for i, c in enumerate(_b85alphabet): _b85dec[c] = i b = _bytes_from_decode_data(b) padding = (-len(b)) % 5 b = b + b'~' * padding out = [] packI = struct.Struct('!I').pack for i in range(0, len(b), 5): chunk = b[i:i + 5] acc = 0 try: for c in chunk: acc = acc * 85 + _b85dec[c] except TypeError: for j, c in enumerate(chunk): if _b85dec[c] is None: raise ValueError('bad base85 character at position %d' % (i + j)) from None raise try: out.append(packI(acc)) except struct.error: raise ValueError('base85 overflow in hunk starting at byte %d' % i) from None result = b''.join(out) if padding: result = result[:-padding] return result # Legacy interface. This code could be cleaned up since I don't believe # binascii has any line length limitations. It just doesn't seem worth it # though. The files should be opened in binary mode. 
MAXLINESIZE = 76 # Excluding the CRLF MAXBINSIZE = (MAXLINESIZE//4)*3 def encode(input, output): """Encode a file; input and output are binary files.""" while True: s = input.read(MAXBINSIZE) if not s: break while len(s) < MAXBINSIZE: ns = input.read(MAXBINSIZE-len(s)) if not ns: break s += ns line = binascii.b2a_base64(s) output.write(line) def decode(input, output): """Decode a file; input and output are binary files.""" while True: line = input.readline() if not line: break s = binascii.a2b_base64(line) output.write(s) def _input_type_check(s): try: m = memoryview(s) except TypeError as err: msg = "expected bytes-like object, not %s" % s.__class__.__name__ raise TypeError(msg) from err if m.format not in ('c', 'b', 'B'): msg = ("expected single byte elements, not %r from %s" % (m.format, s.__class__.__name__)) raise TypeError(msg) if m.ndim != 1: msg = ("expected 1-D data, not %d-D data from %s" % (m.ndim, s.__class__.__name__)) raise TypeError(msg) def encodebytes(s): """Encode a bytestring into a bytes object containing multiple lines of base-64 data.""" _input_type_check(s) pieces = [] for i in range(0, len(s), MAXBINSIZE): chunk = s[i : i + MAXBINSIZE] pieces.append(binascii.b2a_base64(chunk)) return b"".join(pieces) def decodebytes(s): """Decode a bytestring of base-64 data into a bytes object.""" _input_type_check(s) return binascii.a2b_base64(s) # Usable as a script... 
def main(): """Small main program""" import sys, getopt usage = """usage: %s [-h|-d|-e|-u|-t] [file|-] -h: print this help message and exit -d, -u: decode -e: encode (default) -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0] try: opts, args = getopt.getopt(sys.argv[1:], 'hdeut') except getopt.error as msg: sys.stdout = sys.stderr print(msg) print(usage) sys.exit(2) func = encode for o, a in opts: if o == '-e': func = encode if o == '-d': func = decode if o == '-u': func = decode if o == '-t': test(); return if o == '-h': print(usage); return if args and args[0] != '-': with open(args[0], 'rb') as f: func(f, sys.stdout.buffer) else: func(sys.stdin.buffer, sys.stdout.buffer) def test(): s0 = b"Aladdin:open sesame" print(repr(s0)) s1 = encodebytes(s0) print(repr(s1)) s2 = decodebytes(s1) print(repr(s2)) assert s0 == s2 if __name__ == '__main__': main()
bsd-3-clause
aeccc714f5c1ac34410de1ee745b83cb
33.642504
84
0.615322
3.603153
false
false
false
false
brython-dev/brython
www/src/Lib/test/test_multibytecodec.py
4
16134
# # test_multibytecodec.py # Unit test for multibytecodec itself # import _multibytecodec import codecs import io import sys import textwrap import unittest from test import support from test.support import os_helper from test.support.os_helper import TESTFN ALL_CJKENCODINGS = [ # _codecs_cn 'gb2312', 'gbk', 'gb18030', 'hz', # _codecs_hk 'big5hkscs', # _codecs_jp 'cp932', 'shift_jis', 'euc_jp', 'euc_jisx0213', 'shift_jisx0213', 'euc_jis_2004', 'shift_jis_2004', # _codecs_kr 'cp949', 'euc_kr', 'johab', # _codecs_tw 'big5', 'cp950', # _codecs_iso2022 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2', 'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', ] class Test_MultibyteCodec(unittest.TestCase): def test_nullcoding(self): for enc in ALL_CJKENCODINGS: self.assertEqual(b''.decode(enc), '') self.assertEqual(str(b'', enc), '') self.assertEqual(''.encode(enc), b'') def test_str_decode(self): for enc in ALL_CJKENCODINGS: self.assertEqual('abcd'.encode(enc), b'abcd') def test_errorcallback_longindex(self): dec = codecs.getdecoder('euc-kr') myreplace = lambda exc: ('', sys.maxsize+1) codecs.register_error('test.cjktest', myreplace) self.assertRaises(IndexError, dec, b'apple\x92ham\x93spam', 'test.cjktest') def test_errorcallback_custom_ignore(self): # Issue #23215: MemoryError with custom error handlers and multibyte codecs data = 100 * "\udc00" codecs.register_error("test.ignore", codecs.ignore_errors) for enc in ALL_CJKENCODINGS: self.assertEqual(data.encode(enc, "test.ignore"), b'') def test_codingspec(self): try: for enc in ALL_CJKENCODINGS: code = '# coding: {}\n'.format(enc) exec(code) finally: os_helper.unlink(TESTFN) def test_init_segfault(self): # bug #3305: this used to segfault self.assertRaises(AttributeError, _multibytecodec.MultibyteStreamReader, None) self.assertRaises(AttributeError, _multibytecodec.MultibyteStreamWriter, None) def test_decode_unicode(self): # Trying to decode a unicode string should raise a TypeError for enc in ALL_CJKENCODINGS: 
self.assertRaises(TypeError, codecs.getdecoder(enc), "") class Test_IncrementalEncoder(unittest.TestCase): def test_stateless(self): # cp949 encoder isn't stateful at all. encoder = codecs.getincrementalencoder('cp949')() self.assertEqual(encoder.encode('\ud30c\uc774\uc36c \ub9c8\uc744'), b'\xc6\xc4\xc0\xcc\xbd\xe3 \xb8\xb6\xc0\xbb') self.assertEqual(encoder.reset(), None) self.assertEqual(encoder.encode('\u2606\u223c\u2606', True), b'\xa1\xd9\xa1\xad\xa1\xd9') self.assertEqual(encoder.reset(), None) self.assertEqual(encoder.encode('', True), b'') self.assertEqual(encoder.encode('', False), b'') self.assertEqual(encoder.reset(), None) def test_stateful(self): # jisx0213 encoder is stateful for a few code points. eg) # U+00E6 => A9DC # U+00E6 U+0300 => ABC4 # U+0300 => ABDC encoder = codecs.getincrementalencoder('jisx0213')() self.assertEqual(encoder.encode('\u00e6\u0300'), b'\xab\xc4') self.assertEqual(encoder.encode('\u00e6'), b'') self.assertEqual(encoder.encode('\u0300'), b'\xab\xc4') self.assertEqual(encoder.encode('\u00e6', True), b'\xa9\xdc') self.assertEqual(encoder.reset(), None) self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc') self.assertEqual(encoder.encode('\u00e6'), b'') self.assertEqual(encoder.encode('', True), b'\xa9\xdc') self.assertEqual(encoder.encode('', True), b'') def test_stateful_keep_buffer(self): encoder = codecs.getincrementalencoder('jisx0213')() self.assertEqual(encoder.encode('\u00e6'), b'') self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123') self.assertEqual(encoder.encode('\u0300\u00e6'), b'\xab\xc4') self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123') self.assertEqual(encoder.reset(), None) self.assertEqual(encoder.encode('\u0300'), b'\xab\xdc') self.assertEqual(encoder.encode('\u00e6'), b'') self.assertRaises(UnicodeEncodeError, encoder.encode, '\u0123') self.assertEqual(encoder.encode('', True), b'\xa9\xdc') def test_state_methods_with_buffer_state(self): # euc_jis_2004 stores state as a buffer of 
pending bytes encoder = codecs.getincrementalencoder('euc_jis_2004')() initial_state = encoder.getstate() self.assertEqual(encoder.encode('\u00e6\u0300'), b'\xab\xc4') encoder.setstate(initial_state) self.assertEqual(encoder.encode('\u00e6\u0300'), b'\xab\xc4') self.assertEqual(encoder.encode('\u00e6'), b'') partial_state = encoder.getstate() self.assertEqual(encoder.encode('\u0300'), b'\xab\xc4') encoder.setstate(partial_state) self.assertEqual(encoder.encode('\u0300'), b'\xab\xc4') def test_state_methods_with_non_buffer_state(self): # iso2022_jp stores state without using a buffer encoder = codecs.getincrementalencoder('iso2022_jp')() self.assertEqual(encoder.encode('z'), b'z') en_state = encoder.getstate() self.assertEqual(encoder.encode('\u3042'), b'\x1b\x24\x42\x24\x22') jp_state = encoder.getstate() self.assertEqual(encoder.encode('z'), b'\x1b\x28\x42z') encoder.setstate(jp_state) self.assertEqual(encoder.encode('\u3042'), b'\x24\x22') encoder.setstate(en_state) self.assertEqual(encoder.encode('z'), b'z') def test_getstate_returns_expected_value(self): # Note: getstate is implemented such that these state values # are expected to be the same across all builds of Python, # regardless of x32/64 bit, endianness and compiler. 
# euc_jis_2004 stores state as a buffer of pending bytes buffer_state_encoder = codecs.getincrementalencoder('euc_jis_2004')() self.assertEqual(buffer_state_encoder.getstate(), 0) buffer_state_encoder.encode('\u00e6') self.assertEqual(buffer_state_encoder.getstate(), int.from_bytes( b"\x02" b"\xc3\xa6" b"\x00\x00\x00\x00\x00\x00\x00\x00", 'little')) buffer_state_encoder.encode('\u0300') self.assertEqual(buffer_state_encoder.getstate(), 0) # iso2022_jp stores state without using a buffer non_buffer_state_encoder = codecs.getincrementalencoder('iso2022_jp')() self.assertEqual(non_buffer_state_encoder.getstate(), int.from_bytes( b"\x00" b"\x42\x42\x00\x00\x00\x00\x00\x00", 'little')) non_buffer_state_encoder.encode('\u3042') self.assertEqual(non_buffer_state_encoder.getstate(), int.from_bytes( b"\x00" b"\xc2\x42\x00\x00\x00\x00\x00\x00", 'little')) def test_setstate_validates_input_size(self): encoder = codecs.getincrementalencoder('euc_jp')() pending_size_nine = int.from_bytes( b"\x09" b"\x00\x00\x00\x00\x00\x00\x00\x00" b"\x00\x00\x00\x00\x00\x00\x00\x00", 'little') self.assertRaises(UnicodeError, encoder.setstate, pending_size_nine) def test_setstate_validates_input_bytes(self): encoder = codecs.getincrementalencoder('euc_jp')() invalid_utf8 = int.from_bytes( b"\x01" b"\xff" b"\x00\x00\x00\x00\x00\x00\x00\x00", 'little') self.assertRaises(UnicodeDecodeError, encoder.setstate, invalid_utf8) def test_issue5640(self): encoder = codecs.getincrementalencoder('shift-jis')('backslashreplace') self.assertEqual(encoder.encode('\xff'), b'\\xff') self.assertEqual(encoder.encode('\n'), b'\n') @support.cpython_only def test_subinterp(self): # bpo-42846: Test a CJK codec in a subinterpreter import _testcapi encoding = 'cp932' text = "Python の開発は、1990 年ごろから開始されています。" code = textwrap.dedent(""" import codecs encoding = %r text = %r encoder = codecs.getincrementalencoder(encoding)() text2 = encoder.encode(text).decode(encoding) if text2 != text: raise ValueError(f"encoding issue: 
{text2!a} != {text!a}") """) % (encoding, text) res = _testcapi.run_in_subinterp(code) self.assertEqual(res, 0) class Test_IncrementalDecoder(unittest.TestCase): def test_dbcs(self): # cp949 decoder is simple with only 1 or 2 bytes sequences. decoder = codecs.getincrementaldecoder('cp949')() self.assertEqual(decoder.decode(b'\xc6\xc4\xc0\xcc\xbd'), '\ud30c\uc774') self.assertEqual(decoder.decode(b'\xe3 \xb8\xb6\xc0\xbb'), '\uc36c \ub9c8\uc744') self.assertEqual(decoder.decode(b''), '') def test_dbcs_keep_buffer(self): decoder = codecs.getincrementaldecoder('cp949')() self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c') self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True) self.assertEqual(decoder.decode(b'\xcc'), '\uc774') self.assertEqual(decoder.decode(b'\xc6\xc4\xc0'), '\ud30c') self.assertRaises(UnicodeDecodeError, decoder.decode, b'\xcc\xbd', True) self.assertEqual(decoder.decode(b'\xcc'), '\uc774') def test_iso2022(self): decoder = codecs.getincrementaldecoder('iso2022-jp')() ESC = b'\x1b' self.assertEqual(decoder.decode(ESC + b'('), '') self.assertEqual(decoder.decode(b'B', True), '') self.assertEqual(decoder.decode(ESC + b'$'), '') self.assertEqual(decoder.decode(b'B@$'), '\u4e16') self.assertEqual(decoder.decode(b'@$@'), '\u4e16') self.assertEqual(decoder.decode(b'$', True), '\u4e16') self.assertEqual(decoder.reset(), None) self.assertEqual(decoder.decode(b'@$'), '@$') self.assertEqual(decoder.decode(ESC + b'$'), '') self.assertRaises(UnicodeDecodeError, decoder.decode, b'', True) self.assertEqual(decoder.decode(b'B@$'), '\u4e16') def test_decode_unicode(self): # Trying to decode a unicode string should raise a TypeError for enc in ALL_CJKENCODINGS: decoder = codecs.getincrementaldecoder(enc)() self.assertRaises(TypeError, decoder.decode, "") def test_state_methods(self): decoder = codecs.getincrementaldecoder('euc_jp')() # Decode a complete input sequence self.assertEqual(decoder.decode(b'\xa4\xa6'), '\u3046') pending1, _ = 
decoder.getstate() self.assertEqual(pending1, b'') # Decode first half of a partial input sequence self.assertEqual(decoder.decode(b'\xa4'), '') pending2, flags2 = decoder.getstate() self.assertEqual(pending2, b'\xa4') # Decode second half of a partial input sequence self.assertEqual(decoder.decode(b'\xa6'), '\u3046') pending3, _ = decoder.getstate() self.assertEqual(pending3, b'') # Jump back and decode second half of partial input sequence again decoder.setstate((pending2, flags2)) self.assertEqual(decoder.decode(b'\xa6'), '\u3046') pending4, _ = decoder.getstate() self.assertEqual(pending4, b'') # Ensure state values are preserved correctly decoder.setstate((b'abc', 123456789)) self.assertEqual(decoder.getstate(), (b'abc', 123456789)) def test_setstate_validates_input(self): decoder = codecs.getincrementaldecoder('euc_jp')() self.assertRaises(TypeError, decoder.setstate, 123) self.assertRaises(TypeError, decoder.setstate, ("invalid", 0)) self.assertRaises(TypeError, decoder.setstate, (b"1234", "invalid")) self.assertRaises(UnicodeError, decoder.setstate, (b"123456789", 0)) class Test_StreamReader(unittest.TestCase): def test_bug1728403(self): try: f = open(TESTFN, 'wb') try: f.write(b'\xa1') finally: f.close() f = codecs.open(TESTFN, encoding='cp949') try: self.assertRaises(UnicodeDecodeError, f.read, 2) finally: f.close() finally: os_helper.unlink(TESTFN) class Test_StreamWriter(unittest.TestCase): def test_gb18030(self): s= io.BytesIO() c = codecs.getwriter('gb18030')(s) c.write('123') self.assertEqual(s.getvalue(), b'123') c.write('\U00012345') self.assertEqual(s.getvalue(), b'123\x907\x959') c.write('\uac00\u00ac') self.assertEqual(s.getvalue(), b'123\x907\x959\x827\xcf5\x810\x851') def test_utf_8(self): s= io.BytesIO() c = codecs.getwriter('utf-8')(s) c.write('123') self.assertEqual(s.getvalue(), b'123') c.write('\U00012345') self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85') c.write('\uac00\u00ac') self.assertEqual(s.getvalue(), b'123\xf0\x92\x8d\x85' 
b'\xea\xb0\x80\xc2\xac') def test_streamwriter_strwrite(self): s = io.BytesIO() wr = codecs.getwriter('gb18030')(s) wr.write('abcd') self.assertEqual(s.getvalue(), b'abcd') class Test_ISO2022(unittest.TestCase): def test_g2(self): iso2022jp2 = b'\x1b(B:hu4:unit\x1b.A\x1bNi de famille' uni = ':hu4:unit\xe9 de famille' self.assertEqual(iso2022jp2.decode('iso2022-jp-2'), uni) def test_iso2022_jp_g0(self): self.assertNotIn(b'\x0e', '\N{SOFT HYPHEN}'.encode('iso-2022-jp-2')) for encoding in ('iso-2022-jp-2004', 'iso-2022-jp-3'): e = '\u3406'.encode(encoding) self.assertFalse(any(x > 0x80 for x in e)) def test_bug1572832(self): for x in range(0x10000, 0x110000): # Any ISO 2022 codec will cause the segfault chr(x).encode('iso_2022_jp', 'ignore') class TestStateful(unittest.TestCase): text = '\u4E16\u4E16' encoding = 'iso-2022-jp' expected = b'\x1b$B@$@$' reset = b'\x1b(B' expected_reset = expected + reset def test_encode(self): self.assertEqual(self.text.encode(self.encoding), self.expected_reset) def test_incrementalencoder(self): encoder = codecs.getincrementalencoder(self.encoding)() output = b''.join( encoder.encode(char) for char in self.text) self.assertEqual(output, self.expected) self.assertEqual(encoder.encode('', final=True), self.reset) self.assertEqual(encoder.encode('', final=True), b'') def test_incrementalencoder_final(self): encoder = codecs.getincrementalencoder(self.encoding)() last_index = len(self.text) - 1 output = b''.join( encoder.encode(char, index == last_index) for index, char in enumerate(self.text)) self.assertEqual(output, self.expected_reset) self.assertEqual(encoder.encode('', final=True), b'') class TestHZStateful(TestStateful): text = '\u804a\u804a' encoding = 'hz' expected = b'~{ADAD' reset = b'~}' expected_reset = expected + reset if __name__ == "__main__": unittest.main()
bsd-3-clause
3c47464e390eceafda9f6d5e648c18ab
38.45098
83
0.601764
3.567376
false
true
false
false
brython-dev/brython
www/src/Lib/test/test_userlist.py
6
2016
# Check every path through every method of UserList from collections import UserList from test import list_tests import unittest class UserListTest(list_tests.CommonTest): type2test = UserList def test_getslice(self): super().test_getslice() l = [0, 1, 2, 3, 4] u = self.type2test(l) for i in range(-3, 6): self.assertEqual(u[:i], l[:i]) self.assertEqual(u[i:], l[i:]) for j in range(-3, 6): self.assertEqual(u[i:j], l[i:j]) def test_slice_type(self): l = [0, 1, 2, 3, 4] u = UserList(l) self.assertIsInstance(u[:], u.__class__) self.assertEqual(u[:],u) def test_add_specials(self): u = UserList("spam") u2 = u + "eggs" self.assertEqual(u2, list("spameggs")) def test_radd_specials(self): u = UserList("eggs") u2 = "spam" + u self.assertEqual(u2, list("spameggs")) u2 = u.__radd__(UserList("spam")) self.assertEqual(u2, list("spameggs")) def test_iadd(self): super().test_iadd() u = [0, 1] u += UserList([0, 1]) self.assertEqual(u, [0, 1, 0, 1]) def test_mixedcmp(self): u = self.type2test([0, 1]) self.assertEqual(u, [0, 1]) self.assertNotEqual(u, [0]) self.assertNotEqual(u, [0, 2]) def test_mixedadd(self): u = self.type2test([0, 1]) self.assertEqual(u + [], u) self.assertEqual(u + [2], [0, 1, 2]) def test_getitemoverwriteiter(self): # Verify that __getitem__ overrides *are* recognized by __iter__ class T(self.type2test): def __getitem__(self, key): return str(key) + '!!!' self.assertEqual(next(iter(T((1,2)))), "0!!!") def test_userlist_copy(self): u = self.type2test([6, 8, 1, 9, 1]) v = u.copy() self.assertEqual(u, v) self.assertEqual(type(u), type(v)) if __name__ == "__main__": unittest.main()
bsd-3-clause
76163bb5cbf0622507518441ced39cd8
28.217391
72
0.528274
3.288744
false
true
false
false
brython-dev/brython
www/gallery/raphael/picker.py
2
1553
import math from browser import document, window Raphael = window.Raphael out = document["output"] vr = document["vr"] vg = document["vg"] vb = document["vb"] vh = document["vh"] vh2 = document["vh2"] vs = document["vs"] vs2 = document["vs2"] vv = document["vv"] vl = document["vl"] def update_color(ev, cp, cp2): cp.color(ev.target.value) cp2.color(ev.target.value) def on_change(item): def callback(clr): out.value = clr item.color(clr) out.style.background = clr out.style.color = "#fff" if Raphael.rgb2hsb(clr).b < .5 else "#000" clr = Raphael.color(clr) vr.html = clr.r vg.html = clr.g vb.html = clr.b vh.html = vh2.html = "%s°" %round(clr.h * 360) vs.html = vs2.html = "%s%%" %round(clr.s * 100) vv.html = "%s%%" %round(clr.v * 100) vl.html = "%s%%" %round(clr.l * 100) return callback def picker(): # this is where colorpicker created cp = Raphael.colorpicker(40, 20, 300, "#eee") cp2 = Raphael.colorwheel(360, 20, 300, "#eee") clr = Raphael.color("#eee") vr.html = clr.r vg.html = clr.g vb.html = clr.b vh.html = vh2.html = "%s°" %round(clr.h * 360) vs.html = vs2.html = "%s%%" %round(clr.s * 100) vv.html = "%s%%" %round(clr.v * 100) vl.html = "%s%%" %round(clr.l * 100) out.bind('keyup', lambda ev: update_color(ev, cp, cp2)) # assigning onchange event handler cp.onchange = on_change(cp2) cp2.onchange = on_change(cp) Raphael(picker)
bsd-3-clause
6000e8509caa550c7bd435ce2c829101
24.016129
75
0.567376
2.669535
false
false
false
false
brython-dev/brython
www/src/Lib/signal.py
14
2438
import _signal from _signal import * from enum import IntEnum as _IntEnum _globals = globals() _IntEnum._convert_( 'Signals', __name__, lambda name: name.isupper() and (name.startswith('SIG') and not name.startswith('SIG_')) or name.startswith('CTRL_')) _IntEnum._convert_( 'Handlers', __name__, lambda name: name in ('SIG_DFL', 'SIG_IGN')) if 'pthread_sigmask' in _globals: _IntEnum._convert_( 'Sigmasks', __name__, lambda name: name in ('SIG_BLOCK', 'SIG_UNBLOCK', 'SIG_SETMASK')) def _int_to_enum(value, enum_klass): """Convert a numeric value to an IntEnum member. If it's not a known member, return the numeric value itself. """ try: return enum_klass(value) except ValueError: return value def _enum_to_int(value): """Convert an IntEnum member to a numeric value. If it's not an IntEnum member return the value itself. """ try: return int(value) except (ValueError, TypeError): return value # Similar to functools.wraps(), but only assign __doc__. # __module__ should be preserved, # __name__ and __qualname__ are already fine, # __annotations__ is not set. 
def _wraps(wrapped): def decorator(wrapper): wrapper.__doc__ = wrapped.__doc__ return wrapper return decorator @_wraps(_signal.signal) def signal(signalnum, handler): handler = _signal.signal(_enum_to_int(signalnum), _enum_to_int(handler)) return _int_to_enum(handler, Handlers) @_wraps(_signal.getsignal) def getsignal(signalnum): handler = _signal.getsignal(signalnum) return _int_to_enum(handler, Handlers) if 'pthread_sigmask' in _globals: @_wraps(_signal.pthread_sigmask) def pthread_sigmask(how, mask): sigs_set = _signal.pthread_sigmask(how, mask) return set(_int_to_enum(x, Signals) for x in sigs_set) if 'sigpending' in _globals: @_wraps(_signal.sigpending) def sigpending(): return {_int_to_enum(x, Signals) for x in _signal.sigpending()} if 'sigwait' in _globals: @_wraps(_signal.sigwait) def sigwait(sigset): retsig = _signal.sigwait(sigset) return _int_to_enum(retsig, Signals) if 'valid_signals' in _globals: @_wraps(_signal.valid_signals) def valid_signals(): return {_int_to_enum(x, Signals) for x in _signal.valid_signals()} del _globals, _wraps
bsd-3-clause
1977448442b3dd8e529a1d2e1d5208d7
25.5
77
0.629204
3.458156
false
false
false
false
brython-dev/brython
www/src/Lib/copyreg.py
1
7677
"""Helper to provide extensibility for pickle. This is only useful to add pickle support for extension types defined in C, not for instances of user-defined classes. """ __all__ = ["pickle", "constructor", "add_extension", "remove_extension", "clear_extension_cache"] dispatch_table = {} def pickle(ob_type, pickle_function, constructor_ob=None): if not callable(pickle_function): raise TypeError("reduction functions must be callable") dispatch_table[ob_type] = pickle_function # The constructor_ob function is a vestige of safe for unpickling. # There is no reason for the caller to pass it anymore. if constructor_ob is not None: constructor(constructor_ob) def constructor(object): if not callable(object): raise TypeError("constructors must be callable") # Example: provide pickling support for complex numbers. try: complex except NameError: pass else: def pickle_complex(c): return complex, (c.real, c.imag) pickle(complex, pickle_complex, complex) def pickle_union(obj): import functools, operator return functools.reduce, (operator.or_, obj.__args__) pickle(type(int | str), pickle_union) # Support for pickling new-style objects def _reconstructor(cls, base, state): if base is object: obj = object.__new__(cls) else: obj = base.__new__(cls, state) if base.__init__ != object.__init__: base.__init__(obj, state) return obj _HEAPTYPE = 1<<9 _new_type = type(int.__new__) # Python code for object.__reduce_ex__ for protocols 0 and 1 def _reduce_ex(self, proto): assert proto < 2 cls = self.__class__ for base in cls.__mro__: if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE: break new = base.__new__ if isinstance(new, _new_type) and new.__self__ is base: break else: base = object # not really reachable if base is object: state = None else: if base is cls: raise TypeError(f"cannot pickle {cls.__name__!r} object") state = base(self) args = (cls, base, state) try: getstate = self.__getstate__ except AttributeError: if getattr(self, "__slots__", None): raise 
TypeError(f"cannot pickle {cls.__name__!r} object: " f"a class that defines __slots__ without " f"defining __getstate__ cannot be pickled " f"with protocol {proto}") from None try: dict = self.__dict__ except AttributeError: dict = None else: if (type(self).__getstate__ is object.__getstate__ and getattr(self, "__slots__", None)): raise TypeError("a class that defines __slots__ without " "defining __getstate__ cannot be pickled") dict = getstate() if dict: return _reconstructor, args, dict else: return _reconstructor, args # Helper for __reduce_ex__ protocol 2 def __newobj__(cls, *args): return cls.__new__(cls, *args) def __newobj_ex__(cls, args, kwargs): """Used by pickle protocol 4, instead of __newobj__ to allow classes with keyword-only arguments to be pickled correctly. """ return cls.__new__(cls, *args, **kwargs) def _slotnames(cls): """Return a list of slot names for a given class. This needs to find slots defined by the class and its bases, so we can't simply return the __slots__ attribute. We must walk down the Method Resolution Order and concatenate the __slots__ of each class found there. (This assumes classes don't modify their __slots__ attribute to misrepresent their slots after the class is defined.) 
""" # Get the value from a cache in the class if possible names = cls.__dict__.get("__slotnames__") if names is not None: return names # Not cached -- calculate the value names = [] if not hasattr(cls, "__slots__"): # This class has no slots pass else: # Slots found -- gather slot names from all base classes for c in cls.__mro__: if "__slots__" in c.__dict__: slots = c.__dict__['__slots__'] # if class has a single slot, it can be given as a string if isinstance(slots, str): slots = (slots,) for name in slots: # special descriptors if name in ("__dict__", "__weakref__"): continue # mangled names elif name.startswith('__') and not name.endswith('__'): stripped = c.__name__.lstrip('_') if stripped: names.append('_%s%s' % (stripped, name)) else: names.append(name) else: names.append(name) # Cache the outcome in the class if at all possible try: cls.__slotnames__ = names except: pass # But don't die if we can't return names # A registry of extension codes. This is an ad-hoc compression # mechanism. Whenever a global reference to <module>, <name> is about # to be pickled, the (<module>, <name>) tuple is looked up here to see # if it is a registered extension code for it. Extension codes are # universal, so that the meaning of a pickle does not depend on # context. (There are also some codes reserved for local use that # don't have this restriction.) Codes are positive ints; 0 is # reserved. _extension_registry = {} # key -> code _inverted_registry = {} # code -> key _extension_cache = {} # code -> object # Don't ever rebind those names: pickling grabs a reference to them when # it's initialized, and won't see a rebinding. 
def add_extension(module, name, code): """Register an extension code.""" code = int(code) if not 1 <= code <= 0x7fffffff: raise ValueError("code out of range") key = (module, name) if (_extension_registry.get(key) == code and _inverted_registry.get(code) == key): return # Redundant registrations are benign if key in _extension_registry: raise ValueError("key %s is already registered with code %s" % (key, _extension_registry[key])) if code in _inverted_registry: raise ValueError("code %s is already in use for key %s" % (code, _inverted_registry[code])) _extension_registry[key] = code _inverted_registry[code] = key def remove_extension(module, name, code): """Unregister an extension code. For testing only.""" key = (module, name) if (_extension_registry.get(key) != code or _inverted_registry.get(code) != key): raise ValueError("key %s is not registered with code %s" % (key, code)) del _extension_registry[key] del _inverted_registry[code] if code in _extension_cache: del _extension_cache[code] def clear_extension_cache(): _extension_cache.clear() # Standard extension code assignments # Reserved ranges # First Last Count Purpose # 1 127 127 Reserved for Python standard library # 128 191 64 Reserved for Zope # 192 239 48 Reserved for 3rd parties # 240 255 16 Reserved for private use (will never be assigned) # 256 Inf Inf Reserved for future assignment # Extension codes are assigned by the Python Software Foundation.
bsd-3-clause
54b32ede3f27c533bf6dac25e5cbed97
33.426009
77
0.590726
4.176823
false
false
false
false
brython-dev/brython
www/tests/test_fstrings.py
1
1843
from tester import assertRaises try: "{:+3}".format('essai') except ValueError: pass x = f'{"""a}c"""[1]}' """ab cd""" assert x == "}" d = {0: 'zero'} assert f"""{d[0 ]}""" == "zero" assert f'{"quoted string"}' == "quoted string" assert f'{{ {4*10} }}' == "{ 40 }" assert f'{{{4*10}}}' == "{40}" x = 25.48765433 assert f'{x:.3f}' == "25.488" nb = 3 assert f'{x:.{nb}f}' == "25.488" def fn(l, incr): result = l[0] l[0] += incr return result lst = [0] assert f'{fn(lst,2)} {fn(lst,3)}' == '0 2' assert f'{fn(lst,2)} {fn(lst,3)}' == '5 7' assert lst == [10] d = {0: 10, 1: 20} for i in range(3): if i == 2: try: f'{i}:{d[i]}' raise AssertionError("should have raised KeyError") except KeyError: pass else: f'{i}:{d[i]}' for x in (32, 100, 'fifty'): try: f'x = {x:+3}' except ValueError: if x != 'fifty': raise # quote inside fstring t = 8 assert f"'{t}px'" == "'8px'" # issue 1086 d = f'''ddf u{123} zz''' assert d == "ddf\nu123\nzz" # issue 1183 a = f"" assert a == "" entry = "hello" a = f""" <p> {entry} </p> """ assert a == """ <p> hello </p> """ # debug f-strings (new in Python 3.8) x = 5.323 assert f"{x = :.1f}" == "x = 5.3" y = 8 assert f"{y=}" == "y=8" # issue 1267 a = 5 assert f'{"is" if a == 1 else "are"}' == "are" a = 1 assert f'{"is" if a == 1 else "are"}' == "is" # issue 1427 from math import cos, radians theta = 30 assert f'{theta=} {cos(radians(theta))=:.3f}' == \ "theta=30 cos(radians(theta))=0.866" # issue 1554 assertRaises(SyntaxError, exec, 'f"Bad format {}"') # issue 1734 assert f'[{"Text":10}]' == '[Text ]' assert f'[{"Text:":10}]' == '[Text: ]' x = 45 assert f'{x}' 'b' == '45b' # issue 1863 a = 2 s = f'foo { a }' assert s == 'foo 2' print("passed all tests")
bsd-3-clause
e6483291e006a061b79df8141f66ca27
15.175439
63
0.501356
2.412304
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/cp861.py
35
35331
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP861.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp861', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE 0x008b: 0x00d0, # LATIN CAPITAL LETTER ETH 0x008c: 0x00f0, # LATIN SMALL LETTER ETH 0x008d: 0x00de, # LATIN CAPITAL LETTER THORN 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH 
ACUTE 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x00fe, # LATIN SMALL LETTER THORN 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x0097: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE 0x0098: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE 0x009c: 0x00a3, # POUND SIGN 0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE 0x009e: 0x20a7, # PESETA SIGN 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE 0x00a5: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE 0x00a6: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00a7: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE 0x00a8: 0x00bf, # INVERTED QUESTION MARK 0x00a9: 0x2310, # REVERSED NOT SIGN 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # 
BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x258c, # LEFT HALF BLOCK 0x00de: 0x2590, # RIGHT HALF 
BLOCK 0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA 0x00e3: 0x03c0, # GREEK SMALL LETTER PI 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA 0x00e6: 0x00b5, # MICRO SIGN 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA 0x00ec: 0x221e, # INFINITY 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON 0x00ef: 0x2229, # INTERSECTION 0x00f0: 0x2261, # IDENTICAL TO 0x00f1: 0x00b1, # PLUS-MINUS SIGN 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO 0x00f4: 0x2320, # TOP HALF INTEGRAL 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x2248, # ALMOST EQUAL TO 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x2219, # BULLET OPERATOR 0x00fa: 0x00b7, # MIDDLE DOT 0x00fb: 0x221a, # SQUARE ROOT 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N 0x00fd: 0x00b2, # SUPERSCRIPT TWO 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' # 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 
-> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' 
# 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL 
LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE '\xd0' # 0x008b -> LATIN CAPITAL LETTER ETH '\xf0' # 0x008c -> LATIN SMALL LETTER ETH '\xde' # 0x008d -> LATIN CAPITAL LETTER THORN '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE '\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS '\xfe' # 0x0095 -> LATIN SMALL LETTER THORN '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xdd' # 0x0097 -> LATIN CAPITAL LETTER Y WITH ACUTE '\xfd' # 0x0098 -> LATIN SMALL LETTER Y WITH ACUTE '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS '\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE '\xa3' # 0x009c -> POUND SIGN '\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE '\u20a7' # 0x009e -> PESETA SIGN '\u0192' # 0x009f -> LATIN SMALL LETTER 
F WITH HOOK '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE '\xc1' # 0x00a4 -> LATIN CAPITAL LETTER A WITH ACUTE '\xcd' # 0x00a5 -> LATIN CAPITAL LETTER I WITH ACUTE '\xd3' # 0x00a6 -> LATIN CAPITAL LETTER O WITH ACUTE '\xda' # 0x00a7 -> LATIN CAPITAL LETTER U WITH ACUTE '\xbf' # 0x00a8 -> INVERTED QUESTION MARK '\u2310' # 0x00a9 -> REVERSED NOT SIGN '\xac' # 0x00aa -> NOT SIGN '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u258c' # 0x00dd -> LEFT HALF BLOCK '\u2590' # 0x00de -> RIGHT HALF BLOCK '\u2580' # 0x00df -> UPPER HALF BLOCK '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA '\xb5' # 0x00e6 -> MICRO SIGN '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA '\u03b4' # 
0x00eb -> GREEK SMALL LETTER DELTA '\u221e' # 0x00ec -> INFINITY '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON '\u2229' # 0x00ef -> INTERSECTION '\u2261' # 0x00f0 -> IDENTICAL TO '\xb1' # 0x00f1 -> PLUS-MINUS SIGN '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO '\u2320' # 0x00f4 -> TOP HALF INTEGRAL '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL '\xf7' # 0x00f6 -> DIVISION SIGN '\u2248' # 0x00f7 -> ALMOST EQUAL TO '\xb0' # 0x00f8 -> DEGREE SIGN '\u2219' # 0x00f9 -> BULLET OPERATOR '\xb7' # 0x00fa -> MIDDLE DOT '\u221a' # 0x00fb -> SQUARE ROOT '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N '\xb2' # 0x00fd -> SUPERSCRIPT TWO '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, 
# DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # 
RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK 0x00a3: 0x009c, # POUND SIGN 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b1: 0x00f1, # PLUS-MINUS SIGN 0x00b2: 0x00fd, # SUPERSCRIPT TWO 0x00b5: 0x00e6, # MICRO SIGN 0x00b7: 0x00fa, # MIDDLE DOT 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF 0x00bf: 0x00a8, # INVERTED QUESTION MARK 0x00c1: 0x00a4, # LATIN CAPITAL LETTER A WITH ACUTE 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE 0x00c6: 
0x0092, # LATIN CAPITAL LIGATURE AE 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00cd: 0x00a5, # LATIN CAPITAL LETTER I WITH ACUTE 0x00d0: 0x008b, # LATIN CAPITAL LETTER ETH 0x00d3: 0x00a6, # LATIN CAPITAL LETTER O WITH ACUTE 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE 0x00da: 0x00a7, # LATIN CAPITAL LETTER U WITH ACUTE 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00dd: 0x0097, # LATIN CAPITAL LETTER Y WITH ACUTE 0x00de: 0x008d, # LATIN CAPITAL LETTER THORN 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA 0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00f0: 0x008c, # LATIN SMALL LETTER ETH 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS 0x00f7: 0x00f6, # DIVISION SIGN 0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x00fd: 0x0098, # LATIN SMALL LETTER Y WITH ACUTE 0x00fe: 0x0095, # LATIN SMALL LETTER THORN 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA 0x03a6: 0x00e8, 
# GREEK CAPITAL LETTER PHI 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON 0x03c0: 0x00e3, # GREEK SMALL LETTER PI 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N 0x20a7: 0x009e, # PESETA SIGN 0x2219: 0x00f9, # BULLET OPERATOR 0x221a: 0x00fb, # SQUARE ROOT 0x221e: 0x00ec, # INFINITY 0x2229: 0x00ef, # INTERSECTION 0x2248: 0x00f7, # ALMOST EQUAL TO 0x2261: 0x00f0, # IDENTICAL TO 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO 0x2310: 0x00a9, # REVERSED NOT SIGN 0x2320: 0x00f4, # TOP HALF INTEGRAL 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE 0x255a: 0x00c8, # BOX DRAWINGS 
DOUBLE UP AND RIGHT 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x258c: 0x00dd, # LEFT HALF BLOCK 0x2590: 0x00de, # RIGHT HALF BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
bsd-3-clause
49bb36e89e5d170f2200185221b81436
48.617479
97
0.592737
3.019744
false
false
false
false
brython-dev/brython
www/src/Lib/multiprocessing/pool.py
694
23263
# # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool'] # # Imports # import threading import queue import itertools import collections import time from multiprocessing import Process, cpu_count, TimeoutError from multiprocessing.util import Finalize, debug # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. 
Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<MaybeEncodingError: %s>" % str(self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None): assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, IOError): debug('worker got EOFError or IOError -- exiting') break if task is None: debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) completed += 1 debug('worker exiting after %d tasks' % completed) # # Class representing a process pool # class Pool(object): ''' Class which supports an async version of applying functions to arguments. 
''' Process = Process def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None): self._setup_queues() self._taskqueue = queue.Queue() self._cache = {} self._state = RUN self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: try: processes = cpu_count() except NotImplementedError: processes = 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes self._pool = [] self._repopulate_pool() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self, ) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) def _join_exited_workers(self): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. 
""" cleaned = False for i in reversed(range(len(self._pool))): worker = self._pool[i] if worker.exitcode is not None: # worker exited debug('cleaning up worker %d' % i) worker.join() cleaned = True del self._pool[i] return cleaned def _repopulate_pool(self): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(self._processes - len(self._pool)): w = self.Process(target=worker, args=(self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild) ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() debug('added worker') def _maintain_pool(self): """Clean up any exited workers and start replacements for them. """ if self._join_exited_workers(): self._repopulate_pool() def _setup_queues(self): from .queues import SimpleQueue self._inqueue = SimpleQueue() self._outqueue = SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. ''' assert self._state == RUN return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). ''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. 
''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' if self._state != RUN: raise ValueError("Pool not running") result = ApplyResult(self._cache, callback, error_callback) self._taskqueue.put(([(result._job, None, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. 
''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' if self._state != RUN: raise ValueError("Pool not running") if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put((((result._job, i, mapper, (x,), {}) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _handle_workers(pool): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. while thread._state == RUN or (pool._cache and thread._state != TERMINATE): pool._maintain_pool() time.sleep(0.1) # send sentinel to stop workers pool._taskqueue.put(None) debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): i = -1 for i, task in enumerate(taskseq): if thread._state: debug('task handler found thread._state != RUN') break try: put(task) except IOError: debug('could not put task on queue') break else: if set_length: debug('doing set_length()') set_length(i+1) continue break else: debug('task handler got sentinel') try: # tell result handler to finish when cache is empty debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work debug('task handler sending sentinel to workers') for p in pool: put(None) except IOError: debug('task handler got IOError when sending sentinels') debug('task handler exiting') 
@staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if thread._state: assert thread._state == TERMINATE debug('result handler found thread._state=TERMINATE') break if task is None: debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass while cache and thread._state != TERMINATE: try: task = get() except (IOError, EOFError): debug('result handler got EOFError/IOError -- exiting') return if task is None: debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass if hasattr(outqueue, '_reader'): debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. try: for i in range(10): if not outqueue._reader.poll(): break get() except (IOError, EOFError): pass debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE def terminate(self): debug('terminating pool') self._state = TERMINATE self._worker_handler._state = TERMINATE self._terminate() def join(self): debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on 
inqueue debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once debug('finalizing pool') worker_handler._state = TERMINATE task_handler._state = TERMINATE debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 result_handler._state = TERMINATE outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. if pool and hasattr(pool[0], 'terminate'): debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, cache, callback, error_callback): self._event = threading.Event() self._job = next(job_counter) self._cache = cache self._callback = callback self._error_callback = error_callback cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): assert self.ready() return 
self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback, error_callback): ApplyResult.__init__(self, cache, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() else: self._success = False self._value = result if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, cache): self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): self._cond.acquire() try: try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration self._cond.wait(timeout) try: item = 
self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration raise TimeoutError finally: self._cond.release() success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): self._cond.acquire() try: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == self._length: del self._cache[self._job] finally: self._cond.release() def _set_length(self, length): self._cond.acquire() try: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] finally: self._cond.release() # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): self._cond.acquire() try: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] finally: self._cond.release() # # # class ThreadPool(Pool): from .dummy import Process def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.Queue() self._outqueue = queue.Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # put sentinels at head of inqueue to make workers finish inqueue.not_empty.acquire() try: inqueue.queue.clear() inqueue.queue.extend([None] * size) inqueue.not_empty.notify_all() finally: inqueue.not_empty.release()
bsd-3-clause
1f82e950a32e3e1bc48bcd6644b2e262
30.867123
83
0.543438
4.425147
false
false
false
false
brython-dev/brython
www/src/Lib/pathlib.py
1
48572
import fnmatch import functools import io import ntpath import os import posixpath import re import sys import warnings from _collections_abc import Sequence from errno import ENOENT, ENOTDIR, EBADF, ELOOP from operator import attrgetter from stat import S_ISDIR, S_ISLNK, S_ISREG, S_ISSOCK, S_ISBLK, S_ISCHR, S_ISFIFO from urllib.parse import quote_from_bytes as urlquote_from_bytes __all__ = [ "PurePath", "PurePosixPath", "PureWindowsPath", "Path", "PosixPath", "WindowsPath", ] # # Internals # _WINERROR_NOT_READY = 21 # drive exists but is not accessible _WINERROR_INVALID_NAME = 123 # fix for bpo-35306 _WINERROR_CANT_RESOLVE_FILENAME = 1921 # broken symlink pointing to itself # EBADF - guard against macOS `stat` throwing EBADF _IGNORED_ERRNOS = (ENOENT, ENOTDIR, EBADF, ELOOP) _IGNORED_WINERRORS = ( _WINERROR_NOT_READY, _WINERROR_INVALID_NAME, _WINERROR_CANT_RESOLVE_FILENAME) def _ignore_error(exception): return (getattr(exception, 'errno', None) in _IGNORED_ERRNOS or getattr(exception, 'winerror', None) in _IGNORED_WINERRORS) def _is_wildcard_pattern(pat): # Whether this pattern needs actual matching using fnmatch, or can # be looked up directly as a file. return "*" in pat or "?" in pat or "[" in pat class _Flavour(object): """A flavour implements a particular (platform-specific) set of path semantics.""" def __init__(self): self.join = self.sep.join def parse_parts(self, parts): parsed = [] sep = self.sep altsep = self.altsep drv = root = '' it = reversed(parts) for part in it: if not part: continue if altsep: part = part.replace(altsep, sep) drv, root, rel = self.splitroot(part) if sep in rel: for x in reversed(rel.split(sep)): if x and x != '.': parsed.append(sys.intern(x)) else: if rel and rel != '.': parsed.append(sys.intern(rel)) if drv or root: if not drv: # If no drive is present, try to find one in the previous # parts. This makes the result of parsing e.g. # ("C:", "/", "a") reasonably intuitive. 
for part in it: if not part: continue if altsep: part = part.replace(altsep, sep) drv = self.splitroot(part)[0] if drv: break break if drv or root: parsed.append(drv + root) parsed.reverse() return drv, root, parsed def join_parsed_parts(self, drv, root, parts, drv2, root2, parts2): """ Join the two paths represented by the respective (drive, root, parts) tuples. Return a new (drive, root, parts) tuple. """ if root2: if not drv2 and drv: return drv, root2, [drv + root2] + parts2[1:] elif drv2: if drv2 == drv or self.casefold(drv2) == self.casefold(drv): # Same drive => second path is relative to the first return drv, root, parts + parts2[1:] else: # Second path is non-anchored (common case) return drv, root, parts + parts2 return drv2, root2, parts2 class _WindowsFlavour(_Flavour): # Reference for Windows paths can be found at # http://msdn.microsoft.com/en-us/library/aa365247%28v=vs.85%29.aspx sep = '\\' altsep = '/' has_drv = True pathmod = ntpath is_supported = (os.name == 'nt') drive_letters = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') ext_namespace_prefix = '\\\\?\\' reserved_names = ( {'CON', 'PRN', 'AUX', 'NUL', 'CONIN$', 'CONOUT$'} | {'COM%s' % c for c in '123456789\xb9\xb2\xb3'} | {'LPT%s' % c for c in '123456789\xb9\xb2\xb3'} ) # Interesting findings about extended paths: # * '\\?\c:\a' is an extended path, which bypasses normal Windows API # path processing. Thus relative paths are not resolved and slash is not # translated to backslash. It has the native NT path limit of 32767 # characters, but a bit less after resolving device symbolic links, # such as '\??\C:' => '\Device\HarddiskVolume2'. # * '\\?\c:/a' looks for a device named 'C:/a' because slash is a # regular name character in the object namespace. # * '\\?\c:\foo/bar' is invalid because '/' is illegal in NT filesystems. # The only path separator at the filesystem level is backslash. 
# * '//?/c:\a' and '//?/c:/a' are effectively equivalent to '\\.\c:\a' and # thus limited to MAX_PATH. # * Prior to Windows 8, ANSI API bytes paths are limited to MAX_PATH, # even with the '\\?\' prefix. def splitroot(self, part, sep=sep): first = part[0:1] second = part[1:2] if (second == sep and first == sep): # XXX extended paths should also disable the collapsing of "." # components (according to MSDN docs). prefix, part = self._split_extended_path(part) first = part[0:1] second = part[1:2] else: prefix = '' third = part[2:3] if (second == sep and first == sep and third != sep): # is a UNC path: # vvvvvvvvvvvvvvvvvvvvv root # \\machine\mountpoint\directory\etc\... # directory ^^^^^^^^^^^^^^ index = part.find(sep, 2) if index != -1: index2 = part.find(sep, index + 1) # a UNC path can't have two slashes in a row # (after the initial two) if index2 != index + 1: if index2 == -1: index2 = len(part) if prefix: return prefix + part[1:index2], sep, part[index2+1:] else: return part[:index2], sep, part[index2+1:] drv = root = '' if second == ':' and first in self.drive_letters: drv = part[:2] part = part[2:] first = third if first == sep: root = first part = part.lstrip(sep) return prefix + drv, root, part def casefold(self, s): return s.lower() def casefold_parts(self, parts): return [p.lower() for p in parts] def compile_pattern(self, pattern): return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch def _split_extended_path(self, s, ext_prefix=ext_namespace_prefix): prefix = '' if s.startswith(ext_prefix): prefix = s[:4] s = s[4:] if s.startswith('UNC\\'): prefix += s[:3] s = '\\' + s[3:] return prefix, s def is_reserved(self, parts): # NOTE: the rules for reserved names seem somewhat complicated # (e.g. r"..\NUL" is reserved but not r"foo\NUL" if "foo" does not # exist). We err on the side of caution and return True for paths # which are not considered reserved by Windows. 
if not parts: return False if parts[0].startswith('\\\\'): # UNC paths are never reserved return False name = parts[-1].partition('.')[0].partition(':')[0].rstrip(' ') return name.upper() in self.reserved_names def make_uri(self, path): # Under Windows, file URIs use the UTF-8 encoding. drive = path.drive if len(drive) == 2 and drive[1] == ':': # It's a path on a local drive => 'file:///c:/a/b' rest = path.as_posix()[2:].lstrip('/') return 'file:///%s/%s' % ( drive, urlquote_from_bytes(rest.encode('utf-8'))) else: # It's a path on a network drive => 'file://host/share/a/b' return 'file:' + urlquote_from_bytes(path.as_posix().encode('utf-8')) class _PosixFlavour(_Flavour): sep = '/' altsep = '' has_drv = False pathmod = posixpath is_supported = (os.name != 'nt') def splitroot(self, part, sep=sep): if part and part[0] == sep: stripped_part = part.lstrip(sep) # According to POSIX path resolution: # http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap04.html#tag_04_11 # "A pathname that begins with two successive slashes may be # interpreted in an implementation-defined manner, although more # than two leading slashes shall be treated as a single slash". if len(part) - len(stripped_part) == 2: return '', sep * 2, stripped_part else: return '', sep, stripped_part else: return '', '', part def casefold(self, s): return s def casefold_parts(self, parts): return parts def compile_pattern(self, pattern): return re.compile(fnmatch.translate(pattern)).fullmatch def is_reserved(self, parts): return False def make_uri(self, path): # We represent the path using the local filesystem encoding, # for portability to other applications. 
bpath = bytes(path) return 'file://' + urlquote_from_bytes(bpath) _windows_flavour = _WindowsFlavour() _posix_flavour = _PosixFlavour() # # Globbing helpers # def _make_selector(pattern_parts, flavour): pat = pattern_parts[0] child_parts = pattern_parts[1:] if not pat: return _TerminatingSelector() if pat == '**': cls = _RecursiveWildcardSelector elif '**' in pat: raise ValueError("Invalid pattern: '**' can only be an entire path component") elif _is_wildcard_pattern(pat): cls = _WildcardSelector else: cls = _PreciseSelector return cls(pat, child_parts, flavour) if hasattr(functools, "lru_cache"): _make_selector = functools.lru_cache()(_make_selector) class _Selector: """A selector matches a specific glob pattern part against the children of a given path.""" def __init__(self, child_parts, flavour): self.child_parts = child_parts if child_parts: self.successor = _make_selector(child_parts, flavour) self.dironly = True else: self.successor = _TerminatingSelector() self.dironly = False def select_from(self, parent_path): """Iterate over all child paths of `parent_path` matched by this selector. 
This can contain parent_path itself.""" path_cls = type(parent_path) is_dir = path_cls.is_dir exists = path_cls.exists scandir = path_cls._scandir if not is_dir(parent_path): return iter([]) return self._select_from(parent_path, is_dir, exists, scandir) class _TerminatingSelector: def _select_from(self, parent_path, is_dir, exists, scandir): yield parent_path class _PreciseSelector(_Selector): def __init__(self, name, child_parts, flavour): self.name = name _Selector.__init__(self, child_parts, flavour) def _select_from(self, parent_path, is_dir, exists, scandir): try: path = parent_path._make_child_relpath(self.name) if (is_dir if self.dironly else exists)(path): for p in self.successor._select_from(path, is_dir, exists, scandir): yield p except PermissionError: return class _WildcardSelector(_Selector): def __init__(self, pat, child_parts, flavour): self.match = flavour.compile_pattern(pat) _Selector.__init__(self, child_parts, flavour) def _select_from(self, parent_path, is_dir, exists, scandir): try: with scandir(parent_path) as scandir_it: entries = list(scandir_it) for entry in entries: if self.dironly: try: # "entry.is_dir()" can raise PermissionError # in some cases (see bpo-38894), which is not # among the errors ignored by _ignore_error() if not entry.is_dir(): continue except OSError as e: if not _ignore_error(e): raise continue name = entry.name if self.match(name): path = parent_path._make_child_relpath(name) for p in self.successor._select_from(path, is_dir, exists, scandir): yield p except PermissionError: return class _RecursiveWildcardSelector(_Selector): def __init__(self, pat, child_parts, flavour): _Selector.__init__(self, child_parts, flavour) def _iterate_directories(self, parent_path, is_dir, scandir): yield parent_path try: with scandir(parent_path) as scandir_it: entries = list(scandir_it) for entry in entries: entry_is_dir = False try: entry_is_dir = entry.is_dir() except OSError as e: if not _ignore_error(e): raise if entry_is_dir and not 
entry.is_symlink(): path = parent_path._make_child_relpath(entry.name) for p in self._iterate_directories(path, is_dir, scandir): yield p except PermissionError: return def _select_from(self, parent_path, is_dir, exists, scandir): try: yielded = set() try: successor_select = self.successor._select_from for starting_point in self._iterate_directories(parent_path, is_dir, scandir): for p in successor_select(starting_point, is_dir, exists, scandir): if p not in yielded: yield p yielded.add(p) finally: yielded.clear() except PermissionError: return # # Public API # class _PathParents(Sequence): """This object provides sequence-like access to the logical ancestors of a path. Don't try to construct it yourself.""" __slots__ = ('_pathcls', '_drv', '_root', '_parts') def __init__(self, path): # We don't store the instance to avoid reference cycles self._pathcls = type(path) self._drv = path._drv self._root = path._root self._parts = path._parts def __len__(self): if self._drv or self._root: return len(self._parts) - 1 else: return len(self._parts) def __getitem__(self, idx): if isinstance(idx, slice): return tuple(self[i] for i in range(*idx.indices(len(self)))) if idx >= len(self) or idx < -len(self): raise IndexError(idx) if idx < 0: idx += len(self) return self._pathcls._from_parsed_parts(self._drv, self._root, self._parts[:-idx - 1]) def __repr__(self): return "<{}.parents>".format(self._pathcls.__name__) class PurePath(object): """Base class for manipulating paths without I/O. PurePath represents a filesystem path and offers operations which don't imply any actual filesystem I/O. Depending on your system, instantiating a PurePath will return either a PurePosixPath or a PureWindowsPath object. You can also instantiate either of these classes directly, regardless of your system. 
""" __slots__ = ( '_drv', '_root', '_parts', '_str', '_hash', '_pparts', '_cached_cparts', ) def __new__(cls, *args): """Construct a PurePath from one or several strings and or existing PurePath objects. The strings and path objects are combined so as to yield a canonicalized path, which is incorporated into the new PurePath object. """ if cls is PurePath: cls = PureWindowsPath if os.name == 'nt' else PurePosixPath return cls._from_parts(args) def __reduce__(self): # Using the parts tuple helps share interned path parts # when pickling related paths. return (self.__class__, tuple(self._parts)) @classmethod def _parse_args(cls, args): # This is useful when you don't want to create an instance, just # canonicalize some constructor arguments. parts = [] for a in args: if isinstance(a, PurePath): parts += a._parts else: a = os.fspath(a) if isinstance(a, str): # Force-cast str subclasses to str (issue #21127) parts.append(str(a)) else: raise TypeError( "argument should be a str object or an os.PathLike " "object returning str, not %r" % type(a)) return cls._flavour.parse_parts(parts) @classmethod def _from_parts(cls, args): # We need to call _parse_args on the instance, so as to get the # right flavour. 
self = object.__new__(cls) drv, root, parts = self._parse_args(args) self._drv = drv self._root = root self._parts = parts return self @classmethod def _from_parsed_parts(cls, drv, root, parts): self = object.__new__(cls) self._drv = drv self._root = root self._parts = parts return self @classmethod def _format_parsed_parts(cls, drv, root, parts): if drv or root: return drv + root + cls._flavour.join(parts[1:]) else: return cls._flavour.join(parts) def _make_child(self, args): drv, root, parts = self._parse_args(args) drv, root, parts = self._flavour.join_parsed_parts( self._drv, self._root, self._parts, drv, root, parts) return self._from_parsed_parts(drv, root, parts) def __str__(self): """Return the string representation of the path, suitable for passing to system calls.""" try: return self._str except AttributeError: self._str = self._format_parsed_parts(self._drv, self._root, self._parts) or '.' return self._str def __fspath__(self): return str(self) def as_posix(self): """Return the string representation of the path with forward (/) slashes.""" f = self._flavour return str(self).replace(f.sep, '/') def __bytes__(self): """Return the bytes representation of the path. 
This is only recommended to use under Unix.""" return os.fsencode(self) def __repr__(self): return "{}({!r})".format(self.__class__.__name__, self.as_posix()) def as_uri(self): """Return the path as a 'file' URI.""" if not self.is_absolute(): raise ValueError("relative path can't be expressed as a file URI") return self._flavour.make_uri(self) @property def _cparts(self): # Cached casefolded parts, for hashing and comparison try: return self._cached_cparts except AttributeError: self._cached_cparts = self._flavour.casefold_parts(self._parts) return self._cached_cparts def __eq__(self, other): if not isinstance(other, PurePath): return NotImplemented return self._cparts == other._cparts and self._flavour is other._flavour def __hash__(self): try: return self._hash except AttributeError: self._hash = hash(tuple(self._cparts)) return self._hash def __lt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts < other._cparts def __le__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts <= other._cparts def __gt__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts > other._cparts def __ge__(self, other): if not isinstance(other, PurePath) or self._flavour is not other._flavour: return NotImplemented return self._cparts >= other._cparts drive = property(attrgetter('_drv'), doc="""The drive prefix (letter or UNC path), if any.""") root = property(attrgetter('_root'), doc="""The root of the path, if any.""") @property def anchor(self): """The concatenation of the drive and root, or ''.""" anchor = self._drv + self._root return anchor @property def name(self): """The final path component, if any.""" parts = self._parts if len(parts) == (1 if (self._drv or self._root) else 0): return '' return parts[-1] @property def suffix(self): """ The 
final component's last suffix, if any. This includes the leading period. For example: '.txt' """ name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[i:] else: return '' @property def suffixes(self): """ A list of the final component's suffixes, if any. These include the leading periods. For example: ['.tar', '.gz'] """ name = self.name if name.endswith('.'): return [] name = name.lstrip('.') return ['.' + suffix for suffix in name.split('.')[1:]] @property def stem(self): """The final path component, minus its last suffix.""" name = self.name i = name.rfind('.') if 0 < i < len(name) - 1: return name[:i] else: return name def with_name(self, name): """Return a new path with the file name changed.""" if not self.name: raise ValueError("%r has an empty name" % (self,)) drv, root, parts = self._flavour.parse_parts((name,)) if (not name or name[-1] in [self._flavour.sep, self._flavour.altsep] or drv or root or len(parts) != 1): raise ValueError("Invalid name %r" % (name)) return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def with_stem(self, stem): """Return a new path with the stem changed.""" return self.with_name(stem + self.suffix) def with_suffix(self, suffix): """Return a new path with the file suffix changed. If the path has no suffix, add given suffix. If the given suffix is an empty string, remove the suffix from the path. 
""" f = self._flavour if f.sep in suffix or f.altsep and f.altsep in suffix: raise ValueError("Invalid suffix %r" % (suffix,)) if suffix and not suffix.startswith('.') or suffix == '.': raise ValueError("Invalid suffix %r" % (suffix)) name = self.name if not name: raise ValueError("%r has an empty name" % (self,)) old_suffix = self.suffix if not old_suffix: name = name + suffix else: name = name[:-len(old_suffix)] + suffix return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]) def relative_to(self, *other): """Return the relative path to another path identified by the passed arguments. If the operation is not possible (because this is not a subpath of the other path), raise ValueError. """ # For the purpose of this method, drive and root are considered # separate parts, i.e.: # Path('c:/').relative_to('c:') gives Path('/') # Path('c:/').relative_to('/') raise ValueError if not other: raise TypeError("need at least one argument") parts = self._parts drv = self._drv root = self._root if root: abs_parts = [drv, root] + parts[1:] else: abs_parts = parts to_drv, to_root, to_parts = self._parse_args(other) if to_root: to_abs_parts = [to_drv, to_root] + to_parts[1:] else: to_abs_parts = to_parts n = len(to_abs_parts) cf = self._flavour.casefold_parts if (root or drv) if n == 0 else cf(abs_parts[:n]) != cf(to_abs_parts): formatted = self._format_parsed_parts(to_drv, to_root, to_parts) raise ValueError("{!r} is not in the subpath of {!r}" " OR one path is relative and the other is absolute." .format(str(self), str(formatted))) return self._from_parsed_parts('', root if n == 1 else '', abs_parts[n:]) def is_relative_to(self, *other): """Return True if the path is relative to another path or False. 
""" try: self.relative_to(*other) return True except ValueError: return False @property def parts(self): """An object providing sequence-like access to the components in the filesystem path.""" # We cache the tuple to avoid building a new one each time .parts # is accessed. XXX is this necessary? try: return self._pparts except AttributeError: self._pparts = tuple(self._parts) return self._pparts def joinpath(self, *args): """Combine this path with one or several arguments, and return a new path representing either a subpath (if all arguments are relative paths) or a totally different path (if one of the arguments is anchored). """ return self._make_child(args) def __truediv__(self, key): try: return self._make_child((key,)) except TypeError: return NotImplemented def __rtruediv__(self, key): try: return self._from_parts([key] + self._parts) except TypeError: return NotImplemented @property def parent(self): """The logical parent of the path.""" drv = self._drv root = self._root parts = self._parts if len(parts) == 1 and (drv or root): return self return self._from_parsed_parts(drv, root, parts[:-1]) @property def parents(self): """A sequence of this path's logical parents.""" return _PathParents(self) def is_absolute(self): """True if the path is absolute (has both a root and, if applicable, a drive).""" if not self._root: return False return not self._flavour.has_drv or bool(self._drv) def is_reserved(self): """Return True if the path contains one of the special names reserved by the system, if any.""" return self._flavour.is_reserved(self._parts) def match(self, path_pattern): """ Return True if this path matches the given pattern. 
""" cf = self._flavour.casefold path_pattern = cf(path_pattern) drv, root, pat_parts = self._flavour.parse_parts((path_pattern,)) if not pat_parts: raise ValueError("empty pattern") if drv and drv != cf(self._drv): return False if root and root != cf(self._root): return False parts = self._cparts if drv or root: if len(pat_parts) != len(parts): return False pat_parts = pat_parts[1:] elif len(pat_parts) > len(parts): return False for part, pat in zip(reversed(parts), reversed(pat_parts)): if not fnmatch.fnmatchcase(part, pat): return False return True # Can't subclass os.PathLike from PurePath and keep the constructor # optimizations in PurePath._parse_args(). os.PathLike.register(PurePath) class PurePosixPath(PurePath): """PurePath subclass for non-Windows systems. On a POSIX system, instantiating a PurePath should return this object. However, you can also instantiate it directly on any system. """ _flavour = _posix_flavour __slots__ = () class PureWindowsPath(PurePath): """PurePath subclass for Windows systems. On a Windows system, instantiating a PurePath should return this object. However, you can also instantiate it directly on any system. """ _flavour = _windows_flavour __slots__ = () # Filesystem-accessing classes class Path(PurePath): """PurePath subclass that can make system calls. Path represents a filesystem path but unlike PurePath, also offers methods to do system calls on path objects. Depending on your system, instantiating a Path will return either a PosixPath or a WindowsPath object. You can also instantiate a PosixPath or WindowsPath directly, but cannot instantiate a WindowsPath on a POSIX system or vice versa. 
""" __slots__ = () def __new__(cls, *args, **kwargs): if cls is Path: cls = WindowsPath if os.name == 'nt' else PosixPath self = cls._from_parts(args) if not self._flavour.is_supported: raise NotImplementedError("cannot instantiate %r on your system" % (cls.__name__,)) return self def _make_child_relpath(self, part): # This is an optimization used for dir walking. `part` must be # a single part relative to this path. parts = self._parts + [part] return self._from_parsed_parts(self._drv, self._root, parts) def __enter__(self): # In previous versions of pathlib, __exit__() marked this path as # closed; subsequent attempts to perform I/O would raise an IOError. # This functionality was never documented, and had the effect of # making Path objects mutable, contrary to PEP 428. # In Python 3.9 __exit__() was made a no-op. # In Python 3.11 __enter__() began emitting DeprecationWarning. # In Python 3.13 __enter__() and __exit__() should be removed. warnings.warn("pathlib.Path.__enter__() is deprecated and scheduled " "for removal in Python 3.13; Path objects as a context " "manager is a no-op", DeprecationWarning, stacklevel=2) return self def __exit__(self, t, v, tb): pass # Public API @classmethod def cwd(cls): """Return a new path pointing to the current working directory (as returned by os.getcwd()). """ return cls(os.getcwd()) @classmethod def home(cls): """Return a new path pointing to the user's home directory (as returned by os.path.expanduser('~')). """ return cls("~").expanduser() def samefile(self, other_path): """Return whether other_path is the same or not as this file (as returned by os.path.samefile()). """ st = self.stat() try: other_st = other_path.stat() except AttributeError: other_st = self.__class__(other_path).stat() return os.path.samestat(st, other_st) def iterdir(self): """Iterate over the files in this directory. Does not yield any result for the special paths '.' and '..'. 
""" for name in os.listdir(self): yield self._make_child_relpath(name) def _scandir(self): # bpo-24132: a future version of pathlib will support subclassing of # pathlib.Path to customize how the filesystem is accessed. This # includes scandir(), which is used to implement glob(). return os.scandir(self) def glob(self, pattern): """Iterate over this subtree and yield all existing files (of any kind, including directories) matching the given relative pattern. """ sys.audit("pathlib.Path.glob", self, pattern) if not pattern: raise ValueError("Unacceptable pattern: {!r}".format(pattern)) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") if pattern[-1] in (self._flavour.sep, self._flavour.altsep): pattern_parts.append('') selector = _make_selector(tuple(pattern_parts), self._flavour) for p in selector.select_from(self): yield p def rglob(self, pattern): """Recursively yield all existing files (of any kind, including directories) matching the given relative pattern, anywhere in this subtree. """ sys.audit("pathlib.Path.rglob", self, pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") if pattern and pattern[-1] in (self._flavour.sep, self._flavour.altsep): pattern_parts.append('') selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour) for p in selector.select_from(self): yield p def absolute(self): """Return an absolute version of this path by prepending the current working directory. No normalization or symlink resolution is performed. Use resolve() to get the canonical path to a file. """ if self.is_absolute(): return self return self._from_parts([self.cwd()] + self._parts) def resolve(self, strict=False): """ Make the path absolute, resolving all symlinks on the way and also normalizing it. 
""" def check_eloop(e): winerror = getattr(e, 'winerror', 0) if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME: raise RuntimeError("Symlink loop from %r" % e.filename) try: s = os.path.realpath(self, strict=strict) except OSError as e: check_eloop(e) raise p = self._from_parts((s,)) # In non-strict mode, realpath() doesn't raise on symlink loops. # Ensure we get an exception by calling stat() if not strict: try: p.stat() except OSError as e: check_eloop(e) return p def stat(self, *, follow_symlinks=True): """ Return the result of the stat() system call on this path, like os.stat() does. """ return os.stat(self, follow_symlinks=follow_symlinks) def owner(self): """ Return the login name of the file owner. """ try: import pwd return pwd.getpwuid(self.stat().st_uid).pw_name except ImportError: raise NotImplementedError("Path.owner() is unsupported on this system") def group(self): """ Return the group name of the file gid. """ try: import grp return grp.getgrgid(self.stat().st_gid).gr_name except ImportError: raise NotImplementedError("Path.group() is unsupported on this system") def open(self, mode='r', buffering=-1, encoding=None, errors=None, newline=None): """ Open the file pointed by this path and return a file object, as the built-in open() function does. """ if "b" not in mode: encoding = io.text_encoding(encoding) return io.open(self, mode, buffering, encoding, errors, newline) def read_bytes(self): """ Open the file in bytes mode, read it, and close the file. """ with self.open(mode='rb') as f: return f.read() def read_text(self, encoding=None, errors=None): """ Open the file in text mode, read it, and close the file. """ encoding = io.text_encoding(encoding) with self.open(mode='r', encoding=encoding, errors=errors) as f: return f.read() def write_bytes(self, data): """ Open the file in bytes mode, write to it, and close the file. 
""" # type-check for the buffer interface before truncating the file view = memoryview(data) with self.open(mode='wb') as f: return f.write(view) def write_text(self, data, encoding=None, errors=None, newline=None): """ Open the file in text mode, write to it, and close the file. """ if not isinstance(data, str): raise TypeError('data must be str, not %s' % data.__class__.__name__) encoding = io.text_encoding(encoding) with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: return f.write(data) def readlink(self): """ Return the path to which the symbolic link points. """ if not hasattr(os, "readlink"): raise NotImplementedError("os.readlink() not available on this system") return self._from_parts((os.readlink(self),)) def touch(self, mode=0o666, exist_ok=True): """ Create this file with the given access mode, if it doesn't exist. """ if exist_ok: # First try to bump modification time # Implementation note: GNU touch uses the UTIME_NOW option of # the utimensat() / futimens() functions. try: os.utime(self, None) except OSError: # Avoid exception chaining pass else: return flags = os.O_CREAT | os.O_WRONLY if not exist_ok: flags |= os.O_EXCL fd = os.open(self, flags, mode) os.close(fd) def mkdir(self, mode=0o777, parents=False, exist_ok=False): """ Create a new directory at this given path. """ try: os.mkdir(self, mode) except FileNotFoundError: if not parents or self.parent == self: raise self.parent.mkdir(parents=True, exist_ok=True) self.mkdir(mode, parents=False, exist_ok=exist_ok) except OSError: # Cannot rely on checking for EEXIST, since the operating system # could give priority to other errors like EACCES or EROFS if not exist_ok or not self.is_dir(): raise def chmod(self, mode, *, follow_symlinks=True): """ Change the permissions of the path, like os.chmod(). 
""" os.chmod(self, mode, follow_symlinks=follow_symlinks) def lchmod(self, mode): """ Like chmod(), except if the path points to a symlink, the symlink's permissions are changed, rather than its target's. """ self.chmod(mode, follow_symlinks=False) def unlink(self, missing_ok=False): """ Remove this file or link. If the path is a directory, use rmdir() instead. """ try: os.unlink(self) except FileNotFoundError: if not missing_ok: raise def rmdir(self): """ Remove this directory. The directory must be empty. """ os.rmdir(self) def lstat(self): """ Like stat(), except if the path points to a symlink, the symlink's status information is returned, rather than its target's. """ return self.stat(follow_symlinks=False) def rename(self, target): """ Rename this path to the target path. The target path may be absolute or relative. Relative paths are interpreted relative to the current working directory, *not* the directory of the Path object. Returns the new Path instance pointing to the target path. """ os.rename(self, target) return self.__class__(target) def replace(self, target): """ Rename this path to the target path, overwriting if that path exists. The target path may be absolute or relative. Relative paths are interpreted relative to the current working directory, *not* the directory of the Path object. Returns the new Path instance pointing to the target path. """ os.replace(self, target) return self.__class__(target) def symlink_to(self, target, target_is_directory=False): """ Make this path a symlink pointing to the target path. Note the order of arguments (link, target) is the reverse of os.symlink. """ if not hasattr(os, "symlink"): raise NotImplementedError("os.symlink() not available on this system") os.symlink(target, self, target_is_directory) def hardlink_to(self, target): """ Make this path a hard link pointing to the same file as *target*. Note the order of arguments (self, target) is the reverse of os.link's. 
""" if not hasattr(os, "link"): raise NotImplementedError("os.link() not available on this system") os.link(target, self) def link_to(self, target): """ Make the target path a hard link pointing to this path. Note this function does not make this path a hard link to *target*, despite the implication of the function and argument names. The order of arguments (target, link) is the reverse of Path.symlink_to, but matches that of os.link. Deprecated since Python 3.10 and scheduled for removal in Python 3.12. Use `hardlink_to()` instead. """ warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled " "for removal in Python 3.12. " "Use pathlib.Path.hardlink_to() instead.", DeprecationWarning, stacklevel=2) self.__class__(target).hardlink_to(self) # Convenience functions for querying the stat results def exists(self): """ Whether this path exists. """ try: self.stat() except OSError as e: if not _ignore_error(e): raise return False except ValueError: # Non-encodable path return False return True def is_dir(self): """ Whether this path is a directory. """ try: return S_ISDIR(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_file(self): """ Whether this path is a regular file (also True for symlinks pointing to regular files). 
""" try: return S_ISREG(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_mount(self): """ Check if this path is a POSIX mount point """ # Need to exist and be a dir if not self.exists() or not self.is_dir(): return False try: parent_dev = self.parent.stat().st_dev except OSError: return False dev = self.stat().st_dev if dev != parent_dev: return True ino = self.stat().st_ino parent_ino = self.parent.stat().st_ino return ino == parent_ino def is_symlink(self): """ Whether this path is a symbolic link. """ try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist return False except ValueError: # Non-encodable path return False def is_block_device(self): """ Whether this path is a block device. """ try: return S_ISBLK(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_char_device(self): """ Whether this path is a character device. """ try: return S_ISCHR(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_fifo(self): """ Whether this path is a FIFO. 
""" try: return S_ISFIFO(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_socket(self): """ Whether this path is a socket. """ try: return S_ISSOCK(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def expanduser(self): """ Return a new path with expanded ~ and ~user constructs (as returned by os.path.expanduser) """ if (not (self._drv or self._root) and self._parts and self._parts[0][:1] == '~'): homedir = os.path.expanduser(self._parts[0]) if homedir[:1] == "~": raise RuntimeError("Could not determine home directory.") return self._from_parts([homedir] + self._parts[1:]) return self class PosixPath(Path, PurePosixPath): """Path subclass for non-Windows systems. On a POSIX system, instantiating a Path should return this object. """ __slots__ = () class WindowsPath(Path, PureWindowsPath): """Path subclass for Windows systems. On a Windows system, instantiating a Path should return this object. """ __slots__ = () def is_mount(self): raise NotImplementedError("Path.is_mount() is unsupported on this system")
bsd-3-clause
6b067c9a97d3113e9bc1debd70785a70
33.54623
110
0.558738
4.181474
false
false
false
false
brython-dev/brython
www/src/Lib/test/test_enumerate.py
3
9356
import unittest import operator import sys import pickle import gc from test import support class G: 'Sequence using __getitem__' def __init__(self, seqn): self.seqn = seqn def __getitem__(self, i): return self.seqn[i] class I: 'Sequence using iterator protocol' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class Ig: 'Sequence using iterator protocol defined with a generator' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): for val in self.seqn: yield val class X: 'Missing __getitem__ and __iter__' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class E: 'Test propagation of exceptions' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): 3 // 0 class N: 'Iterator missing __next__()' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self class PickleTest: # Helper to check picklability def check_pickle(self, itorg, seq): for proto in range(pickle.HIGHEST_PROTOCOL + 1): d = pickle.dumps(itorg, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(list(it), seq) it = pickle.loads(d) try: next(it) except StopIteration: self.assertFalse(seq[1:]) continue d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(list(it), seq[1:]) class EnumerateTestCase(unittest.TestCase, PickleTest): enum = enumerate seq, res = 'abc', [(0,'a'), (1,'b'), (2,'c')] def test_basicfunction(self): self.assertEqual(type(self.enum(self.seq)), self.enum) e = self.enum(self.seq) self.assertEqual(iter(e), e) self.assertEqual(list(self.enum(self.seq)), self.res) self.enum.__doc__ def test_pickle(self): self.check_pickle(self.enum(self.seq), self.res) def test_getitemseqn(self): 
self.assertEqual(list(self.enum(G(self.seq))), self.res) e = self.enum(G('')) self.assertRaises(StopIteration, next, e) def test_iteratorseqn(self): self.assertEqual(list(self.enum(I(self.seq))), self.res) e = self.enum(I('')) self.assertRaises(StopIteration, next, e) def test_iteratorgenerator(self): self.assertEqual(list(self.enum(Ig(self.seq))), self.res) e = self.enum(Ig('')) self.assertRaises(StopIteration, next, e) def test_noniterable(self): self.assertRaises(TypeError, self.enum, X(self.seq)) def test_illformediterable(self): self.assertRaises(TypeError, self.enum, N(self.seq)) def test_exception_propagation(self): self.assertRaises(ZeroDivisionError, list, self.enum(E(self.seq))) def test_argumentcheck(self): self.assertRaises(TypeError, self.enum) # no arguments self.assertRaises(TypeError, self.enum, 1) # wrong type (not iterable) self.assertRaises(TypeError, self.enum, 'abc', 'a') # wrong type self.assertRaises(TypeError, self.enum, 'abc', 2, 3) # too many arguments def test_kwargs(self): self.assertEqual(list(self.enum(iterable=Ig(self.seq))), self.res) expected = list(self.enum(Ig(self.seq), 0)) self.assertEqual(list(self.enum(iterable=Ig(self.seq), start=0)), expected) self.assertEqual(list(self.enum(start=0, iterable=Ig(self.seq))), expected) self.assertRaises(TypeError, self.enum, iterable=[], x=3) self.assertRaises(TypeError, self.enum, start=0, x=3) self.assertRaises(TypeError, self.enum, x=0, y=3) self.assertRaises(TypeError, self.enum, x=0) @support.cpython_only def test_tuple_reuse(self): # Tests an implementation detail where tuple is reused # whenever nothing else holds a reference to it self.assertEqual(len(set(map(id, list(enumerate(self.seq))))), len(self.seq)) self.assertEqual(len(set(map(id, enumerate(self.seq)))), min(1,len(self.seq))) @support.cpython_only def test_enumerate_result_gc(self): # bpo-42536: enumerate's tuple-reuse speed trick breaks the GC's # assumptions about what can be untracked. 
Make sure we re-track result # tuples whenever we reuse them. it = self.enum([[]]) gc.collect() # That GC collection probably untracked the recycled internal result # tuple, which is initialized to (None, None). Make sure it's re-tracked # when it's mutated and returned from __next__: self.assertTrue(gc.is_tracked(next(it))) class MyEnum(enumerate): pass class SubclassTestCase(EnumerateTestCase): enum = MyEnum class TestEmpty(EnumerateTestCase): seq, res = '', [] class TestBig(EnumerateTestCase): seq = range(10,20000,2) res = list(zip(range(20000), seq)) class TestReversed(unittest.TestCase, PickleTest): def test_simple(self): class A: def __getitem__(self, i): if i < 5: return str(i) raise StopIteration def __len__(self): return 5 for data in ('abc', range(5), tuple(enumerate('abc')), A(), range(1,17,5), dict.fromkeys('abcde')): self.assertEqual(list(data)[::-1], list(reversed(data))) # don't allow keyword arguments self.assertRaises(TypeError, reversed, [], a=1) def test_range_optimization(self): x = range(1) self.assertEqual(type(reversed(x)), type(iter(x))) def test_len(self): for s in ('hello', tuple('hello'), list('hello'), range(5)): self.assertEqual(operator.length_hint(reversed(s)), len(s)) r = reversed(s) list(r) self.assertEqual(operator.length_hint(r), 0) class SeqWithWeirdLen: called = False def __len__(self): if not self.called: self.called = True return 10 raise ZeroDivisionError def __getitem__(self, index): return index r = reversed(SeqWithWeirdLen()) self.assertRaises(ZeroDivisionError, operator.length_hint, r) def test_gc(self): class Seq: def __len__(self): return 10 def __getitem__(self, index): return index s = Seq() r = reversed(s) s.r = r def test_args(self): self.assertRaises(TypeError, reversed) self.assertRaises(TypeError, reversed, [], 'extra') @unittest.skipUnless(hasattr(sys, 'getrefcount'), 'test needs sys.getrefcount()') def test_bug1229429(self): # this bug was never in reversed, it was in # PyObject_CallMethod, and reversed_new 
calls that sometimes. def f(): pass r = f.__reversed__ = object() rc = sys.getrefcount(r) for i in range(10): try: reversed(f) except TypeError: pass else: self.fail("non-callable __reversed__ didn't raise!") self.assertEqual(rc, sys.getrefcount(r)) def test_objmethods(self): # Objects must have __len__() and __getitem__() implemented. class NoLen(object): def __getitem__(self, i): return 1 nl = NoLen() self.assertRaises(TypeError, reversed, nl) class NoGetItem(object): def __len__(self): return 2 ngi = NoGetItem() self.assertRaises(TypeError, reversed, ngi) class Blocked(object): def __getitem__(self, i): return 1 def __len__(self): return 2 __reversed__ = None b = Blocked() self.assertRaises(TypeError, reversed, b) def test_pickle(self): for data in 'abc', range(5), tuple(enumerate('abc')), range(1,17,5): self.check_pickle(reversed(data), list(data)[::-1]) class EnumerateStartTestCase(EnumerateTestCase): def test_basicfunction(self): e = self.enum(self.seq) self.assertEqual(iter(e), e) self.assertEqual(list(self.enum(self.seq)), self.res) class TestStart(EnumerateStartTestCase): def enum(self, iterable, start=11): return enumerate(iterable, start=start) seq, res = 'abc', [(11, 'a'), (12, 'b'), (13, 'c')] class TestLongStart(EnumerateStartTestCase): def enum(self, iterable, start=sys.maxsize + 1): return enumerate(iterable, start=start) seq, res = 'abc', [(sys.maxsize+1,'a'), (sys.maxsize+2,'b'), (sys.maxsize+3,'c')] if __name__ == "__main__": unittest.main()
bsd-3-clause
aa58cbe040a506ee38fe826364e09e1e
30.608108
86
0.573856
3.831286
false
true
false
false
brython-dev/brython
www/src/Lib/encodings/cp1006.py
35
13875
""" Python Character Mapping Codec cp1006 generated from 'MAPPINGS/VENDORS/MISC/CP1006.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp1006', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 
0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET 
'^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> 
'\xa0' # 0xA0 -> NO-BREAK SPACE '\u06f0' # 0xA1 -> EXTENDED ARABIC-INDIC DIGIT ZERO '\u06f1' # 0xA2 -> EXTENDED ARABIC-INDIC DIGIT ONE '\u06f2' # 0xA3 -> EXTENDED ARABIC-INDIC DIGIT TWO '\u06f3' # 0xA4 -> EXTENDED ARABIC-INDIC DIGIT THREE '\u06f4' # 0xA5 -> EXTENDED ARABIC-INDIC DIGIT FOUR '\u06f5' # 0xA6 -> EXTENDED ARABIC-INDIC DIGIT FIVE '\u06f6' # 0xA7 -> EXTENDED ARABIC-INDIC DIGIT SIX '\u06f7' # 0xA8 -> EXTENDED ARABIC-INDIC DIGIT SEVEN '\u06f8' # 0xA9 -> EXTENDED ARABIC-INDIC DIGIT EIGHT '\u06f9' # 0xAA -> EXTENDED ARABIC-INDIC DIGIT NINE '\u060c' # 0xAB -> ARABIC COMMA '\u061b' # 0xAC -> ARABIC SEMICOLON '\xad' # 0xAD -> SOFT HYPHEN '\u061f' # 0xAE -> ARABIC QUESTION MARK '\ufe81' # 0xAF -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM '\ufe8d' # 0xB0 -> ARABIC LETTER ALEF ISOLATED FORM '\ufe8e' # 0xB1 -> ARABIC LETTER ALEF FINAL FORM '\ufe8e' # 0xB2 -> ARABIC LETTER ALEF FINAL FORM '\ufe8f' # 0xB3 -> ARABIC LETTER BEH ISOLATED FORM '\ufe91' # 0xB4 -> ARABIC LETTER BEH INITIAL FORM '\ufb56' # 0xB5 -> ARABIC LETTER PEH ISOLATED FORM '\ufb58' # 0xB6 -> ARABIC LETTER PEH INITIAL FORM '\ufe93' # 0xB7 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM '\ufe95' # 0xB8 -> ARABIC LETTER TEH ISOLATED FORM '\ufe97' # 0xB9 -> ARABIC LETTER TEH INITIAL FORM '\ufb66' # 0xBA -> ARABIC LETTER TTEH ISOLATED FORM '\ufb68' # 0xBB -> ARABIC LETTER TTEH INITIAL FORM '\ufe99' # 0xBC -> ARABIC LETTER THEH ISOLATED FORM '\ufe9b' # 0xBD -> ARABIC LETTER THEH INITIAL FORM '\ufe9d' # 0xBE -> ARABIC LETTER JEEM ISOLATED FORM '\ufe9f' # 0xBF -> ARABIC LETTER JEEM INITIAL FORM '\ufb7a' # 0xC0 -> ARABIC LETTER TCHEH ISOLATED FORM '\ufb7c' # 0xC1 -> ARABIC LETTER TCHEH INITIAL FORM '\ufea1' # 0xC2 -> ARABIC LETTER HAH ISOLATED FORM '\ufea3' # 0xC3 -> ARABIC LETTER HAH INITIAL FORM '\ufea5' # 0xC4 -> ARABIC LETTER KHAH ISOLATED FORM '\ufea7' # 0xC5 -> ARABIC LETTER KHAH INITIAL FORM '\ufea9' # 0xC6 -> ARABIC LETTER DAL ISOLATED FORM '\ufb84' # 0xC7 -> ARABIC LETTER DAHAL ISOLATED FORMN 
'\ufeab' # 0xC8 -> ARABIC LETTER THAL ISOLATED FORM '\ufead' # 0xC9 -> ARABIC LETTER REH ISOLATED FORM '\ufb8c' # 0xCA -> ARABIC LETTER RREH ISOLATED FORM '\ufeaf' # 0xCB -> ARABIC LETTER ZAIN ISOLATED FORM '\ufb8a' # 0xCC -> ARABIC LETTER JEH ISOLATED FORM '\ufeb1' # 0xCD -> ARABIC LETTER SEEN ISOLATED FORM '\ufeb3' # 0xCE -> ARABIC LETTER SEEN INITIAL FORM '\ufeb5' # 0xCF -> ARABIC LETTER SHEEN ISOLATED FORM '\ufeb7' # 0xD0 -> ARABIC LETTER SHEEN INITIAL FORM '\ufeb9' # 0xD1 -> ARABIC LETTER SAD ISOLATED FORM '\ufebb' # 0xD2 -> ARABIC LETTER SAD INITIAL FORM '\ufebd' # 0xD3 -> ARABIC LETTER DAD ISOLATED FORM '\ufebf' # 0xD4 -> ARABIC LETTER DAD INITIAL FORM '\ufec1' # 0xD5 -> ARABIC LETTER TAH ISOLATED FORM '\ufec5' # 0xD6 -> ARABIC LETTER ZAH ISOLATED FORM '\ufec9' # 0xD7 -> ARABIC LETTER AIN ISOLATED FORM '\ufeca' # 0xD8 -> ARABIC LETTER AIN FINAL FORM '\ufecb' # 0xD9 -> ARABIC LETTER AIN INITIAL FORM '\ufecc' # 0xDA -> ARABIC LETTER AIN MEDIAL FORM '\ufecd' # 0xDB -> ARABIC LETTER GHAIN ISOLATED FORM '\ufece' # 0xDC -> ARABIC LETTER GHAIN FINAL FORM '\ufecf' # 0xDD -> ARABIC LETTER GHAIN INITIAL FORM '\ufed0' # 0xDE -> ARABIC LETTER GHAIN MEDIAL FORM '\ufed1' # 0xDF -> ARABIC LETTER FEH ISOLATED FORM '\ufed3' # 0xE0 -> ARABIC LETTER FEH INITIAL FORM '\ufed5' # 0xE1 -> ARABIC LETTER QAF ISOLATED FORM '\ufed7' # 0xE2 -> ARABIC LETTER QAF INITIAL FORM '\ufed9' # 0xE3 -> ARABIC LETTER KAF ISOLATED FORM '\ufedb' # 0xE4 -> ARABIC LETTER KAF INITIAL FORM '\ufb92' # 0xE5 -> ARABIC LETTER GAF ISOLATED FORM '\ufb94' # 0xE6 -> ARABIC LETTER GAF INITIAL FORM '\ufedd' # 0xE7 -> ARABIC LETTER LAM ISOLATED FORM '\ufedf' # 0xE8 -> ARABIC LETTER LAM INITIAL FORM '\ufee0' # 0xE9 -> ARABIC LETTER LAM MEDIAL FORM '\ufee1' # 0xEA -> ARABIC LETTER MEEM ISOLATED FORM '\ufee3' # 0xEB -> ARABIC LETTER MEEM INITIAL FORM '\ufb9e' # 0xEC -> ARABIC LETTER NOON GHUNNA ISOLATED FORM '\ufee5' # 0xED -> ARABIC LETTER NOON ISOLATED FORM '\ufee7' # 0xEE -> ARABIC LETTER NOON INITIAL FORM 
'\ufe85' # 0xEF -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM '\ufeed' # 0xF0 -> ARABIC LETTER WAW ISOLATED FORM '\ufba6' # 0xF1 -> ARABIC LETTER HEH GOAL ISOLATED FORM '\ufba8' # 0xF2 -> ARABIC LETTER HEH GOAL INITIAL FORM '\ufba9' # 0xF3 -> ARABIC LETTER HEH GOAL MEDIAL FORM '\ufbaa' # 0xF4 -> ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM '\ufe80' # 0xF5 -> ARABIC LETTER HAMZA ISOLATED FORM '\ufe89' # 0xF6 -> ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM '\ufe8a' # 0xF7 -> ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM '\ufe8b' # 0xF8 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM '\ufef1' # 0xF9 -> ARABIC LETTER YEH ISOLATED FORM '\ufef2' # 0xFA -> ARABIC LETTER YEH FINAL FORM '\ufef3' # 0xFB -> ARABIC LETTER YEH INITIAL FORM '\ufbb0' # 0xFC -> ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM '\ufbae' # 0xFD -> ARABIC LETTER YEH BARREE ISOLATED FORM '\ufe7c' # 0xFE -> ARABIC SHADDA ISOLATED FORM '\ufe7d' # 0xFF -> ARABIC SHADDA MEDIAL FORM ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
bec8f31e4cd97ea31736b2ef0dd28dd6
43.19544
109
0.529946
3.094335
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/iso8859_8.py
35
11343
""" Python Character Mapping Codec iso8859_8 generated from 'MAPPINGS/ISO8859/8859-8.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-8', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> NULL '\x01' # 0x01 -> START OF HEADING '\x02' # 0x02 -> START OF TEXT '\x03' # 0x03 -> END OF TEXT '\x04' # 0x04 -> END OF TRANSMISSION '\x05' # 0x05 -> ENQUIRY '\x06' # 0x06 -> ACKNOWLEDGE '\x07' # 0x07 -> BELL '\x08' # 0x08 -> BACKSPACE '\t' # 0x09 -> HORIZONTAL TABULATION '\n' # 0x0A -> LINE FEED '\x0b' # 0x0B -> VERTICAL TABULATION '\x0c' # 0x0C -> FORM FEED '\r' # 0x0D -> CARRIAGE RETURN '\x0e' # 0x0E -> SHIFT OUT '\x0f' # 0x0F -> SHIFT IN '\x10' # 0x10 -> DATA LINK ESCAPE '\x11' # 0x11 -> DEVICE CONTROL ONE '\x12' # 0x12 -> DEVICE CONTROL TWO '\x13' # 0x13 -> DEVICE CONTROL THREE '\x14' # 0x14 -> DEVICE CONTROL FOUR '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x16 -> SYNCHRONOUS IDLE '\x17' # 0x17 -> END OF TRANSMISSION BLOCK '\x18' # 0x18 -> CANCEL '\x19' # 0x19 -> END OF MEDIUM '\x1a' # 0x1A -> SUBSTITUTE '\x1b' # 0x1B -> ESCAPE '\x1c' # 0x1C -> FILE SEPARATOR '\x1d' # 
0x1D -> GROUP SEPARATOR '\x1e' # 0x1E -> RECORD SEPARATOR '\x1f' # 0x1F -> UNIT SEPARATOR ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' # 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET 
'^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> DELETE '\x80' # 0x80 -> <control> '\x81' # 0x81 -> <control> '\x82' # 0x82 -> <control> '\x83' # 0x83 -> <control> '\x84' # 0x84 -> <control> '\x85' # 0x85 -> <control> '\x86' # 0x86 -> <control> '\x87' # 0x87 -> <control> '\x88' # 0x88 -> <control> '\x89' # 0x89 -> <control> '\x8a' # 0x8A -> <control> '\x8b' # 0x8B -> <control> '\x8c' # 0x8C -> <control> '\x8d' # 0x8D -> <control> '\x8e' # 0x8E -> <control> '\x8f' # 0x8F -> <control> '\x90' # 0x90 -> <control> '\x91' # 0x91 -> <control> '\x92' # 0x92 -> <control> '\x93' # 0x93 -> <control> '\x94' # 0x94 -> <control> '\x95' # 0x95 -> <control> '\x96' # 0x96 -> <control> '\x97' # 0x97 -> <control> '\x98' # 0x98 -> <control> '\x99' # 0x99 -> <control> '\x9a' # 0x9A -> <control> '\x9b' # 0x9B -> <control> '\x9c' # 0x9C -> <control> '\x9d' # 0x9D -> <control> '\x9e' # 0x9E -> <control> '\x9f' # 0x9F -> <control> 
'\xa0' # 0xA0 -> NO-BREAK SPACE '\ufffe' '\xa2' # 0xA2 -> CENT SIGN '\xa3' # 0xA3 -> POUND SIGN '\xa4' # 0xA4 -> CURRENCY SIGN '\xa5' # 0xA5 -> YEN SIGN '\xa6' # 0xA6 -> BROKEN BAR '\xa7' # 0xA7 -> SECTION SIGN '\xa8' # 0xA8 -> DIAERESIS '\xa9' # 0xA9 -> COPYRIGHT SIGN '\xd7' # 0xAA -> MULTIPLICATION SIGN '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xac' # 0xAC -> NOT SIGN '\xad' # 0xAD -> SOFT HYPHEN '\xae' # 0xAE -> REGISTERED SIGN '\xaf' # 0xAF -> MACRON '\xb0' # 0xB0 -> DEGREE SIGN '\xb1' # 0xB1 -> PLUS-MINUS SIGN '\xb2' # 0xB2 -> SUPERSCRIPT TWO '\xb3' # 0xB3 -> SUPERSCRIPT THREE '\xb4' # 0xB4 -> ACUTE ACCENT '\xb5' # 0xB5 -> MICRO SIGN '\xb6' # 0xB6 -> PILCROW SIGN '\xb7' # 0xB7 -> MIDDLE DOT '\xb8' # 0xB8 -> CEDILLA '\xb9' # 0xB9 -> SUPERSCRIPT ONE '\xf7' # 0xBA -> DIVISION SIGN '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\ufffe' '\u2017' # 0xDF -> DOUBLE LOW LINE '\u05d0' # 0xE0 -> HEBREW LETTER ALEF '\u05d1' # 0xE1 -> HEBREW LETTER BET '\u05d2' # 0xE2 -> HEBREW LETTER GIMEL '\u05d3' # 0xE3 -> HEBREW LETTER DALET '\u05d4' # 0xE4 -> HEBREW LETTER HE '\u05d5' # 0xE5 -> HEBREW LETTER VAV '\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN '\u05d7' # 0xE7 -> HEBREW LETTER HET '\u05d8' # 0xE8 -> HEBREW LETTER TET '\u05d9' # 0xE9 -> HEBREW LETTER YOD '\u05da' # 0xEA -> HEBREW LETTER FINAL KAF '\u05db' # 0xEB -> HEBREW LETTER KAF '\u05dc' # 0xEC -> HEBREW LETTER LAMED '\u05dd' # 0xED -> HEBREW LETTER FINAL MEM '\u05de' # 0xEE -> HEBREW LETTER MEM '\u05df' # 0xEF -> HEBREW LETTER FINAL NUN '\u05e0' # 0xF0 -> HEBREW LETTER NUN '\u05e1' # 
0xF1 -> HEBREW LETTER SAMEKH '\u05e2' # 0xF2 -> HEBREW LETTER AYIN '\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE '\u05e4' # 0xF4 -> HEBREW LETTER PE '\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI '\u05e6' # 0xF6 -> HEBREW LETTER TSADI '\u05e7' # 0xF7 -> HEBREW LETTER QOF '\u05e8' # 0xF8 -> HEBREW LETTER RESH '\u05e9' # 0xF9 -> HEBREW LETTER SHIN '\u05ea' # 0xFA -> HEBREW LETTER TAV '\ufffe' '\ufffe' '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK '\ufffe' ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
771e83aeba9cdaec7da4a4a57573d141
34.947883
107
0.47677
3.032888
false
false
false
false
brython-dev/brython
www/src/Lib/test/test_copy.py
1
27028
"""Unit tests for the copy module.""" import copy import copyreg import weakref import abc from operator import le, lt, ge, gt, eq, ne import unittest from test import support order_comparisons = le, lt, ge, gt equality_comparisons = eq, ne comparisons = order_comparisons + equality_comparisons class TestCopy(unittest.TestCase): # Attempt full line coverage of copy.py from top to bottom def test_exceptions(self): self.assertIs(copy.Error, copy.error) self.assertTrue(issubclass(copy.Error, Exception)) # The copy() method def test_copy_basic(self): x = 42 y = copy.copy(x) self.assertEqual(x, y) def test_copy_copy(self): class C(object): def __init__(self, foo): self.foo = foo def __copy__(self): return C(self.foo) x = C(42) y = copy.copy(x) self.assertEqual(y.__class__, x.__class__) self.assertEqual(y.foo, x.foo) def test_copy_registry(self): class C(object): def __new__(cls, foo): obj = object.__new__(cls) obj.foo = foo return obj def pickle_C(obj): return (C, (obj.foo,)) x = C(42) self.assertRaises(TypeError, copy.copy, x) copyreg.pickle(C, pickle_C, C) y = copy.copy(x) def test_copy_reduce_ex(self): class C(object): def __reduce_ex__(self, proto): c.append(1) return "" def __reduce__(self): self.fail("shouldn't call this") c = [] x = C() y = copy.copy(x) self.assertIs(y, x) self.assertEqual(c, [1]) def test_copy_reduce(self): class C(object): def __reduce__(self): c.append(1) return "" c = [] x = C() y = copy.copy(x) self.assertIs(y, x) self.assertEqual(c, [1]) def test_copy_cant(self): class C(object): def __getattribute__(self, name): if name.startswith("__reduce"): raise AttributeError(name) return object.__getattribute__(self, name) x = C() self.assertRaises(copy.Error, copy.copy, x) # Type-specific _copy_xxx() methods def test_copy_atomic(self): class Classic: pass class NewStyle(object): pass def f(): pass class WithMetaclass(metaclass=abc.ABCMeta): pass tests = [None, ..., NotImplemented, 42, 2**100, 3.14, True, False, 1j, "hello", "hello\u1234", 
f.__code__, b"world", bytes(range(256)), range(10), slice(1, 10, 2), NewStyle, Classic, max, WithMetaclass, property()] for x in tests: self.assertIs(copy.copy(x), x) def test_copy_list(self): x = [1, 2, 3] y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) x = [] y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) def test_copy_tuple(self): x = (1, 2, 3) self.assertIs(copy.copy(x), x) x = () self.assertIs(copy.copy(x), x) x = (1, 2, 3, []) self.assertIs(copy.copy(x), x) def test_copy_dict(self): x = {"foo": 1, "bar": 2} y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) x = {} y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) def test_copy_set(self): x = {1, 2, 3} y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) x = set() y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) def test_copy_frozenset(self): x = frozenset({1, 2, 3}) self.assertIs(copy.copy(x), x) x = frozenset() self.assertIs(copy.copy(x), x) def test_copy_bytearray(self): x = bytearray(b'abc') y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) x = bytearray() y = copy.copy(x) self.assertEqual(y, x) self.assertIsNot(y, x) def test_copy_inst_vanilla(self): class C: def __init__(self, foo): self.foo = foo def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_copy(self): class C: def __init__(self, foo): self.foo = foo def __copy__(self): return C(self.foo) def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getinitargs(self): class C: def __init__(self, foo): self.foo = foo def __getinitargs__(self): return (self.foo,) def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getnewargs(self): class C(int): def __new__(cls, foo): self = int.__new__(cls) self.foo = foo return self def __getnewargs__(self): return self.foo, def 
__eq__(self, other): return self.foo == other.foo x = C(42) y = copy.copy(x) self.assertIsInstance(y, C) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertEqual(y.foo, x.foo) def test_copy_inst_getnewargs_ex(self): class C(int): def __new__(cls, *, foo): self = int.__new__(cls) self.foo = foo return self def __getnewargs_ex__(self): return (), {'foo': self.foo} def __eq__(self, other): return self.foo == other.foo x = C(foo=42) y = copy.copy(x) self.assertIsInstance(y, C) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertEqual(y.foo, x.foo) def test_copy_inst_getstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return {"foo": self.foo} def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_setstate(self): class C: def __init__(self, foo): self.foo = foo def __setstate__(self, state): self.foo = state["foo"] def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) def test_copy_inst_getstate_setstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, state): self.foo = state def __eq__(self, other): return self.foo == other.foo x = C(42) self.assertEqual(copy.copy(x), x) # State with boolean value is false (issue #25718) x = C(0.0) self.assertEqual(copy.copy(x), x) # The deepcopy() method def test_deepcopy_basic(self): x = 42 y = copy.deepcopy(x) self.assertEqual(y, x) def test_deepcopy_memo(self): # Tests of reflexive objects are under type-specific sections below. # This tests only repetitions of objects. 
x = [] x = [x, x] y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y[0], x[0]) self.assertIs(y[0], y[1]) def test_deepcopy_issubclass(self): # XXX Note: there's no way to test the TypeError coming out of # issubclass() -- this can only happen when an extension # module defines a "type" that doesn't formally inherit from # type. class Meta(type): pass class C(metaclass=Meta): pass self.assertEqual(copy.deepcopy(C), C) def test_deepcopy_deepcopy(self): class C(object): def __init__(self, foo): self.foo = foo def __deepcopy__(self, memo=None): return C(self.foo) x = C(42) y = copy.deepcopy(x) self.assertEqual(y.__class__, x.__class__) self.assertEqual(y.foo, x.foo) def test_deepcopy_registry(self): class C(object): def __new__(cls, foo): obj = object.__new__(cls) obj.foo = foo return obj def pickle_C(obj): return (C, (obj.foo,)) x = C(42) self.assertRaises(TypeError, copy.deepcopy, x) copyreg.pickle(C, pickle_C, C) y = copy.deepcopy(x) def test_deepcopy_reduce_ex(self): class C(object): def __reduce_ex__(self, proto): c.append(1) return "" def __reduce__(self): self.fail("shouldn't call this") c = [] x = C() y = copy.deepcopy(x) self.assertIs(y, x) self.assertEqual(c, [1]) def test_deepcopy_reduce(self): class C(object): def __reduce__(self): c.append(1) return "" c = [] x = C() y = copy.deepcopy(x) self.assertIs(y, x) self.assertEqual(c, [1]) def test_deepcopy_cant(self): class C(object): def __getattribute__(self, name): if name.startswith("__reduce"): raise AttributeError(name) return object.__getattribute__(self, name) x = C() self.assertRaises(copy.Error, copy.deepcopy, x) # Type-specific _deepcopy_xxx() methods def test_deepcopy_atomic(self): class Classic: pass class NewStyle(object): pass def f(): pass tests = [None, 42, 2**100, 3.14, True, False, 1j, "hello", "hello\u1234", f.__code__, NewStyle, range(10), Classic, max, property()] for x in tests: self.assertIs(copy.deepcopy(x), x) def test_deepcopy_list(self): x = [[1, 2], 
3] y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(x, y) self.assertIsNot(x[0], y[0]) def test_deepcopy_reflexive_list(self): x = [] x.append(x) y = copy.deepcopy(x) for op in comparisons: self.assertRaises(RecursionError, op, y, x) self.assertIsNot(y, x) self.assertIs(y[0], y) self.assertEqual(len(y), 1) def test_deepcopy_empty_tuple(self): x = () y = copy.deepcopy(x) self.assertIs(x, y) def test_deepcopy_tuple(self): x = ([1, 2], 3) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(x, y) self.assertIsNot(x[0], y[0]) def test_deepcopy_tuple_of_immutables(self): x = ((1, 2), 3) y = copy.deepcopy(x) self.assertIs(x, y) def test_deepcopy_reflexive_tuple(self): x = ([],) x[0].append(x) y = copy.deepcopy(x) for op in comparisons: self.assertRaises(RecursionError, op, y, x) self.assertIsNot(y, x) self.assertIsNot(y[0], x[0]) self.assertIs(y[0][0], y) def test_deepcopy_dict(self): x = {"foo": [1, 2], "bar": 3} y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(x, y) self.assertIsNot(x["foo"], y["foo"]) def test_deepcopy_reflexive_dict(self): x = {} x['foo'] = x y = copy.deepcopy(x) for op in order_comparisons: self.assertRaises(TypeError, op, y, x) for op in equality_comparisons: self.assertRaises(RecursionError, op, y, x) self.assertIsNot(y, x) self.assertIs(y['foo'], y) self.assertEqual(len(y), 1) def test_deepcopy_keepalive(self): memo = {} x = [] y = copy.deepcopy(x, memo) self.assertIs(memo[id(memo)][0], x) def test_deepcopy_dont_memo_immutable(self): memo = {} x = [1, 2, 3, 4] y = copy.deepcopy(x, memo) self.assertEqual(y, x) # There's the entry for the new list, and the keep alive. self.assertEqual(len(memo), 2) memo = {} x = [(1, 2)] y = copy.deepcopy(x, memo) self.assertEqual(y, x) # Tuples with immutable contents are immutable for deepcopy. 
self.assertEqual(len(memo), 2) def test_deepcopy_inst_vanilla(self): class C: def __init__(self, foo): self.foo = foo def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_deepcopy(self): class C: def __init__(self, foo): self.foo = foo def __deepcopy__(self, memo): return C(copy.deepcopy(self.foo, memo)) def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_getinitargs(self): class C: def __init__(self, foo): self.foo = foo def __getinitargs__(self): return (self.foo,) def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_getnewargs(self): class C(int): def __new__(cls, foo): self = int.__new__(cls) self.foo = foo return self def __getnewargs__(self): return self.foo, def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertIsInstance(y, C) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertEqual(y.foo, x.foo) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_getnewargs_ex(self): class C(int): def __new__(cls, *, foo): self = int.__new__(cls) self.foo = foo return self def __getnewargs_ex__(self): return (), {'foo': self.foo} def __eq__(self, other): return self.foo == other.foo x = C(foo=[42]) y = copy.deepcopy(x) self.assertIsInstance(y, C) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertEqual(y.foo, x.foo) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_getstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return {"foo": self.foo} def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) 
def test_deepcopy_inst_setstate(self): class C: def __init__(self, foo): self.foo = foo def __setstate__(self, state): self.foo = state["foo"] def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) def test_deepcopy_inst_getstate_setstate(self): class C: def __init__(self, foo): self.foo = foo def __getstate__(self): return self.foo def __setstate__(self, state): self.foo = state def __eq__(self, other): return self.foo == other.foo x = C([42]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) # State with boolean value is false (issue #25718) x = C([]) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y, x) self.assertIsNot(y.foo, x.foo) def test_deepcopy_reflexive_inst(self): class C: pass x = C() x.foo = x y = copy.deepcopy(x) self.assertIsNot(y, x) self.assertIs(y.foo, y) # _reconstruct() def test_reconstruct_string(self): class C(object): def __reduce__(self): return "" x = C() y = copy.copy(x) self.assertIs(y, x) y = copy.deepcopy(x) self.assertIs(y, x) def test_reconstruct_nostate(self): class C(object): def __reduce__(self): return (C, ()) x = C() x.foo = 42 y = copy.copy(x) self.assertIs(y.__class__, x.__class__) y = copy.deepcopy(x) self.assertIs(y.__class__, x.__class__) def test_reconstruct_state(self): class C(object): def __reduce__(self): return (C, (), self.__dict__) def __eq__(self, other): return self.__dict__ == other.__dict__ x = C() x.foo = [42] y = copy.copy(x) self.assertEqual(y, x) y = copy.deepcopy(x) self.assertEqual(y, x) self.assertIsNot(y.foo, x.foo) def test_reconstruct_state_setstate(self): class C(object): def __reduce__(self): return (C, (), self.__dict__) def __setstate__(self, state): self.__dict__.update(state) def __eq__(self, other): return self.__dict__ == other.__dict__ x = C() x.foo = [42] y = copy.copy(x) self.assertEqual(y, x) y = copy.deepcopy(x) 
self.assertEqual(y, x) self.assertIsNot(y.foo, x.foo) def test_reconstruct_reflexive(self): class C(object): pass x = C() x.foo = x y = copy.deepcopy(x) self.assertIsNot(y, x) self.assertIs(y.foo, y) # Additions for Python 2.3 and pickle protocol 2 def test_reduce_4tuple(self): class C(list): def __reduce__(self): return (C, (), self.__dict__, iter(self)) def __eq__(self, other): return (list(self) == list(other) and self.__dict__ == other.__dict__) x = C([[1, 2], 3]) y = copy.copy(x) self.assertEqual(x, y) self.assertIsNot(x, y) self.assertIs(x[0], y[0]) y = copy.deepcopy(x) self.assertEqual(x, y) self.assertIsNot(x, y) self.assertIsNot(x[0], y[0]) def test_reduce_5tuple(self): class C(dict): def __reduce__(self): return (C, (), self.__dict__, None, self.items()) def __eq__(self, other): return (dict(self) == dict(other) and self.__dict__ == other.__dict__) x = C([("foo", [1, 2]), ("bar", 3)]) y = copy.copy(x) self.assertEqual(x, y) self.assertIsNot(x, y) self.assertIs(x["foo"], y["foo"]) y = copy.deepcopy(x) self.assertEqual(x, y) self.assertIsNot(x, y) self.assertIsNot(x["foo"], y["foo"]) def test_reduce_6tuple(self): def state_setter(*args, **kwargs): self.fail("shouldn't call this") class C: def __reduce__(self): return C, (), self.__dict__, None, None, state_setter x = C() with self.assertRaises(TypeError): copy.copy(x) with self.assertRaises(TypeError): copy.deepcopy(x) def test_reduce_6tuple_none(self): class C: def __reduce__(self): return C, (), self.__dict__, None, None, None x = C() with self.assertRaises(TypeError): copy.copy(x) with self.assertRaises(TypeError): copy.deepcopy(x) def test_copy_slots(self): class C(object): __slots__ = ["foo"] x = C() x.foo = [42] y = copy.copy(x) self.assertIs(x.foo, y.foo) def test_deepcopy_slots(self): class C(object): __slots__ = ["foo"] x = C() x.foo = [42] y = copy.deepcopy(x) self.assertEqual(x.foo, y.foo) self.assertIsNot(x.foo, y.foo) def test_deepcopy_dict_subclass(self): class C(dict): def __init__(self, 
d=None): if not d: d = {} self._keys = list(d.keys()) super().__init__(d) def __setitem__(self, key, item): super().__setitem__(key, item) if key not in self._keys: self._keys.append(key) x = C(d={'foo':0}) y = copy.deepcopy(x) self.assertEqual(x, y) self.assertEqual(x._keys, y._keys) self.assertIsNot(x, y) x['bar'] = 1 self.assertNotEqual(x, y) self.assertNotEqual(x._keys, y._keys) def test_copy_list_subclass(self): class C(list): pass x = C([[1, 2], 3]) x.foo = [4, 5] y = copy.copy(x) self.assertEqual(list(x), list(y)) self.assertEqual(x.foo, y.foo) self.assertIs(x[0], y[0]) self.assertIs(x.foo, y.foo) def test_deepcopy_list_subclass(self): class C(list): pass x = C([[1, 2], 3]) x.foo = [4, 5] y = copy.deepcopy(x) self.assertEqual(list(x), list(y)) self.assertEqual(x.foo, y.foo) self.assertIsNot(x[0], y[0]) self.assertIsNot(x.foo, y.foo) def test_copy_tuple_subclass(self): class C(tuple): pass x = C([1, 2, 3]) self.assertEqual(tuple(x), (1, 2, 3)) y = copy.copy(x) self.assertEqual(tuple(y), (1, 2, 3)) def test_deepcopy_tuple_subclass(self): class C(tuple): pass x = C([[1, 2], 3]) self.assertEqual(tuple(x), ([1, 2], 3)) y = copy.deepcopy(x) self.assertEqual(tuple(y), ([1, 2], 3)) self.assertIsNot(x, y) self.assertIsNot(x[0], y[0]) def test_getstate_exc(self): class EvilState(object): def __getstate__(self): raise ValueError("ain't got no stickin' state") self.assertRaises(ValueError, copy.copy, EvilState()) def test_copy_function(self): self.assertEqual(copy.copy(global_foo), global_foo) def foo(x, y): return x+y self.assertEqual(copy.copy(foo), foo) bar = lambda: None self.assertEqual(copy.copy(bar), bar) def test_deepcopy_function(self): self.assertEqual(copy.deepcopy(global_foo), global_foo) def foo(x, y): return x+y self.assertEqual(copy.deepcopy(foo), foo) bar = lambda: None self.assertEqual(copy.deepcopy(bar), bar) def _check_weakref(self, _copy): class C(object): pass obj = C() x = weakref.ref(obj) y = _copy(x) self.assertIs(y, x) del obj y = _copy(x) 
self.assertIs(y, x) def test_copy_weakref(self): self._check_weakref(copy.copy) def test_deepcopy_weakref(self): self._check_weakref(copy.deepcopy) def _check_copy_weakdict(self, _dicttype): class C(object): pass a, b, c, d = [C() for i in range(4)] u = _dicttype() u[a] = b u[c] = d v = copy.copy(u) self.assertIsNot(v, u) self.assertEqual(v, u) self.assertEqual(v[a], b) self.assertEqual(v[c], d) self.assertEqual(len(v), 2) del c, d support.gc_collect() # For PyPy or other GCs. self.assertEqual(len(v), 1) x, y = C(), C() # The underlying containers are decoupled v[x] = y self.assertNotIn(x, u) def test_copy_weakkeydict(self): self._check_copy_weakdict(weakref.WeakKeyDictionary) def test_copy_weakvaluedict(self): self._check_copy_weakdict(weakref.WeakValueDictionary) def test_deepcopy_weakkeydict(self): class C(object): def __init__(self, i): self.i = i a, b, c, d = [C(i) for i in range(4)] u = weakref.WeakKeyDictionary() u[a] = b u[c] = d # Keys aren't copied, values are v = copy.deepcopy(u) self.assertNotEqual(v, u) self.assertEqual(len(v), 2) self.assertIsNot(v[a], b) self.assertIsNot(v[c], d) self.assertEqual(v[a].i, b.i) self.assertEqual(v[c].i, d.i) del c support.gc_collect() # For PyPy or other GCs. self.assertEqual(len(v), 1) def test_deepcopy_weakvaluedict(self): class C(object): def __init__(self, i): self.i = i a, b, c, d = [C(i) for i in range(4)] u = weakref.WeakValueDictionary() u[a] = b u[c] = d # Keys are copied, values aren't v = copy.deepcopy(u) self.assertNotEqual(v, u) self.assertEqual(len(v), 2) (x, y), (z, t) = sorted(v.items(), key=lambda pair: pair[0].i) self.assertIsNot(x, a) self.assertEqual(x.i, a.i) self.assertIs(y, b) self.assertIsNot(z, c) self.assertEqual(z.i, c.i) self.assertIs(t, d) del x, y, z, t del d support.gc_collect() # For PyPy or other GCs. 
self.assertEqual(len(v), 1) def test_deepcopy_bound_method(self): class Foo(object): def m(self): pass f = Foo() f.b = f.m g = copy.deepcopy(f) self.assertEqual(g.m, g.b) self.assertIs(g.b.__self__, g) g.b() def global_foo(x, y): return x+y if __name__ == "__main__": unittest.main()
bsd-3-clause
a3055748b0d3bdfca437b5e5fb4c6f4f
28.93134
76
0.488863
3.657375
false
true
false
false
brython-dev/brython
www/src/Lib/encodings/cp720.py
35
13995
"""Python Character Mapping Codec cp720 generated on Windows: Vista 6.0.6002 SP2 Multiprocessor Free with the command: python Tools/unicode/genwincodec.py 720 """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp720', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( '\x00' # 0x00 -> CONTROL CHARACTER '\x01' # 0x01 -> CONTROL CHARACTER '\x02' # 0x02 -> CONTROL CHARACTER '\x03' # 0x03 -> CONTROL CHARACTER '\x04' # 0x04 -> CONTROL CHARACTER '\x05' # 0x05 -> CONTROL CHARACTER '\x06' # 0x06 -> CONTROL CHARACTER '\x07' # 0x07 -> CONTROL CHARACTER '\x08' # 0x08 -> CONTROL CHARACTER '\t' # 0x09 -> CONTROL CHARACTER '\n' # 0x0A -> CONTROL CHARACTER '\x0b' # 0x0B -> CONTROL CHARACTER '\x0c' # 0x0C -> CONTROL CHARACTER '\r' # 0x0D -> CONTROL CHARACTER '\x0e' # 0x0E -> CONTROL CHARACTER '\x0f' # 0x0F -> CONTROL CHARACTER '\x10' # 0x10 -> CONTROL CHARACTER '\x11' # 0x11 -> CONTROL CHARACTER '\x12' # 0x12 -> CONTROL CHARACTER '\x13' # 0x13 -> CONTROL CHARACTER '\x14' # 0x14 -> CONTROL CHARACTER '\x15' # 0x15 -> CONTROL CHARACTER '\x16' # 0x16 -> CONTROL CHARACTER '\x17' # 0x17 -> CONTROL CHARACTER '\x18' # 0x18 -> CONTROL 
CHARACTER '\x19' # 0x19 -> CONTROL CHARACTER '\x1a' # 0x1A -> CONTROL CHARACTER '\x1b' # 0x1B -> CONTROL CHARACTER '\x1c' # 0x1C -> CONTROL CHARACTER '\x1d' # 0x1D -> CONTROL CHARACTER '\x1e' # 0x1E -> CONTROL CHARACTER '\x1f' # 0x1F -> CONTROL CHARACTER ' ' # 0x20 -> SPACE '!' # 0x21 -> EXCLAMATION MARK '"' # 0x22 -> QUOTATION MARK '#' # 0x23 -> NUMBER SIGN '$' # 0x24 -> DOLLAR SIGN '%' # 0x25 -> PERCENT SIGN '&' # 0x26 -> AMPERSAND "'" # 0x27 -> APOSTROPHE '(' # 0x28 -> LEFT PARENTHESIS ')' # 0x29 -> RIGHT PARENTHESIS '*' # 0x2A -> ASTERISK '+' # 0x2B -> PLUS SIGN ',' # 0x2C -> COMMA '-' # 0x2D -> HYPHEN-MINUS '.' # 0x2E -> FULL STOP '/' # 0x2F -> SOLIDUS '0' # 0x30 -> DIGIT ZERO '1' # 0x31 -> DIGIT ONE '2' # 0x32 -> DIGIT TWO '3' # 0x33 -> DIGIT THREE '4' # 0x34 -> DIGIT FOUR '5' # 0x35 -> DIGIT FIVE '6' # 0x36 -> DIGIT SIX '7' # 0x37 -> DIGIT SEVEN '8' # 0x38 -> DIGIT EIGHT '9' # 0x39 -> DIGIT NINE ':' # 0x3A -> COLON ';' # 0x3B -> SEMICOLON '<' # 0x3C -> LESS-THAN SIGN '=' # 0x3D -> EQUALS SIGN '>' # 0x3E -> GREATER-THAN SIGN '?' 
# 0x3F -> QUESTION MARK '@' # 0x40 -> COMMERCIAL AT 'A' # 0x41 -> LATIN CAPITAL LETTER A 'B' # 0x42 -> LATIN CAPITAL LETTER B 'C' # 0x43 -> LATIN CAPITAL LETTER C 'D' # 0x44 -> LATIN CAPITAL LETTER D 'E' # 0x45 -> LATIN CAPITAL LETTER E 'F' # 0x46 -> LATIN CAPITAL LETTER F 'G' # 0x47 -> LATIN CAPITAL LETTER G 'H' # 0x48 -> LATIN CAPITAL LETTER H 'I' # 0x49 -> LATIN CAPITAL LETTER I 'J' # 0x4A -> LATIN CAPITAL LETTER J 'K' # 0x4B -> LATIN CAPITAL LETTER K 'L' # 0x4C -> LATIN CAPITAL LETTER L 'M' # 0x4D -> LATIN CAPITAL LETTER M 'N' # 0x4E -> LATIN CAPITAL LETTER N 'O' # 0x4F -> LATIN CAPITAL LETTER O 'P' # 0x50 -> LATIN CAPITAL LETTER P 'Q' # 0x51 -> LATIN CAPITAL LETTER Q 'R' # 0x52 -> LATIN CAPITAL LETTER R 'S' # 0x53 -> LATIN CAPITAL LETTER S 'T' # 0x54 -> LATIN CAPITAL LETTER T 'U' # 0x55 -> LATIN CAPITAL LETTER U 'V' # 0x56 -> LATIN CAPITAL LETTER V 'W' # 0x57 -> LATIN CAPITAL LETTER W 'X' # 0x58 -> LATIN CAPITAL LETTER X 'Y' # 0x59 -> LATIN CAPITAL LETTER Y 'Z' # 0x5A -> LATIN CAPITAL LETTER Z '[' # 0x5B -> LEFT SQUARE BRACKET '\\' # 0x5C -> REVERSE SOLIDUS ']' # 0x5D -> RIGHT SQUARE BRACKET '^' # 0x5E -> CIRCUMFLEX ACCENT '_' # 0x5F -> LOW LINE '`' # 0x60 -> GRAVE ACCENT 'a' # 0x61 -> LATIN SMALL LETTER A 'b' # 0x62 -> LATIN SMALL LETTER B 'c' # 0x63 -> LATIN SMALL LETTER C 'd' # 0x64 -> LATIN SMALL LETTER D 'e' # 0x65 -> LATIN SMALL LETTER E 'f' # 0x66 -> LATIN SMALL LETTER F 'g' # 0x67 -> LATIN SMALL LETTER G 'h' # 0x68 -> LATIN SMALL LETTER H 'i' # 0x69 -> LATIN SMALL LETTER I 'j' # 0x6A -> LATIN SMALL LETTER J 'k' # 0x6B -> LATIN SMALL LETTER K 'l' # 0x6C -> LATIN SMALL LETTER L 'm' # 0x6D -> LATIN SMALL LETTER M 'n' # 0x6E -> LATIN SMALL LETTER N 'o' # 0x6F -> LATIN SMALL LETTER O 'p' # 0x70 -> LATIN SMALL LETTER P 'q' # 0x71 -> LATIN SMALL LETTER Q 'r' # 0x72 -> LATIN SMALL LETTER R 's' # 0x73 -> LATIN SMALL LETTER S 't' # 0x74 -> LATIN SMALL LETTER T 'u' # 0x75 -> LATIN SMALL LETTER U 'v' # 0x76 -> LATIN SMALL LETTER V 'w' # 0x77 -> LATIN SMALL LETTER 
W 'x' # 0x78 -> LATIN SMALL LETTER X 'y' # 0x79 -> LATIN SMALL LETTER Y 'z' # 0x7A -> LATIN SMALL LETTER Z '{' # 0x7B -> LEFT CURLY BRACKET '|' # 0x7C -> VERTICAL LINE '}' # 0x7D -> RIGHT CURLY BRACKET '~' # 0x7E -> TILDE '\x7f' # 0x7F -> CONTROL CHARACTER '\x80' '\x81' '\xe9' # 0x82 -> LATIN SMALL LETTER E WITH ACUTE '\xe2' # 0x83 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\x84' '\xe0' # 0x85 -> LATIN SMALL LETTER A WITH GRAVE '\x86' '\xe7' # 0x87 -> LATIN SMALL LETTER C WITH CEDILLA '\xea' # 0x88 -> LATIN SMALL LETTER E WITH CIRCUMFLEX '\xeb' # 0x89 -> LATIN SMALL LETTER E WITH DIAERESIS '\xe8' # 0x8A -> LATIN SMALL LETTER E WITH GRAVE '\xef' # 0x8B -> LATIN SMALL LETTER I WITH DIAERESIS '\xee' # 0x8C -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\x8d' '\x8e' '\x8f' '\x90' '\u0651' # 0x91 -> ARABIC SHADDA '\u0652' # 0x92 -> ARABIC SUKUN '\xf4' # 0x93 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xa4' # 0x94 -> CURRENCY SIGN '\u0640' # 0x95 -> ARABIC TATWEEL '\xfb' # 0x96 -> LATIN SMALL LETTER U WITH CIRCUMFLEX '\xf9' # 0x97 -> LATIN SMALL LETTER U WITH GRAVE '\u0621' # 0x98 -> ARABIC LETTER HAMZA '\u0622' # 0x99 -> ARABIC LETTER ALEF WITH MADDA ABOVE '\u0623' # 0x9A -> ARABIC LETTER ALEF WITH HAMZA ABOVE '\u0624' # 0x9B -> ARABIC LETTER WAW WITH HAMZA ABOVE '\xa3' # 0x9C -> POUND SIGN '\u0625' # 0x9D -> ARABIC LETTER ALEF WITH HAMZA BELOW '\u0626' # 0x9E -> ARABIC LETTER YEH WITH HAMZA ABOVE '\u0627' # 0x9F -> ARABIC LETTER ALEF '\u0628' # 0xA0 -> ARABIC LETTER BEH '\u0629' # 0xA1 -> ARABIC LETTER TEH MARBUTA '\u062a' # 0xA2 -> ARABIC LETTER TEH '\u062b' # 0xA3 -> ARABIC LETTER THEH '\u062c' # 0xA4 -> ARABIC LETTER JEEM '\u062d' # 0xA5 -> ARABIC LETTER HAH '\u062e' # 0xA6 -> ARABIC LETTER KHAH '\u062f' # 0xA7 -> ARABIC LETTER DAL '\u0630' # 0xA8 -> ARABIC LETTER THAL '\u0631' # 0xA9 -> ARABIC LETTER REH '\u0632' # 0xAA -> ARABIC LETTER ZAIN '\u0633' # 0xAB -> ARABIC LETTER SEEN '\u0634' # 0xAC -> ARABIC LETTER SHEEN '\u0635' # 0xAD -> ARABIC LETTER SAD '\xab' # 0xAE -> 
LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0xB0 -> LIGHT SHADE '\u2592' # 0xB1 -> MEDIUM SHADE '\u2593' # 0xB2 -> DARK SHADE '\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\u2561' # 0xB5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE '\u2562' # 0xB6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE '\u2556' # 0xB7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE '\u2555' # 0xB8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE '\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT '\u255c' # 0xBD -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE '\u255b' # 0xBE -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE '\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT '\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u255e' # 0xC6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE '\u255f' # 0xC7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE '\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\u2567' # 0xCF -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE '\u2568' # 0xD0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE '\u2564' # 0xD1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE '\u2565' # 0xD2 -> BOX 
DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE '\u2559' # 0xD3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE '\u2558' # 0xD4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE '\u2552' # 0xD5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE '\u2553' # 0xD6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE '\u256b' # 0xD7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE '\u256a' # 0xD8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE '\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0xDB -> FULL BLOCK '\u2584' # 0xDC -> LOWER HALF BLOCK '\u258c' # 0xDD -> LEFT HALF BLOCK '\u2590' # 0xDE -> RIGHT HALF BLOCK '\u2580' # 0xDF -> UPPER HALF BLOCK '\u0636' # 0xE0 -> ARABIC LETTER DAD '\u0637' # 0xE1 -> ARABIC LETTER TAH '\u0638' # 0xE2 -> ARABIC LETTER ZAH '\u0639' # 0xE3 -> ARABIC LETTER AIN '\u063a' # 0xE4 -> ARABIC LETTER GHAIN '\u0641' # 0xE5 -> ARABIC LETTER FEH '\xb5' # 0xE6 -> MICRO SIGN '\u0642' # 0xE7 -> ARABIC LETTER QAF '\u0643' # 0xE8 -> ARABIC LETTER KAF '\u0644' # 0xE9 -> ARABIC LETTER LAM '\u0645' # 0xEA -> ARABIC LETTER MEEM '\u0646' # 0xEB -> ARABIC LETTER NOON '\u0647' # 0xEC -> ARABIC LETTER HEH '\u0648' # 0xED -> ARABIC LETTER WAW '\u0649' # 0xEE -> ARABIC LETTER ALEF MAKSURA '\u064a' # 0xEF -> ARABIC LETTER YEH '\u2261' # 0xF0 -> IDENTICAL TO '\u064b' # 0xF1 -> ARABIC FATHATAN '\u064c' # 0xF2 -> ARABIC DAMMATAN '\u064d' # 0xF3 -> ARABIC KASRATAN '\u064e' # 0xF4 -> ARABIC FATHA '\u064f' # 0xF5 -> ARABIC DAMMA '\u0650' # 0xF6 -> ARABIC KASRA '\u2248' # 0xF7 -> ALMOST EQUAL TO '\xb0' # 0xF8 -> DEGREE SIGN '\u2219' # 0xF9 -> BULLET OPERATOR '\xb7' # 0xFA -> MIDDLE DOT '\u221a' # 0xFB -> SQUARE ROOT '\u207f' # 0xFC -> SUPERSCRIPT LATIN SMALL LETTER N '\xb2' # 0xFD -> SUPERSCRIPT TWO '\u25a0' # 0xFE -> BLACK SQUARE '\xa0' # 0xFF -> NO-BREAK SPACE ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
bsd-3-clause
d3d44a20db0c119e911431142bf92aa0
43.291262
77
0.526902
3.265282
false
false
false
false
brython-dev/brython
www/src/Lib/test/test__osx_support.py
3
13947
""" Test suite for _osx_support: shared OS X support functions. """ import os import platform import stat import sys import unittest from test.support import os_helper import _osx_support @unittest.skipUnless(sys.platform.startswith("darwin"), "requires OS X") class Test_OSXSupport(unittest.TestCase): def setUp(self): self.maxDiff = None self.prog_name = 'bogus_program_xxxx' self.temp_path_dir = os.path.abspath(os.getcwd()) self.env = self.enterContext(os_helper.EnvironmentVarGuard()) for cv in ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS', 'BLDSHARED', 'LDSHARED', 'CC', 'CXX', 'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS', 'PY_CORE_CFLAGS', 'PY_CORE_LDFLAGS'): if cv in self.env: self.env.unset(cv) def add_expected_saved_initial_values(self, config_vars, expected_vars): # Ensure that the initial values for all modified config vars # are also saved with modified keys. expected_vars.update(('_OSX_SUPPORT_INITIAL_'+ k, config_vars[k]) for k in config_vars if config_vars[k] != expected_vars[k]) def test__find_executable(self): if self.env['PATH']: self.env['PATH'] = self.env['PATH'] + ':' self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir) os_helper.unlink(self.prog_name) self.assertIsNone(_osx_support._find_executable(self.prog_name)) self.addCleanup(os_helper.unlink, self.prog_name) with open(self.prog_name, 'w') as f: f.write("#!/bin/sh\n/bin/echo OK\n") os.chmod(self.prog_name, stat.S_IRWXU) self.assertEqual(self.prog_name, _osx_support._find_executable(self.prog_name)) def test__read_output(self): if self.env['PATH']: self.env['PATH'] = self.env['PATH'] + ':' self.env['PATH'] = self.env['PATH'] + os.path.abspath(self.temp_path_dir) os_helper.unlink(self.prog_name) self.addCleanup(os_helper.unlink, self.prog_name) with open(self.prog_name, 'w') as f: f.write("#!/bin/sh\n/bin/echo ExpectedOutput\n") os.chmod(self.prog_name, stat.S_IRWXU) self.assertEqual('ExpectedOutput', _osx_support._read_output(self.prog_name)) def test__find_build_tool(self): 
out = _osx_support._find_build_tool('cc') self.assertTrue(os.path.isfile(out), 'cc not found - check xcode-select') def test__get_system_version(self): self.assertTrue(platform.mac_ver()[0].startswith( _osx_support._get_system_version())) def test__remove_original_values(self): config_vars = { 'CC': 'gcc-test -pthreads', } expected_vars = { 'CC': 'clang -pthreads', } cv = 'CC' newvalue = 'clang -pthreads' _osx_support._save_modified_value(config_vars, cv, newvalue) self.assertNotEqual(expected_vars, config_vars) _osx_support._remove_original_values(config_vars) self.assertEqual(expected_vars, config_vars) def test__save_modified_value(self): config_vars = { 'CC': 'gcc-test -pthreads', } expected_vars = { 'CC': 'clang -pthreads', } self.add_expected_saved_initial_values(config_vars, expected_vars) cv = 'CC' newvalue = 'clang -pthreads' _osx_support._save_modified_value(config_vars, cv, newvalue) self.assertEqual(expected_vars, config_vars) def test__save_modified_value_unchanged(self): config_vars = { 'CC': 'gcc-test -pthreads', } expected_vars = config_vars.copy() cv = 'CC' newvalue = 'gcc-test -pthreads' _osx_support._save_modified_value(config_vars, cv, newvalue) self.assertEqual(expected_vars, config_vars) def test__supports_universal_builds(self): import platform mac_ver_tuple = tuple(int(i) for i in platform.mac_ver()[0].split('.')[0:2]) self.assertEqual(mac_ver_tuple >= (10, 4), _osx_support._supports_universal_builds()) def test__find_appropriate_compiler(self): compilers = ( ('gcc-test', 'i686-apple-darwin11-llvm-gcc-4.2'), ('clang', 'clang version 3.1'), ) config_vars = { 'CC': 'gcc-test -pthreads', 'CXX': 'cc++-test', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. 
-isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-test -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-test -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } expected_vars = { 'CC': 'clang -pthreads', 'CXX': 'clang++', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'clang -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'clang -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) suffix = (':' + self.env['PATH']) if self.env['PATH'] else '' self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix for c_name, c_output in compilers: os_helper.unlink(c_name) self.addCleanup(os_helper.unlink, c_name) with open(c_name, 'w') as f: f.write("#!/bin/sh\n/bin/echo " + c_output) os.chmod(c_name, stat.S_IRWXU) self.assertEqual(expected_vars, _osx_support._find_appropriate_compiler( config_vars)) def test__remove_universal_flags(self): config_vars = { 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } expected_vars = { 'CFLAGS': '-fno-strict-aliasing -g -O3 ', 'LDFLAGS': ' -g', 'CPPFLAGS': '-I. 
', 'BLDSHARED': 'gcc-4.0 -bundle -g', 'LDSHARED': 'gcc-4.0 -bundle -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) self.assertEqual(expected_vars, _osx_support._remove_universal_flags( config_vars)) def test__remove_universal_flags_alternate(self): # bpo-38360: also test the alternate single-argument form of -isysroot config_vars = { 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot/Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot/Developer/SDKs/MacOSX10.4u.sdk -g', } expected_vars = { 'CFLAGS': '-fno-strict-aliasing -g -O3 ', 'LDFLAGS': ' -g', 'CPPFLAGS': '-I. ', 'BLDSHARED': 'gcc-4.0 -bundle -g', 'LDSHARED': 'gcc-4.0 -bundle -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) self.assertEqual(expected_vars, _osx_support._remove_universal_flags( config_vars)) def test__remove_unsupported_archs(self): config_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } expected_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch i386 ', 'LDFLAGS': ' -arch i386 -g', 'CPPFLAGS': '-I. 
-isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) suffix = (':' + self.env['PATH']) if self.env['PATH'] else '' self.env['PATH'] = os.path.abspath(self.temp_path_dir) + suffix c_name = 'clang' os_helper.unlink(c_name) self.addCleanup(os_helper.unlink, c_name) # exit status 255 means no PPC support in this compiler chain with open(c_name, 'w') as f: f.write("#!/bin/sh\nexit 255") os.chmod(c_name, stat.S_IRWXU) self.assertEqual(expected_vars, _osx_support._remove_unsupported_archs( config_vars)) def test__override_all_archs(self): self.env['ARCHFLAGS'] = '-arch x86_64' config_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.4u.sdk -g', } expected_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch x86_64', 'LDFLAGS': ' -g -arch x86_64', 'CPPFLAGS': '-I. -isysroot /Developer/SDKs/MacOSX10.4u.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -g -arch x86_64', 'LDSHARED': 'gcc-4.0 -bundle -isysroot ' '/Developer/SDKs/MacOSX10.4u.sdk -g -arch x86_64', } self.add_expected_saved_initial_values(config_vars, expected_vars) self.assertEqual(expected_vars, _osx_support._override_all_archs( config_vars)) def test__check_for_unavailable_sdk(self): config_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.1.sdk', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. 
-isysroot /Developer/SDKs/MacOSX10.1.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.1.sdk -g', } expected_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ' ' ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. ', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' ' -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) self.assertEqual(expected_vars, _osx_support._check_for_unavailable_sdk( config_vars)) def test__check_for_unavailable_sdk_alternate(self): # bpo-38360: also test the alternate single-argument form of -isysroot config_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ' '-isysroot/Developer/SDKs/MacOSX10.1.sdk', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. -isysroot/Developer/SDKs/MacOSX10.1.sdk', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' '-isysroot/Developer/SDKs/MacOSX10.1.sdk -g', } expected_vars = { 'CC': 'clang', 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ' ' ', 'LDFLAGS': '-arch ppc -arch i386 -g', 'CPPFLAGS': '-I. 
', 'BLDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 -g', 'LDSHARED': 'gcc-4.0 -bundle -arch ppc -arch i386 ' ' -g', } self.add_expected_saved_initial_values(config_vars, expected_vars) self.assertEqual(expected_vars, _osx_support._check_for_unavailable_sdk( config_vars)) def test_get_platform_osx(self): # Note, get_platform_osx is currently tested more extensively # indirectly by test_sysconfig and test_distutils config_vars = { 'CFLAGS': '-fno-strict-aliasing -g -O3 -arch ppc -arch i386 ' '-isysroot /Developer/SDKs/MacOSX10.1.sdk', 'MACOSX_DEPLOYMENT_TARGET': '10.6', } result = _osx_support.get_platform_osx(config_vars, ' ', ' ', ' ') self.assertEqual(('macosx', '10.6', 'fat'), result) if __name__ == "__main__": unittest.main()
bsd-3-clause
6e7045c2b326dc53cabcca033c584d3c
41.782209
81
0.533663
3.325465
false
true
false
false
brython-dev/brython
www/src/Lib/itertools.py
10
15909
import operator class accumulate: def __init__(self, iterable, func = operator.add): self.it = iter(iterable) self._total = None self.func = func def __iter__(self): return self def __next__(self): if not self._total: self._total = next(self.it) return self._total else: element = next(self.it) try: self._total = self.func(self._total, element) except: raise TypeError("unsupported operand type") return self._total ## Adapted from: ## https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-34 class chain: def __init__(self, *iterables): self._iterables_iter = iter(map(iter, iterables)) # little trick for the first chain.__next__() call self._cur_iterable_iter = iter([]) def __iter__(self): return self def __next__(self): while True: try: return next(self._cur_iterable_iter) except StopIteration: self._cur_iterable_iter = next(self._iterables_iter) @classmethod def from_iterable(cls, iterable): for it in iterable: for element in it: yield element class combinations: def __init__(self, iterable, r): self.pool = tuple(iterable) self.n = len(self.pool) self.r = r self.indices = list(range(self.r)) self.zero = False def __iter__(self): return self def __next__(self): if self.r > self.n: raise StopIteration if not self.zero: self.zero = True return tuple(self.pool[i] for i in self.indices) else: try: for i in reversed(range(self.r)): if self.indices[i] != i + self.n - self.r: break self.indices[i] += 1 for j in range(i+1, self.r): self.indices[j] = self.indices[j-1] + 1 return tuple(self.pool[i] for i in self.indices) except: raise StopIteration class combinations_with_replacement: def __init__(self, iterable, r): self.pool = tuple(iterable) self.n = len(self.pool) self.r = r self.indices = [0] * self.r self.zero = False def __iter__(self): return self def __next__(self): if not self.n and self.r: raise StopIteration if not self.zero: self.zero = True return tuple(self.pool[i] for i in self.indices) else: try: for i in reversed(range(self.r)): if 
self.indices[i] != self.n - 1: break self.indices[i:] = [self.indices[i] + 1] * (self.r - i) return tuple(self.pool[i] for i in self.indices) except: raise StopIteration ## Literally copied from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-63 class compress: def __init__(self, data, selectors): self.data = iter(data) self.selectors = iter(selectors) def __iter__(self): return self def __next__(self): while True: next_item = next(self.data) next_selector = next(self.selectors) if bool(next_selector): return next_item ## Adapted from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-79 ## I mimicked the > python3.1 behavior class count: """ Input is an int or a float. The original Python 3 implementation includes also complex numbers... but it still is not implemented in Brython as complex type is NotImplemented """ def __init__(self, start = 0, step = 1): if not isinstance(start, (int, float)): raise TypeError('a number is required') self.times = start - step self.step = step def __iter__(self): return self def __next__(self): self.times += self.step return self.times def __repr__(self): return 'count(%d)' % (self.times + self.step) ## Literally copied from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-112 class cycle: def __init__(self, iterable): self._cur_iter = iter(iterable) self._saved = [] self._must_save = True def __iter__(self): return self def __next__(self): try: next_elt = next(self._cur_iter) if self._must_save: self._saved.append(next_elt) except StopIteration: self._cur_iter = iter(self._saved) next_elt = next(self._cur_iter) self._must_save = False return next_elt ## Literally copied from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-149 class dropwhile: def __init__(self, predicate, iterable): self._predicate = predicate self._iter = iter(iterable) self._dropped = False def __iter__(self): return self def __next__(self): value = 
next(self._iter) if self._dropped: return value while self._predicate(value): value = next(self._iter) self._dropped = True return value ## Adapted from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-261 class filterfalse: def __init__(self, predicate, iterable): # Make sure iterable *IS* iterable self._iter = iter(iterable) if predicate is None: self._predicate = bool else: self._predicate = predicate def __iter__(self): return self def __next__(self): next_elt = next(self._iter) while True: if not self._predicate(next_elt): return next_elt next_elt = next(self._iter) class groupby: # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B # [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D def __init__(self, iterable, key=None): if key is None: key = lambda x: x self.keyfunc = key self.it = iter(iterable) self.tgtkey = self.currkey = self.currvalue = object() def __iter__(self): return self def __next__(self): while self.currkey == self.tgtkey: self.currvalue = next(self.it) # Exit on StopIteration self.currkey = self.keyfunc(self.currvalue) self.tgtkey = self.currkey return (self.currkey, self._grouper(self.tgtkey)) def _grouper(self, tgtkey): while self.currkey == tgtkey: yield self.currvalue self.currvalue = next(self.it) # Exit on StopIteration self.currkey = self.keyfunc(self.currvalue) ## adapted from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-323 class islice: def __init__(self, iterable, *args): s = slice(*args) self.start, self.stop, self.step = s.start or 0, s.stop, s.step if not isinstance(self.start, int): raise ValueError("Start argument must be an integer") if self.stop != None and not isinstance(self.stop, int): raise ValueError("Stop argument must be an integer or None") if self.step is None: self.step = 1 if self.start<0 or (self.stop != None and self.stop<0 ) or self.step<=0: raise ValueError("indices for islice() must be positive") self.it = iter(iterable) self.donext 
= None self.cnt = 0 def __iter__(self): return self def __next__(self): nextindex = self.start if self.stop != None and nextindex >= self.stop: raise StopIteration while self.cnt <= nextindex: nextitem = next(self.it) self.cnt += 1 self.start += self.step return nextitem class permutations: def __init__(self, iterable, r = None): self.pool = tuple(iterable) self.n = len(self.pool) self.r = self.n if r is None else r self.indices = list(range(self.n)) self.cycles = list(range(self.n, self.n - self.r, -1)) self.zero = False self.stop = False def __iter__(self): return self def __next__(self): indices = self.indices if self.r > self.n: raise StopIteration if not self.zero: self.zero = True return tuple(self.pool[i] for i in indices[:self.r]) i = self.r - 1 while i >= 0: j = self.cycles[i] - 1 if j > 0: self.cycles[i] = j indices[i], indices[-j] = indices[-j], indices[i] return tuple(self.pool[i] for i in indices[:self.r]) self.cycles[i] = len(indices) - i n1 = len(indices) - 1 assert n1 >= 0 num = indices[i] for k in range(i, n1): indices[k] = indices[k+1] indices[n1] = num i -= 1 raise StopIteration # copied from Python documentation on itertools.product def product(*args, repeat=1): # product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy # product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111 pools = [tuple(pool) for pool in args] * repeat result = [[]] for pool in pools: result = [x+[y] for x in result for y in pool] for prod in result: yield tuple(prod) ## adapted from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-392 ## (Brython) ## renamed to _product : the implementation fails for product('abc', []) ## with CPython 3.x class _product: def __init__(self, *args, **kw): if len(kw) > 1: raise TypeError("product() takes at most 1 argument (%d given)" % len(kw)) self.repeat = kw.get('repeat', 1) if not isinstance(self.repeat, int): raise TypeError("integer argument expected, got %s" % type(self.repeat)) self.gears = [x for x in 
args] * self.repeat self.num_gears = len(self.gears) # initialization of indicies to loop over self.indicies = [(0, len(self.gears[x])) for x in range(0, self.num_gears)] self.cont = True self.zero = False def roll_gears(self): # Starting from the end of the gear indicies work to the front # incrementing the gear until the limit is reached. When the limit # is reached carry operation to the next gear should_carry = True for n in range(0, self.num_gears): nth_gear = self.num_gears - n - 1 if should_carry: count, lim = self.indicies[nth_gear] count += 1 if count == lim and nth_gear == 0: self.cont = False if count == lim: should_carry = True count = 0 else: should_carry = False self.indicies[nth_gear] = (count, lim) else: break def __iter__(self): return self def __next__(self): if self.zero: raise StopIteration if self.repeat > 0: if not self.cont: raise StopIteration l = [] for x in range(0, self.num_gears): index, limit = self.indicies[x] print('itertools 353',self.gears,x,index) l.append(self.gears[x][index]) self.roll_gears() return tuple(l) elif self.repeat == 0: self.zero = True return () else: raise ValueError("repeat argument cannot be negative") ## Literally copied from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-441 class repeat: def __init__(self, obj, times=None): self._obj = obj if times is not None: range(times) # Raise a TypeError if times < 0: times = 0 self._times = times def __iter__(self): return self def __next__(self): # __next__() *need* to decrement self._times when consumed if self._times is not None: if self._times <= 0: raise StopIteration() self._times -= 1 return self._obj def __repr__(self): if self._times is not None: return 'repeat(%r, %r)' % (self._obj, self._times) else: return 'repeat(%r)' % (self._obj,) def __len__(self): if self._times == -1 or self._times is None: raise TypeError("len() of uniszed object") return self._times ## Adapted from 
##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-489 class starmap(object): def __init__(self, function, iterable): self._func = function self._iter = iter(iterable) def __iter__(self): return self def __next__(self): t = next(self._iter) return self._func(*t) ## Literally copied from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-520 class takewhile(object): def __init__(self, predicate, iterable): self._predicate = predicate self._iter = iter(iterable) def __iter__(self): return self def __next__(self): value = next(self._iter) if not self._predicate(value): raise StopIteration() return value ## Almost literal from ##https://bitbucket.org/pypy/pypy/src/c1aa74c06e86/lib_pypy/itertools.py#cl-547 class TeeData(object): def __init__(self, iterator): self.data = [] self._iter = iterator def __getitem__(self, i): # iterates until 'i' if not done yet while i>= len(self.data): self.data.append(next(self._iter)) return self.data[i] class TeeObject(object): def __init__(self, iterable=None, tee_data=None): if tee_data: self.tee_data = tee_data self.pos = 0 # <=> Copy constructor elif isinstance(iterable, TeeObject): self.tee_data = iterable.tee_data self.pos = iterable.pos else: self.tee_data = TeeData(iter(iterable)) self.pos = 0 def __next__(self): data = self.tee_data[self.pos] self.pos += 1 return data def __iter__(self): return self def tee(iterable, n=2): if isinstance(iterable, TeeObject): return tuple([iterable] + [TeeObject(tee_data=iterable.tee_data) for i in range(n - 1)]) tee_data = TeeData(iter(iterable)) return tuple([TeeObject(tee_data=tee_data) for i in range(n)]) class zip_longest: def __init__(self, *args, fillvalue = None): self.args = [iter(arg) for arg in args] self.fillvalue = fillvalue self.units = len(args) def __iter__(self): return self def __next__(self): temp = [] nb = 0 for i in range(self.units): try: temp.append(next(self.args[i])) nb += 1 except StopIteration: 
temp.append(self.fillvalue) if nb==0: raise StopIteration return tuple(temp)
bsd-3-clause
77184593e8361758ae2e12285ba1b7cd
30.50297
87
0.533534
3.858598
false
false
false
false
brython-dev/brython
www/benchmarks/performance/bm_nbody.py
18
4440
#!/usr/bin/env python """N-body benchmark from the Computer Language Benchmarks Game. This is intended to support Unladen Swallow's perf.py. Accordingly, it has been modified from the Shootout version: - Accept standard Unladen Swallow benchmark options. - Run report_energy()/advance() in a loop. This currently requires Python 2.6 because of itertools.combinations(). """ # Pulled from http://shootout.alioth.debian.org/u64q/benchmark.php?test=nbody&lang=python&id=4 # Contributed by Kevin Carson. # Modified by Tupteq, Fredrik Johansson, and Daniel Nanz. __contact__ = "collinwinter@google.com (Collin Winter)" # Python imports import itertools #import optparse import sys import time # Local imports import util PI = 3.14159265358979323 SOLAR_MASS = 4 * PI * PI DAYS_PER_YEAR = 365.24 BODIES = { 'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS), 'jupiter': ([4.84143144246472090e+00, -1.16032004402742839e+00, -1.03622044471123109e-01], [1.66007664274403694e-03 * DAYS_PER_YEAR, 7.69901118419740425e-03 * DAYS_PER_YEAR, -6.90460016972063023e-05 * DAYS_PER_YEAR], 9.54791938424326609e-04 * SOLAR_MASS), 'saturn': ([8.34336671824457987e+00, 4.12479856412430479e+00, -4.03523417114321381e-01], [-2.76742510726862411e-03 * DAYS_PER_YEAR, 4.99852801234917238e-03 * DAYS_PER_YEAR, 2.30417297573763929e-05 * DAYS_PER_YEAR], 2.85885980666130812e-04 * SOLAR_MASS), 'uranus': ([1.28943695621391310e+01, -1.51111514016986312e+01, -2.23307578892655734e-01], [2.96460137564761618e-03 * DAYS_PER_YEAR, 2.37847173959480950e-03 * DAYS_PER_YEAR, -2.96589568540237556e-05 * DAYS_PER_YEAR], 4.36624404335156298e-05 * SOLAR_MASS), 'neptune': ([1.53796971148509165e+01, -2.59193146099879641e+01, 1.79258772950371181e-01], [2.68067772490389322e-03 * DAYS_PER_YEAR, 1.62824170038242295e-03 * DAYS_PER_YEAR, -9.51592254519715870e-05 * DAYS_PER_YEAR], 5.15138902046611451e-05 * SOLAR_MASS) } SYSTEM = BODIES.values() PAIRS = list(itertools.combinations(SYSTEM, 2)) def advance(dt, n, bodies=SYSTEM, pairs=PAIRS): 
for i in range(n): for (([x1, y1, z1], v1, m1), ([x2, y2, z2], v2, m2)) in pairs: dx = x1 - x2 dy = y1 - y2 dz = z1 - z2 mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5)) b1m = m1 * mag b2m = m2 * mag v1[0] -= dx * b2m v1[1] -= dy * b2m v1[2] -= dz * b2m v2[0] += dx * b1m v2[1] += dy * b1m v2[2] += dz * b1m for (r, [vx, vy, vz], m) in bodies: r[0] += dt * vx r[1] += dt * vy r[2] += dt * vz def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0): for (((x1, y1, z1), v1, m1), ((x2, y2, z2), v2, m2)) in pairs: dx = x1 - x2 dy = y1 - y2 dz = z1 - z2 e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5) for (r, [vx, vy, vz], m) in bodies: e += m * (vx * vx + vy * vy + vz * vz) / 2. return e def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0): for (r, [vx, vy, vz], m) in bodies: px -= vx * m py -= vy * m pz -= vz * m (r, v, m) = ref v[0] = px / m v[1] = py / m v[2] = pz / m def test_nbody(iterations): # Warm-up runs. report_energy() advance(0.01, 20000) report_energy() times = [] for _ in range(iterations): t0 = time.time() report_energy() advance(0.01, 20000) report_energy() t1 = time.time() times.append(t1 - t0) return times def run(geo_mean=True, num_runs=10): return util.run_benchmark(geo_mean, num_runs, test_nbody) #if __name__ == '__main__': # parser = optparse.OptionParser( # usage="%prog [options]", # description=("Run the n-body benchmark.")) # util.add_standard_options_to(parser) # options, args = parser.parse_args() # # offset_momentum(BODIES['sun']) # Set up global state # util.run_benchmark(options, options.num_runs, test_nbody)
bsd-3-clause
7f74ed9e70bb90d37de0538f7981e434
29.62069
94
0.543694
2.676311
false
false
false
false
brython-dev/brython
www/gallery/highcharts/examples/3d-column-interactive/chart.py
5
1245
from browser import document, window b_highchart = window.Highcharts.Chart.new chart = b_highchart({ 'chart': { 'renderTo': 'container', 'type': 'column', 'margin': 75, 'options3d': { 'enabled': True, 'alpha': 15, 'beta': 15, 'depth': 50, 'viewDistance': 25 } }, 'title': { 'text': 'Chart rotation demo' }, 'subtitle': { 'text': 'Test options by dragging the sliders below' }, 'plotOptions': { 'column': { 'depth': 25 } }, 'series': [{ 'data': [29.9, 71.5, 106.4, 129.2, 144.0, 176.0, 135.6, 148.5, 216.4, 194.1, 95.6, 54.4] }] }) def showValues(): document['R0-value'].html = chart.options.chart.options3d.alpha document['R1-value'].html = chart.options.chart.options3d.beta showValues() # activate the sliders def change_alpha(ev): chart.options.chart.options3d.alpha = ev.target.value showValues() chart.redraw(False) def change_beta(ev): chart.options.chart.options3d.beta = ev.target.value showValues() chart.redraw(False) document['R0'].bind('change', change_alpha) document['R1'].bind('change', change_beta)
bsd-3-clause
612e4db1b61b14369aabaae6fe9bd837
22.961538
96
0.559839
3.242188
false
false
false
false
brython-dev/brython
www/src/Lib/logging/__init__.py
1
78286
# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose and without fee is hereby granted, # provided that the above copyright notice appear in all copies and that # both that copyright notice and this permission notice appear in # supporting documentation, and that the name of Vinay Sajip # not be used in advertising or publicity pertaining to distribution # of the software without specific, written prior permission. # VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING # ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL # VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR # ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER # IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """ Logging package for Python. Based on PEP 282 and comments thereto in comp.lang.python. Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved. To use, simply 'import logging' and log away! 
""" import sys, os, time, io, re, traceback, warnings, weakref, collections.abc from string import Template from string import Formatter as StrFormatter __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 'captureWarnings', 'critical', 'debug', 'disable', 'error', 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown', 'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory', 'lastResort', 'raiseExceptions'] import threading __author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" __status__ = "production" # The following module attributes are no longer updated. __version__ = "0.5.1.2" __date__ = "07 February 2010" #--------------------------------------------------------------------------- # Miscellaneous module data #--------------------------------------------------------------------------- # #_startTime is used as the base when calculating the relative time of events # _startTime = time.time() # #raiseExceptions is used to see if exceptions during handling should be #propagated # raiseExceptions = True # # If you don't want threading information in the log, set this to zero # logThreads = True # # If you don't want multiprocessing information in the log, set this to zero # logMultiprocessing = True # # If you don't want process information in the log, set this to zero # logProcesses = True #--------------------------------------------------------------------------- # Level related stuff #--------------------------------------------------------------------------- # # Default levels and level names, these can be replaced with any positive set # of values having corresponding names. 
There is a pseudo-level, NOTSET, which # is only really there as a lower limit for user-defined levels. Handlers and # loggers are initialized with NOTSET so that they will log all messages, even # at user-defined levels. # CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 NOTSET = 0 _levelToName = { CRITICAL: 'CRITICAL', ERROR: 'ERROR', WARNING: 'WARNING', INFO: 'INFO', DEBUG: 'DEBUG', NOTSET: 'NOTSET', } _nameToLevel = { 'CRITICAL': CRITICAL, 'FATAL': FATAL, 'ERROR': ERROR, 'WARN': WARNING, 'WARNING': WARNING, 'INFO': INFO, 'DEBUG': DEBUG, 'NOTSET': NOTSET, } def getLevelName(level): """ Return the textual representation of logging level 'level'. If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) then you get the corresponding string. If you have associated levels with names using addLevelName then the name you have associated with 'level' is returned. If a numeric value corresponding to one of the defined levels is passed in, the corresponding string representation is returned. Otherwise, the string "Level %s" % level is returned. """ # See Issues #22386, #27937 and #29220 for why it's this way result = _levelToName.get(level) if result is not None: return result result = _nameToLevel.get(level) if result is not None: return result return "Level %s" % level def addLevelName(level, levelName): """ Associate 'levelName' with 'level'. This is used when converting levels to text during message formatting. """ _acquireLock() try: #unlikely to cause an exception, but you never know... 
        _levelToName[level] = levelName
        _nameToLevel[levelName] = level
    finally:
        _releaseLock()

if hasattr(sys, '_getframe'):
    # NOTE(review): depth 3 assumes this is reached via findCaller() below —
    # confirm the call chain if findCaller changes.
    currentframe = lambda: sys._getframe(3)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except Exception:
            return sys.exc_info()[2].tb_frame.f_back

#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#

_srcfile = os.path.normcase(addLevelName.__code__.co_filename)

# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called. You can also do this if you want to avoid
# the overhead of fetching caller information, even when _getframe() is
# available.
#if not hasattr(sys, '_getframe'):
#    _srcfile = None


def _checkLevel(level):
    # Normalize a level given either as an int or as a registered level
    # name; raises for unknown names and non-int/non-str inputs.
    if isinstance(level, int):
        rv = level
    elif str(level) == level:
        if level not in _nameToLevel:
            raise ValueError("Unknown level: %r" % level)
        rv = _nameToLevel[level]
    else:
        raise TypeError("Level not an integer or a valid string: %r"
                        % level)
    return rv

#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads.
#Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = threading.RLock()

def _acquireLock():
    """
    Acquire the module-level lock for serializing access to shared data.

    This should be released with _releaseLock().
    """
    if _lock:
        _lock.acquire()

def _releaseLock():
    """
    Release the module-level lock acquired by calling _acquireLock().
    """
    if _lock:
        _lock.release()


# Prevent a held logging lock from blocking a child from logging.

if not hasattr(os, 'register_at_fork'):  # Windows and friends.
    def _register_at_fork_reinit_lock(instance):
        pass  # no-op when os.register_at_fork does not exist.
else:
    # A collection of instances with a _at_fork_reinit method (logging.Handler)
    # to be called in the child after forking.  The weakref avoids us keeping
    # discarded Handler instances alive.
    _at_fork_reinit_lock_weakset = weakref.WeakSet()

    def _register_at_fork_reinit_lock(instance):
        _acquireLock()
        try:
            _at_fork_reinit_lock_weakset.add(instance)
        finally:
            _releaseLock()

    def _after_at_fork_child_reinit_locks():
        for handler in _at_fork_reinit_lock_weakset:
            handler._at_fork_reinit()

        # _acquireLock() was called in the parent before forking.
        # The lock is reinitialized to unlocked state.
        _lock._at_fork_reinit()

    os.register_at_fork(before=_acquireLock,
                        after_in_child=_after_at_fork_child_reinit_locks,
                        after_in_parent=_releaseLock)


#---------------------------------------------------------------------------
#   The logging record
#---------------------------------------------------------------------------

class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record. The
    record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #       logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct
        self.msecs = (ct - int(ct)) * 1000
        self.relativeCreated = (self.created - _startTime) * 1000
        if logThreads:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

    def __repr__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg

#
#   Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord

def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory

def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """

    return _logRecordFactory

def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary,
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv


#---------------------------------------------------------------------------
#   Formatter classes and functions
#---------------------------------------------------------------------------

_str_formatter = StrFormatter()
del StrFormatter


class PercentStyle(object):

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'
    validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)

    def __init__(self, fmt):
        self._fmt = fmt or self.default_format

    def usesTime(self):
        return self._fmt.find(self.asctime_search) >= 0

    def validate(self):
        """Validate the input format, ensure it matches the correct style"""
        if not self.validation_pattern.search(self._fmt):
            raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))

    def _format(self, record):
        return self._fmt % record.__dict__

    def format(self, record):
        try:
            return self._format(record)
        except KeyError as e:
            raise ValueError('Formatting field not found in record: %s' % e)


class StrFormatStyle(PercentStyle):
    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    fmt_spec = re.compile(r'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$', re.I)
    field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')

    def _format(self, record):
        return self._fmt.format(**record.__dict__)

    def validate(self):
        """Validate the input format, ensure it is the correct string formatting style"""
        fields = set()
        try:
            for _, fieldname, spec, conversion in _str_formatter.parse(self._fmt):
                if fieldname:
                    if not self.field_spec.match(fieldname):
                        raise ValueError('invalid field name/expression: %r' % fieldname)
                    fields.add(fieldname)
                if conversion and conversion not in 'rsa':
                    raise ValueError('invalid conversion: %r' % conversion)
                if spec and not self.fmt_spec.match(spec):
                    raise ValueError('bad specifier: %r' % spec)
        except ValueError as e:
            raise ValueError('invalid format: %s' % e)
        if not fields:
            raise ValueError('invalid format: no fields')


class StringTemplateStyle(PercentStyle):
    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, fmt):
        self._fmt = fmt or self.default_format
        self._tpl = Template(self._fmt)

    def usesTime(self):
        fmt = self._fmt
        return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0

    def validate(self):
        pattern = Template.pattern
        fields = set()
        for m in pattern.finditer(self._fmt):
            d = m.groupdict()
            if d['named']:
                fields.add(d['named'])
            elif d['braced']:
                fields.add(d['braced'])
            elif m.group(0) == '$':
                raise ValueError('invalid format: bare \'$\' not allowed')
        if not fields:
            raise ValueError('invalid format: no fields')

    def _format(self, record):
        return self._tpl.substitute(**record.__dict__)


BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"

_STYLES = {
    '%': (PercentStyle, BASIC_FORMAT),
    '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
    '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}

class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    the style-dependent default value, "%(message)s", "{message}", or
    "${message}", is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time()
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(process)d         Process ID (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """

    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None, style='%', validate=True):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                             _STYLES.keys()))
        self._style = _STYLES[style][0](fmt)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt

    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
        The resulting string is returned. This function uses a user-configurable
        function to convert the creation time to a tuple. By default,
        time.localtime() is used; to change this for a particular formatter
        instance, set the 'converter' attribute to a function with the same
        signature as time.localtime() or time.gmtime(). To change it for all
        formatters, for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            s = time.strftime(self.default_time_format, ct)
            if self.default_msec_format:
                s = self.default_msec_format % (s, record.msecs)
        return s

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, None, sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            s = s[:-1]
        return s

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        return self._style.format(record)

    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime(), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s

#
#   The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()

class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        rv = ""
        if len(records) > 0:
            rv = rv + self.formatHeader(records)
            for record in records:
                rv = rv + self.linefmt.format(record)
            rv = rv + self.formatFooter(records)
        return rv

#---------------------------------------------------------------------------
#   Filter classes and functions
#---------------------------------------------------------------------------

class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Is the specified record to be logged? Returns 0 for no, nonzero for
        yes. If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return True
        elif self.name == record.name:
            return True
        elif record.name.find(self.name, 0, self.nlen) != 0:
            return False
        # Only allow true descendants: the next character after the prefix
        # must be the hierarchy separator.
        return (record.name[self.nlen] == ".")

class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler.
        """
        if not (filter in self.filters):
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this and the record is then dropped. Returns a zero value if a record
        is to be dropped, else non-zero.

        .. versionchanged:: 3.2

           Allow filters to be just callables.
        """
        rv = True
        for f in self.filters:
            if hasattr(f, 'filter'):
                result = f.filter(record)
            else:
                result = f(record) # assume callable - will raise if not
            if not result:
                rv = False
                break
        return rv

#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------

_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized

def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
    acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
    if acquire and release and handlers:
        acquire()
        try:
            if wr in handlers:
                handlers.remove(wr)
        finally:
            release()

def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    _acquireLock()
    try:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))
    finally:
        _releaseLock()

class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()

    def get_name(self):
        return self._name

    def set_name(self, name):
        # Rename under the module lock so the shared _handlers map stays
        # consistent.
        _acquireLock()
        try:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self
        finally:
            _releaseLock()

    name = property(get_name, set_name)

    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        self.lock = threading.RLock()
        _register_at_fork_reinit_lock(self)

    def _at_fork_reinit(self):
        self.lock._at_fork_reinit()

    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()

    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()

    def setLevel(self, level):
        """
        Set the logging level of this handler.  level must be an int or a str.
        """
        self.level = _checkLevel(level)

    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)

    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')

    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock. Returns whether the filter passed the record for
        emission.
        """
        rv = self.filter(record)
        if rv:
            self.acquire()
            try:
                self.emit(record)
            finally:
                self.release()
        return rv

    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt

    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass

    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        _acquireLock()
        try:    #unlikely to raise an exception, but you never know...
            if self._name and self._name in _handlers:
                del _handlers[self._name]
        finally:
            _releaseLock()

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            t, v, tb = sys.exc_info()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(t, v, tb, None, sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = tb.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except RecursionError:  # See issue 36272
                    raise
                except Exception:
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError: #pragma: no cover
                pass    # see issue 5971
            finally:
                del t, v, tb

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s (%s)>' % (self.__class__.__name__, level)

class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        self.acquire()
        try:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()
        finally:
            self.release()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline.  If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream.  If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)

    def setStream(self, stream):
        """
        Sets the StreamHandler's stream to the specified value,
        if it is different.

        Returns the old stream, if the stream was changed, or None
        if it wasn't.
        """
        if stream is self.stream:
            result = None
        else:
            result = self.stream
            self.acquire()
            try:
                self.flush()
                self.stream = stream
            finally:
                self.release()
        return result

    def __repr__(self):
        level = getLevelName(self.level)
        name = getattr(self.stream, 'name', '')
        #  bpo-36015: name can be an int
        name = str(name)
        if name:
            name += ' '
        return '<%s %s(%s)>' % (self.__class__.__name__, name, level)


class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False,
                 errors=None):
        """
        Open the specified file and use it as the stream for logging.
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        self.delay = delay
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Closes the stream.
        """
        self.acquire()
        try:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                StreamHandler.close(self)
        finally:
            self.release()

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        return open(self.baseFilename, self.mode, encoding=self.encoding,
                    errors=self.errors)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.
        """
        if self.stream is None:
            self.stream = self._open()
        StreamHandler.emit(self, record)

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)


class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        Handler.__init__(self, level)

    @property
    def stream(self):
        return sys.stderr


_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort

#---------------------------------------------------------------------------
#   Manager classes and functions
#---------------------------------------------------------------------------

class PlaceHolder(object):
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        self.loggerMap = { alogger : None }

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None

#
#   Determine which class to use when instantiating loggers.
#

def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass

def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass

class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        self.loggerDict = {}
        self.loggerClass = None
        self.logRecordFactory = None

    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        _acquireLock()
        try:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        finally:
            _releaseLock()
        return rv

    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass

    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory

    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv

    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger

    def _clear_cache(self):
        """
        Clear the cache for all loggers in loggerDict
        Called when level changes are made
        """

        _acquireLock()
        for logger in self.loggerDict.values():
            if isinstance(logger, Logger):
                logger._cache.clear()
        self.root._cache.clear()
        _releaseLock()

#---------------------------------------------------------------------------
#   Logger classes and functions
#---------------------------------------------------------------------------

class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = _checkLevel(level)
        self.parent = None
        self.propagate = True
        self.handlers = []
        self.disabled = False
        self._cache = {}

    def setLevel(self, level):
        """
        Set the logging level of this logger.  level must be an int or a str.
        """
        self.level = _checkLevel(level)
        self.manager._clear_cache()

    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        warnings.warn("The 'warn' method is deprecated, "
            "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=1)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)

    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Convenience method for logging an ERROR with exception information.
        """
        self.error(msg, *args, exc_info=exc_info, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)

    fatal = critical

    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
        """
        if not isinstance(level, int):
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)

    def findCaller(self, stack_info=False, stacklevel=1):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
if f is not None: f = f.f_back orig_f = f while f and stacklevel > 1: f = f.f_back stacklevel -= 1 if not f: f = orig_f rv = "(unknown file)", 0, "(unknown function)", None while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) if filename == _srcfile: f = f.f_back continue sinfo = None if stack_info: sio = io.StringIO() sio.write('Stack (most recent call last):\n') traceback.print_stack(f, file=sio) sinfo = sio.getvalue() if sinfo[-1] == '\n': sinfo = sinfo[:-1] sio.close() rv = (co.co_filename, f.f_lineno, co.co_name, sinfo) break return rv def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None): """ A factory method which can be overridden in subclasses to create specialized LogRecords. """ rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, sinfo) if extra is not None: for key in extra: if (key in ["message", "asctime"]) or (key in rv.__dict__): raise KeyError("Attempt to overwrite %r in LogRecord" % key) rv.__dict__[key] = extra[key] return rv def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, stacklevel=1): """ Low-level logging routine which creates a LogRecord and then calls all the handlers of this logger to handle the record. """ sinfo = None if _srcfile: #IronPython doesn't track Python frames, so findCaller raises an #exception on some versions of IronPython. We trap it here so that #IronPython can use logging. 
try: fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel) except ValueError: # pragma: no cover fn, lno, func = "(unknown file)", 0, "(unknown function)" else: # pragma: no cover fn, lno, func = "(unknown file)", 0, "(unknown function)" if exc_info: if isinstance(exc_info, BaseException): exc_info = (type(exc_info), exc_info, exc_info.__traceback__) elif not isinstance(exc_info, tuple): exc_info = sys.exc_info() record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra, sinfo) self.handle(record) def handle(self, record): """ Call the handlers for the specified record. This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied. """ if (not self.disabled) and self.filter(record): self.callHandlers(record) def addHandler(self, hdlr): """ Add the specified handler to this logger. """ _acquireLock() try: if not (hdlr in self.handlers): self.handlers.append(hdlr) finally: _releaseLock() def removeHandler(self, hdlr): """ Remove the specified handler from this logger. """ _acquireLock() try: if hdlr in self.handlers: self.handlers.remove(hdlr) finally: _releaseLock() def hasHandlers(self): """ See if this logger has any handlers configured. Loop through all handlers for this logger and its parents in the logger hierarchy. Return True if a handler was found, else False. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger which is checked for the existence of handlers. """ c = self rv = False while c: if c.handlers: rv = True break if not c.propagate: break else: c = c.parent return rv def callHandlers(self, record): """ Pass a record to all relevant handlers. Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. 
Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called. """ c = self found = 0 while c: for hdlr in c.handlers: found = found + 1 if record.levelno >= hdlr.level: hdlr.handle(record) if not c.propagate: c = None #break out else: c = c.parent if (found == 0): if lastResort: if record.levelno >= lastResort.level: lastResort.handle(record) elif raiseExceptions and not self.manager.emittedNoHandlerWarning: sys.stderr.write("No handlers could be found for logger" " \"%s\"\n" % self.name) self.manager.emittedNoHandlerWarning = True def getEffectiveLevel(self): """ Get the effective level for this logger. Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found. """ logger = self while logger: if logger.level: return logger.level logger = logger.parent return NOTSET def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ if self.disabled: return False try: return self._cache[level] except KeyError: _acquireLock() try: if self.manager.disable >= level: is_enabled = self._cache[level] = False else: is_enabled = self._cache[level] = ( level >= self.getEffectiveLevel() ) finally: _releaseLock() return is_enabled def getChild(self, suffix): """ Get a logger which is a descendant to this one. This is a convenience method, such that logging.getLogger('abc').getChild('def.ghi') is the same as logging.getLogger('abc.def.ghi') It's useful, for example, when the parent logger is named using __name__ rather than a literal string. """ if self.root is not self: suffix = '.'.join((self.name, suffix)) return self.manager.getLogger(suffix) def __repr__(self): level = getLevelName(self.getEffectiveLevel()) return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) def __reduce__(self): # In general, only the root logger will not be accessible via its name. 
# However, the root logger's class has its own __reduce__ method. if getLogger(self.name) is not self: import pickle raise pickle.PicklingError('logger cannot be pickled') return getLogger, (self.name,) class RootLogger(Logger): """ A root logger is not that different to any other logger, except that it must have a logging level and there is only one instance of it in the hierarchy. """ def __init__(self, level): """ Initialize the logger with the name "root". """ Logger.__init__(self, "root", level) def __reduce__(self): return getLogger, () _loggerClass = Logger class LoggerAdapter(object): """ An adapter for loggers which makes it easier to specify contextual information in logging output. """ def __init__(self, logger, extra): """ Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired. You can effectively pass keyword arguments as shown in the following example: adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) """ self.logger = logger self.extra = extra def process(self, msg, kwargs): """ Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs. Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs. """ kwargs["extra"] = self.extra return msg, kwargs # # Boilerplate convenience methods # def debug(self, msg, *args, **kwargs): """ Delegate a debug call to the underlying logger. """ self.log(DEBUG, msg, *args, **kwargs) def info(self, msg, *args, **kwargs): """ Delegate an info call to the underlying logger. """ self.log(INFO, msg, *args, **kwargs) def warning(self, msg, *args, **kwargs): """ Delegate a warning call to the underlying logger. 
""" self.log(WARNING, msg, *args, **kwargs) def warn(self, msg, *args, **kwargs): warnings.warn("The 'warn' method is deprecated, " "use 'warning' instead", DeprecationWarning, 2) self.warning(msg, *args, **kwargs) def error(self, msg, *args, **kwargs): """ Delegate an error call to the underlying logger. """ self.log(ERROR, msg, *args, **kwargs) def exception(self, msg, *args, exc_info=True, **kwargs): """ Delegate an exception call to the underlying logger. """ self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) def critical(self, msg, *args, **kwargs): """ Delegate a critical call to the underlying logger. """ self.log(CRITICAL, msg, *args, **kwargs) def log(self, level, msg, *args, **kwargs): """ Delegate a log call to the underlying logger, after adding contextual information from this adapter instance. """ if self.isEnabledFor(level): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) def isEnabledFor(self, level): """ Is this logger enabled for level 'level'? """ return self.logger.isEnabledFor(level) def setLevel(self, level): """ Set the specified level on the underlying logger. """ self.logger.setLevel(level) def getEffectiveLevel(self): """ Get the effective level for the underlying logger. """ return self.logger.getEffectiveLevel() def hasHandlers(self): """ See if the underlying logger has any handlers. """ return self.logger.hasHandlers() def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False): """ Low-level log implementation, proxied to allow nested logger adapters. 
""" return self.logger._log( level, msg, args, exc_info=exc_info, extra=extra, stack_info=stack_info, ) @property def manager(self): return self.logger.manager @manager.setter def manager(self, value): self.logger.manager = value @property def name(self): return self.logger.name def __repr__(self): logger = self.logger level = getLevelName(logger.getEffectiveLevel()) return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) root = RootLogger(WARNING) Logger.root = root Logger.manager = Manager(Logger.root) #--------------------------------------------------------------------------- # Configuration classes and functions #--------------------------------------------------------------------------- def basicConfig(**kwargs): """ Do basic configuration for the logging system. This function does nothing if the root logger already has handlers configured, unless the keyword argument *force* is set to ``True``. It is a convenience method intended for use by simple scripts to do one-shot configuration of the logging package. The default behaviour is to create a StreamHandler which writes to sys.stderr, set a formatter using the BASIC_FORMAT format string, and add the handler to the root logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. filename Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. filemode Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to 'a'). format Use the specified format string for the handler. datefmt Use the specified date/time format. style If a format string is specified, use this to specify the type of format string (possible values '%', '{', '$', for %-formatting, :meth:`str.format` and :class:`string.Template` - defaults to '%'). level Set the root logger level to the specified level. stream Use the specified stream to initialize the StreamHandler. 
Note that this argument is incompatible with 'filename' - if both are present, 'stream' is ignored. handlers If specified, this should be an iterable of already created handlers, which will be added to the root handler. Any handler in the list which does not have a formatter assigned will be assigned the formatter created in this function. force If this keyword is specified as true, any existing handlers attached to the root logger are removed and closed, before carrying out the configuration as specified by the other arguments. encoding If specified together with a filename, this encoding is passed to the created FileHandler, causing it to be used when the file is opened. errors If specified together with a filename, this value is passed to the created FileHandler, causing it to be used when the file is opened in text mode. If not specified, the default value is `backslashreplace`. Note that you could specify a stream created using open(filename, mode) rather than passing the filename and mode in. However, it should be remembered that StreamHandler does not close its stream (since it may be using sys.stdout or sys.stderr), whereas FileHandler closes its stream when the handler is closed. .. versionchanged:: 3.2 Added the ``style`` parameter. .. versionchanged:: 3.3 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for incompatible arguments (e.g. ``handlers`` specified together with ``filename``/``filemode``, or ``filename``/``filemode`` specified together with ``stream``, or ``handlers`` specified together with ``stream``. .. versionchanged:: 3.8 Added the ``force`` parameter. .. versionchanged:: 3.9 Added the ``encoding`` and ``errors`` parameters. 
""" # Add thread safety in case someone mistakenly calls # basicConfig() from multiple threads _acquireLock() try: force = kwargs.pop('force', False) encoding = kwargs.pop('encoding', None) errors = kwargs.pop('errors', 'backslashreplace') if force: for h in root.handlers[:]: root.removeHandler(h) h.close() if len(root.handlers) == 0: handlers = kwargs.pop("handlers", None) if handlers is None: if "stream" in kwargs and "filename" in kwargs: raise ValueError("'stream' and 'filename' should not be " "specified together") else: if "stream" in kwargs or "filename" in kwargs: raise ValueError("'stream' or 'filename' should not be " "specified together with 'handlers'") if handlers is None: filename = kwargs.pop("filename", None) mode = kwargs.pop("filemode", 'a') if filename: if 'b'in mode: errors = None h = FileHandler(filename, mode, encoding=encoding, errors=errors) else: stream = kwargs.pop("stream", None) h = StreamHandler(stream) handlers = [h] dfs = kwargs.pop("datefmt", None) style = kwargs.pop("style", '%') if style not in _STYLES: raise ValueError('Style must be one of: %s' % ','.join( _STYLES.keys())) fs = kwargs.pop("format", _STYLES[style][1]) fmt = Formatter(fs, dfs, style) for h in handlers: if h.formatter is None: h.setFormatter(fmt) root.addHandler(h) level = kwargs.pop("level", None) if level is not None: root.setLevel(level) if kwargs: keys = ', '.join(kwargs.keys()) raise ValueError('Unrecognised argument(s): %s' % keys) finally: _releaseLock() #--------------------------------------------------------------------------- # Utility functions at module level. # Basically delegate everything to the root logger. #--------------------------------------------------------------------------- def getLogger(name=None): """ Return a logger with the specified name, creating it if necessary. If no name is specified, return the root logger. 
""" if not name or isinstance(name, str) and name == root.name: return root return Logger.manager.getLogger(name) def critical(msg, *args, **kwargs): """ Log a message with severity 'CRITICAL' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.critical(msg, *args, **kwargs) fatal = critical def error(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.error(msg, *args, **kwargs) def exception(msg, *args, exc_info=True, **kwargs): """ Log a message with severity 'ERROR' on the root logger, with exception information. If the logger has no handlers, basicConfig() is called to add a console handler with a pre-defined format. """ error(msg, *args, exc_info=exc_info, **kwargs) def warning(msg, *args, **kwargs): """ Log a message with severity 'WARNING' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.warning(msg, *args, **kwargs) def warn(msg, *args, **kwargs): warnings.warn("The 'warn' function is deprecated, " "use 'warning' instead", DeprecationWarning, 2) warning(msg, *args, **kwargs) def info(msg, *args, **kwargs): """ Log a message with severity 'INFO' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.info(msg, *args, **kwargs) def debug(msg, *args, **kwargs): """ Log a message with severity 'DEBUG' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. 
""" if len(root.handlers) == 0: basicConfig() root.debug(msg, *args, **kwargs) def log(level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.log(level, msg, *args, **kwargs) def disable(level=CRITICAL): """ Disable all logging calls of severity 'level' and below. """ root.manager.disable = level root.manager._clear_cache() def shutdown(handlerList=_handlerList): """ Perform any cleanup actions in the logging system (e.g. flushing buffers). Should be called at application exit. """ for wr in reversed(handlerList[:]): #errors might occur, for example, if files are locked #we just ignore them if raiseExceptions is not set try: h = wr() if h: try: h.acquire() h.flush() h.close() except (OSError, ValueError): # Ignore errors which might be caused # because handlers have been closed but # references to them are still around at # application exit. pass finally: h.release() except: # ignore everything, as we're shutting down if raiseExceptions: raise #else, swallow #Let's try and shutdown automatically on application exit... import atexit atexit.register(shutdown) # Null handler class NullHandler(Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. 
""" def handle(self, record): """Stub.""" def emit(self, record): """Stub.""" def createLock(self): self.lock = None def _at_fork_reinit(self): pass # Warnings integration _warnings_showwarning = None def _showwarning(message, category, filename, lineno, file=None, line=None): """ Implementation of showwarnings which redirects to logging, which will first check to see if the file parameter is None. If a file is specified, it will delegate to the original warnings implementation of showwarning. Otherwise, it will call warnings.formatwarning and will log the resulting string to a warnings logger named "py.warnings" with level logging.WARNING. """ if file is not None: if _warnings_showwarning is not None: _warnings_showwarning(message, category, filename, lineno, file, line) else: s = warnings.formatwarning(message, category, filename, lineno, line) logger = getLogger("py.warnings") if not logger.handlers: logger.addHandler(NullHandler()) logger.warning("%s", s) def captureWarnings(capture): """ If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations. """ global _warnings_showwarning if capture: if _warnings_showwarning is None: _warnings_showwarning = warnings.showwarning warnings.showwarning = _showwarning else: if _warnings_showwarning is not None: warnings.showwarning = _warnings_showwarning _warnings_showwarning = None
bsd-3-clause
079033d961490a749d964825084869b4
34.455616
106
0.58041
4.485019
false
false
false
false
brython-dev/brython
www/src/Lib/encodings/cp852.py
35
35700
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_map) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_map)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp852', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Map decoding_map = codecs.make_identity_dict(range(256)) decoding_map.update({ 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS 0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE 0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS 0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE 0x0090: 
0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE 0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE 0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS 0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON 0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON 0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON 0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE 0x009e: 0x00d7, # MULTIPLICATION SIGN 0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE 0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK 0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK 0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON 0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON 0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK 0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK 0x00aa: 0x00ac, # NOT SIGN 0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE 0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON 0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00b0: 0x2591, # LIGHT SHADE 0x00b1: 0x2592, # MEDIUM SHADE 0x00b2: 0x2593, # DARK SHADE 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON 0x00b8: 0x015e, # 
LATIN CAPITAL LETTER S WITH CEDILLA 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT 0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE 0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x00cf: 0x00a4, # CURRENCY SIGN 0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE 0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE 0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON 0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x00db: 0x2588, # FULL BLOCK 0x00dc: 0x2584, # LOWER HALF BLOCK 0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA 0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE 
0x00df: 0x2580, # UPPER HALF BLOCK 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE 0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE 0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON 0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON 0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON 0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE 0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE 0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE 0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA 0x00ef: 0x00b4, # ACUTE ACCENT 0x00f0: 0x00ad, # SOFT HYPHEN 0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT 0x00f2: 0x02db, # OGONEK 0x00f3: 0x02c7, # CARON 0x00f4: 0x02d8, # BREVE 0x00f5: 0x00a7, # SECTION SIGN 0x00f6: 0x00f7, # DIVISION SIGN 0x00f7: 0x00b8, # CEDILLA 0x00f8: 0x00b0, # DEGREE SIGN 0x00f9: 0x00a8, # DIAERESIS 0x00fa: 0x02d9, # DOT ABOVE 0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE 0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON 0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON 0x00fe: 0x25a0, # BLACK SQUARE 0x00ff: 0x00a0, # NO-BREAK SPACE }) ### Decoding Table decoding_table = ( '\x00' # 0x0000 -> NULL '\x01' # 0x0001 -> START OF HEADING '\x02' # 0x0002 -> START OF TEXT '\x03' # 0x0003 -> END OF TEXT '\x04' # 0x0004 -> END OF TRANSMISSION '\x05' # 0x0005 -> ENQUIRY '\x06' # 0x0006 -> ACKNOWLEDGE '\x07' # 0x0007 -> BELL '\x08' # 0x0008 -> BACKSPACE '\t' # 0x0009 -> HORIZONTAL TABULATION '\n' # 0x000a -> LINE FEED '\x0b' # 0x000b -> VERTICAL TABULATION '\x0c' # 0x000c -> FORM FEED '\r' # 0x000d -> CARRIAGE RETURN '\x0e' # 0x000e -> SHIFT OUT '\x0f' # 0x000f -> SHIFT IN '\x10' # 0x0010 -> DATA LINK ESCAPE '\x11' # 0x0011 -> DEVICE CONTROL ONE '\x12' 
# 0x0012 -> DEVICE CONTROL TWO '\x13' # 0x0013 -> DEVICE CONTROL THREE '\x14' # 0x0014 -> DEVICE CONTROL FOUR '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE '\x16' # 0x0016 -> SYNCHRONOUS IDLE '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK '\x18' # 0x0018 -> CANCEL '\x19' # 0x0019 -> END OF MEDIUM '\x1a' # 0x001a -> SUBSTITUTE '\x1b' # 0x001b -> ESCAPE '\x1c' # 0x001c -> FILE SEPARATOR '\x1d' # 0x001d -> GROUP SEPARATOR '\x1e' # 0x001e -> RECORD SEPARATOR '\x1f' # 0x001f -> UNIT SEPARATOR ' ' # 0x0020 -> SPACE '!' # 0x0021 -> EXCLAMATION MARK '"' # 0x0022 -> QUOTATION MARK '#' # 0x0023 -> NUMBER SIGN '$' # 0x0024 -> DOLLAR SIGN '%' # 0x0025 -> PERCENT SIGN '&' # 0x0026 -> AMPERSAND "'" # 0x0027 -> APOSTROPHE '(' # 0x0028 -> LEFT PARENTHESIS ')' # 0x0029 -> RIGHT PARENTHESIS '*' # 0x002a -> ASTERISK '+' # 0x002b -> PLUS SIGN ',' # 0x002c -> COMMA '-' # 0x002d -> HYPHEN-MINUS '.' # 0x002e -> FULL STOP '/' # 0x002f -> SOLIDUS '0' # 0x0030 -> DIGIT ZERO '1' # 0x0031 -> DIGIT ONE '2' # 0x0032 -> DIGIT TWO '3' # 0x0033 -> DIGIT THREE '4' # 0x0034 -> DIGIT FOUR '5' # 0x0035 -> DIGIT FIVE '6' # 0x0036 -> DIGIT SIX '7' # 0x0037 -> DIGIT SEVEN '8' # 0x0038 -> DIGIT EIGHT '9' # 0x0039 -> DIGIT NINE ':' # 0x003a -> COLON ';' # 0x003b -> SEMICOLON '<' # 0x003c -> LESS-THAN SIGN '=' # 0x003d -> EQUALS SIGN '>' # 0x003e -> GREATER-THAN SIGN '?' 
# 0x003f -> QUESTION MARK '@' # 0x0040 -> COMMERCIAL AT 'A' # 0x0041 -> LATIN CAPITAL LETTER A 'B' # 0x0042 -> LATIN CAPITAL LETTER B 'C' # 0x0043 -> LATIN CAPITAL LETTER C 'D' # 0x0044 -> LATIN CAPITAL LETTER D 'E' # 0x0045 -> LATIN CAPITAL LETTER E 'F' # 0x0046 -> LATIN CAPITAL LETTER F 'G' # 0x0047 -> LATIN CAPITAL LETTER G 'H' # 0x0048 -> LATIN CAPITAL LETTER H 'I' # 0x0049 -> LATIN CAPITAL LETTER I 'J' # 0x004a -> LATIN CAPITAL LETTER J 'K' # 0x004b -> LATIN CAPITAL LETTER K 'L' # 0x004c -> LATIN CAPITAL LETTER L 'M' # 0x004d -> LATIN CAPITAL LETTER M 'N' # 0x004e -> LATIN CAPITAL LETTER N 'O' # 0x004f -> LATIN CAPITAL LETTER O 'P' # 0x0050 -> LATIN CAPITAL LETTER P 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q 'R' # 0x0052 -> LATIN CAPITAL LETTER R 'S' # 0x0053 -> LATIN CAPITAL LETTER S 'T' # 0x0054 -> LATIN CAPITAL LETTER T 'U' # 0x0055 -> LATIN CAPITAL LETTER U 'V' # 0x0056 -> LATIN CAPITAL LETTER V 'W' # 0x0057 -> LATIN CAPITAL LETTER W 'X' # 0x0058 -> LATIN CAPITAL LETTER X 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y 'Z' # 0x005a -> LATIN CAPITAL LETTER Z '[' # 0x005b -> LEFT SQUARE BRACKET '\\' # 0x005c -> REVERSE SOLIDUS ']' # 0x005d -> RIGHT SQUARE BRACKET '^' # 0x005e -> CIRCUMFLEX ACCENT '_' # 0x005f -> LOW LINE '`' # 0x0060 -> GRAVE ACCENT 'a' # 0x0061 -> LATIN SMALL LETTER A 'b' # 0x0062 -> LATIN SMALL LETTER B 'c' # 0x0063 -> LATIN SMALL LETTER C 'd' # 0x0064 -> LATIN SMALL LETTER D 'e' # 0x0065 -> LATIN SMALL LETTER E 'f' # 0x0066 -> LATIN SMALL LETTER F 'g' # 0x0067 -> LATIN SMALL LETTER G 'h' # 0x0068 -> LATIN SMALL LETTER H 'i' # 0x0069 -> LATIN SMALL LETTER I 'j' # 0x006a -> LATIN SMALL LETTER J 'k' # 0x006b -> LATIN SMALL LETTER K 'l' # 0x006c -> LATIN SMALL LETTER L 'm' # 0x006d -> LATIN SMALL LETTER M 'n' # 0x006e -> LATIN SMALL LETTER N 'o' # 0x006f -> LATIN SMALL LETTER O 'p' # 0x0070 -> LATIN SMALL LETTER P 'q' # 0x0071 -> LATIN SMALL LETTER Q 'r' # 0x0072 -> LATIN SMALL LETTER R 's' # 0x0073 -> LATIN SMALL LETTER S 't' # 0x0074 -> LATIN SMALL 
LETTER T 'u' # 0x0075 -> LATIN SMALL LETTER U 'v' # 0x0076 -> LATIN SMALL LETTER V 'w' # 0x0077 -> LATIN SMALL LETTER W 'x' # 0x0078 -> LATIN SMALL LETTER X 'y' # 0x0079 -> LATIN SMALL LETTER Y 'z' # 0x007a -> LATIN SMALL LETTER Z '{' # 0x007b -> LEFT CURLY BRACKET '|' # 0x007c -> VERTICAL LINE '}' # 0x007d -> RIGHT CURLY BRACKET '~' # 0x007e -> TILDE '\x7f' # 0x007f -> DELETE '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS '\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE '\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS '\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE '\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS '\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE '\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE '\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS '\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON '\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS '\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON '\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON '\u0141' # 0x009d 
-> LATIN CAPITAL LETTER L WITH STROKE '\xd7' # 0x009e -> MULTIPLICATION SIGN '\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE '\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK '\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK '\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON '\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON '\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK '\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK '\xac' # 0x00aa -> NOT SIGN '\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE '\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON '\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK '\u2591' # 0x00b0 -> LIGHT SHADE '\u2592' # 0x00b1 -> MEDIUM SHADE '\u2593' # 0x00b2 -> DARK SHADE '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX '\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON '\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT '\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE '\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL '\u251c' # 0x00c3 -> BOX DRAWINGS 
LIGHT VERTICAL AND RIGHT '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL '\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE '\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL '\xa4' # 0x00cf -> CURRENCY SIGN '\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE '\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE '\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS '\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON '\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX '\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT '\u2588' # 0x00db -> FULL BLOCK '\u2584' # 0x00dc -> LOWER HALF BLOCK '\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA '\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE '\u2580' # 0x00df -> UPPER HALF BLOCK '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX '\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE '\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE '\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON '\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON '\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON '\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE 
'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE '\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE '\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE '\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA '\xb4' # 0x00ef -> ACUTE ACCENT '\xad' # 0x00f0 -> SOFT HYPHEN '\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT '\u02db' # 0x00f2 -> OGONEK '\u02c7' # 0x00f3 -> CARON '\u02d8' # 0x00f4 -> BREVE '\xa7' # 0x00f5 -> SECTION SIGN '\xf7' # 0x00f6 -> DIVISION SIGN '\xb8' # 0x00f7 -> CEDILLA '\xb0' # 0x00f8 -> DEGREE SIGN '\xa8' # 0x00f9 -> DIAERESIS '\u02d9' # 0x00fa -> DOT ABOVE '\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE '\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON '\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON '\u25a0' # 0x00fe -> BLACK SQUARE '\xa0' # 0x00ff -> NO-BREAK SPACE ) ### Encoding Map encoding_map = { 0x0000: 0x0000, # NULL 0x0001: 0x0001, # START OF HEADING 0x0002: 0x0002, # START OF TEXT 0x0003: 0x0003, # END OF TEXT 0x0004: 0x0004, # END OF TRANSMISSION 0x0005: 0x0005, # ENQUIRY 0x0006: 0x0006, # ACKNOWLEDGE 0x0007: 0x0007, # BELL 0x0008: 0x0008, # BACKSPACE 0x0009: 0x0009, # HORIZONTAL TABULATION 0x000a: 0x000a, # LINE FEED 0x000b: 0x000b, # VERTICAL TABULATION 0x000c: 0x000c, # FORM FEED 0x000d: 0x000d, # CARRIAGE RETURN 0x000e: 0x000e, # SHIFT OUT 0x000f: 0x000f, # SHIFT IN 0x0010: 0x0010, # DATA LINK ESCAPE 0x0011: 0x0011, # DEVICE CONTROL ONE 0x0012: 0x0012, # DEVICE CONTROL TWO 0x0013: 0x0013, # DEVICE CONTROL THREE 0x0014: 0x0014, # DEVICE CONTROL FOUR 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE 0x0016: 0x0016, # SYNCHRONOUS IDLE 0x0017: 0x0017, # END OF TRANSMISSION BLOCK 0x0018: 0x0018, # CANCEL 0x0019: 0x0019, # END OF MEDIUM 0x001a: 0x001a, # SUBSTITUTE 0x001b: 0x001b, # ESCAPE 0x001c: 0x001c, # FILE SEPARATOR 0x001d: 0x001d, # GROUP SEPARATOR 0x001e: 0x001e, # RECORD SEPARATOR 0x001f: 0x001f, # UNIT 
SEPARATOR 0x0020: 0x0020, # SPACE 0x0021: 0x0021, # EXCLAMATION MARK 0x0022: 0x0022, # QUOTATION MARK 0x0023: 0x0023, # NUMBER SIGN 0x0024: 0x0024, # DOLLAR SIGN 0x0025: 0x0025, # PERCENT SIGN 0x0026: 0x0026, # AMPERSAND 0x0027: 0x0027, # APOSTROPHE 0x0028: 0x0028, # LEFT PARENTHESIS 0x0029: 0x0029, # RIGHT PARENTHESIS 0x002a: 0x002a, # ASTERISK 0x002b: 0x002b, # PLUS SIGN 0x002c: 0x002c, # COMMA 0x002d: 0x002d, # HYPHEN-MINUS 0x002e: 0x002e, # FULL STOP 0x002f: 0x002f, # SOLIDUS 0x0030: 0x0030, # DIGIT ZERO 0x0031: 0x0031, # DIGIT ONE 0x0032: 0x0032, # DIGIT TWO 0x0033: 0x0033, # DIGIT THREE 0x0034: 0x0034, # DIGIT FOUR 0x0035: 0x0035, # DIGIT FIVE 0x0036: 0x0036, # DIGIT SIX 0x0037: 0x0037, # DIGIT SEVEN 0x0038: 0x0038, # DIGIT EIGHT 0x0039: 0x0039, # DIGIT NINE 0x003a: 0x003a, # COLON 0x003b: 0x003b, # SEMICOLON 0x003c: 0x003c, # LESS-THAN SIGN 0x003d: 0x003d, # EQUALS SIGN 0x003e: 0x003e, # GREATER-THAN SIGN 0x003f: 0x003f, # QUESTION MARK 0x0040: 0x0040, # COMMERCIAL AT 0x0041: 0x0041, # LATIN CAPITAL LETTER A 0x0042: 0x0042, # LATIN CAPITAL LETTER B 0x0043: 0x0043, # LATIN CAPITAL LETTER C 0x0044: 0x0044, # LATIN CAPITAL LETTER D 0x0045: 0x0045, # LATIN CAPITAL LETTER E 0x0046: 0x0046, # LATIN CAPITAL LETTER F 0x0047: 0x0047, # LATIN CAPITAL LETTER G 0x0048: 0x0048, # LATIN CAPITAL LETTER H 0x0049: 0x0049, # LATIN CAPITAL LETTER I 0x004a: 0x004a, # LATIN CAPITAL LETTER J 0x004b: 0x004b, # LATIN CAPITAL LETTER K 0x004c: 0x004c, # LATIN CAPITAL LETTER L 0x004d: 0x004d, # LATIN CAPITAL LETTER M 0x004e: 0x004e, # LATIN CAPITAL LETTER N 0x004f: 0x004f, # LATIN CAPITAL LETTER O 0x0050: 0x0050, # LATIN CAPITAL LETTER P 0x0051: 0x0051, # LATIN CAPITAL LETTER Q 0x0052: 0x0052, # LATIN CAPITAL LETTER R 0x0053: 0x0053, # LATIN CAPITAL LETTER S 0x0054: 0x0054, # LATIN CAPITAL LETTER T 0x0055: 0x0055, # LATIN CAPITAL LETTER U 0x0056: 0x0056, # LATIN CAPITAL LETTER V 0x0057: 0x0057, # LATIN CAPITAL LETTER W 0x0058: 0x0058, # LATIN CAPITAL LETTER X 0x0059: 0x0059, # LATIN 
CAPITAL LETTER Y 0x005a: 0x005a, # LATIN CAPITAL LETTER Z 0x005b: 0x005b, # LEFT SQUARE BRACKET 0x005c: 0x005c, # REVERSE SOLIDUS 0x005d: 0x005d, # RIGHT SQUARE BRACKET 0x005e: 0x005e, # CIRCUMFLEX ACCENT 0x005f: 0x005f, # LOW LINE 0x0060: 0x0060, # GRAVE ACCENT 0x0061: 0x0061, # LATIN SMALL LETTER A 0x0062: 0x0062, # LATIN SMALL LETTER B 0x0063: 0x0063, # LATIN SMALL LETTER C 0x0064: 0x0064, # LATIN SMALL LETTER D 0x0065: 0x0065, # LATIN SMALL LETTER E 0x0066: 0x0066, # LATIN SMALL LETTER F 0x0067: 0x0067, # LATIN SMALL LETTER G 0x0068: 0x0068, # LATIN SMALL LETTER H 0x0069: 0x0069, # LATIN SMALL LETTER I 0x006a: 0x006a, # LATIN SMALL LETTER J 0x006b: 0x006b, # LATIN SMALL LETTER K 0x006c: 0x006c, # LATIN SMALL LETTER L 0x006d: 0x006d, # LATIN SMALL LETTER M 0x006e: 0x006e, # LATIN SMALL LETTER N 0x006f: 0x006f, # LATIN SMALL LETTER O 0x0070: 0x0070, # LATIN SMALL LETTER P 0x0071: 0x0071, # LATIN SMALL LETTER Q 0x0072: 0x0072, # LATIN SMALL LETTER R 0x0073: 0x0073, # LATIN SMALL LETTER S 0x0074: 0x0074, # LATIN SMALL LETTER T 0x0075: 0x0075, # LATIN SMALL LETTER U 0x0076: 0x0076, # LATIN SMALL LETTER V 0x0077: 0x0077, # LATIN SMALL LETTER W 0x0078: 0x0078, # LATIN SMALL LETTER X 0x0079: 0x0079, # LATIN SMALL LETTER Y 0x007a: 0x007a, # LATIN SMALL LETTER Z 0x007b: 0x007b, # LEFT CURLY BRACKET 0x007c: 0x007c, # VERTICAL LINE 0x007d: 0x007d, # RIGHT CURLY BRACKET 0x007e: 0x007e, # TILDE 0x007f: 0x007f, # DELETE 0x00a0: 0x00ff, # NO-BREAK SPACE 0x00a4: 0x00cf, # CURRENCY SIGN 0x00a7: 0x00f5, # SECTION SIGN 0x00a8: 0x00f9, # DIAERESIS 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00ac: 0x00aa, # NOT SIGN 0x00ad: 0x00f0, # SOFT HYPHEN 0x00b0: 0x00f8, # DEGREE SIGN 0x00b4: 0x00ef, # ACUTE ACCENT 0x00b8: 0x00f7, # CEDILLA 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS 0x00c7: 
0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS 0x00d7: 0x009e, # MULTIPLICATION SIGN 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS 0x00f7: 0x00f6, # DIVISION SIGN 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE 0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE 0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE 0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK 0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK 0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE 0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE 0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON 0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON 0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON 0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON 0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH 
STROKE 0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE 0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK 0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK 0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON 0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON 0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE 0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE 0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON 0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON 0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE 0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE 0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON 0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON 0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE 0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE 0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE 0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE 0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON 0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE 0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA 0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA 0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON 0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON 0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA 0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA 0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON 0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON 0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE 0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE 0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE 0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE 0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE 0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE 
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE 0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON 0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON 0x02c7: 0x00f3, # CARON 0x02d8: 0x00f4, # BREVE 0x02d9: 0x00fa, # DOT ABOVE 0x02db: 0x00f2, # OGONEK 0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL 0x2580: 0x00df, # UPPER HALF BLOCK 0x2584: 0x00dc, # LOWER HALF BLOCK 0x2588: 0x00db, # FULL BLOCK 0x2591: 0x00b0, # LIGHT SHADE 0x2592: 0x00b1, # MEDIUM SHADE 0x2593: 0x00b2, # DARK SHADE 0x25a0: 0x00fe, # BLACK SQUARE }
bsd-3-clause
1541ebd24e4d57584199ee6d452b0f67
49.146132
97
0.593529
3.047896
false
false
false
false
dimagi/commcare-hq
corehq/messaging/whatsapputil.py
1
2142
import re
from collections import namedtuple

from corehq.apps.domain.models import Domain
from dimagi.utils.web import get_url_base

WA_TEMPLATE_STRING = "cc_wa_template"

# Metadata key -> filename suffix appended to the media URL for that media type.
_MEDIA_SUFFIXES = {
    'caption_image': 'image.png',
    'caption_audio': 'audio.mp3',
    'caption_video': 'video.mp4',
}


class WhatsAppTemplateStringException(Exception):
    pass


def is_whatsapp_template_message(message_text):
    """Return True when the text embeds the WhatsApp template marker."""
    return WA_TEMPLATE_STRING in message_text.lower()


def is_multimedia_message(msg):
    """Return True when the message metadata references any captioned media."""
    return any(key in msg.custom_metadata for key in _MEDIA_SUFFIXES)


def get_multimedia_urls(msg):
    """Return ``(image_url, audio_url, video_url)`` for media referenced by msg.

    Scans every full application in the message's domain for media objects
    whose path matches the corresponding ``caption_*`` metadata entry; any
    media type that never matches stays ``None``.
    """
    found = {key: None for key in _MEDIA_SUFFIXES}
    domain_obj = Domain.get_by_name(msg.domain, strict=True)
    for app in domain_obj.full_applications():
        for path, media in app.get_media_objects(remove_unused=True):
            for key, suffix in _MEDIA_SUFFIXES.items():
                if key in msg.custom_metadata and msg.custom_metadata[key] == path:
                    found[key] = get_url_base() + media.url() + suffix
    return found['caption_image'], found['caption_audio'], found['caption_video']


def extract_error_message_from_template_string(message_text):
    """If message is labeled as "invalid_survey_response" then error message
    should be extracted from template string (everything before the marker).
    """
    return message_text.split(WA_TEMPLATE_STRING)[0]


def get_template_hsm_parts(message_text):
    """Parse the magic string ``cc_wa_template:template_name:lang_code:{var1}{var2}``.

    Raises WhatsAppTemplateStringException when the template name or
    language code segment is missing.
    """
    HsmParts = namedtuple("hsm_parts", "template_name lang_code params")
    pieces = message_text.split(":", maxsplit=3)
    if len(pieces) < 3:
        raise WhatsAppTemplateStringException
    variables = re.findall("{(.+?)}+", pieces[3]) if len(pieces) > 3 else []
    return HsmParts(template_name=pieces[1], lang_code=pieces[2], params=variables)
bsd-3-clause
5dab5e72e33397712b4c90551c5df51e
35.931034
105
0.673203
3.534653
false
false
false
false
dimagi/commcare-hq
corehq/apps/reports/filters/urls.py
1
1090
from django.conf.urls import re_path as url

from .api import (
    CaseListFilterOptions,
    DeviceLogIds,
    DeviceLogUsers,
    EmwfOptionsView,
    MobileWorkersOptionsView,
    ReassignCaseOptions,
    EnterpriseUserOptions,
)
from .location import LocationGroupFilterOptions

# (url regex, view class, url name) for each filter-options endpoint.
_OPTION_ROUTES = (
    (r'^emwf_options_all_users/$', EmwfOptionsView, 'emwf_options_all_users'),
    (r'^users_options/$', MobileWorkersOptionsView, MobileWorkersOptionsView.urlname),
    (r'^enterprise_users_options/$', EnterpriseUserOptions, "enterprise_user_options"),
    (r'^case_list_options/$', CaseListFilterOptions, 'case_list_options'),
    (r'^reassign_case_options/$', ReassignCaseOptions, 'reassign_case_options'),
    (r'^grouplocationfilter_options/$', LocationGroupFilterOptions, 'grouplocationfilter_options'),
    (r'^device_log_users/$', DeviceLogUsers, 'device_log_users'),
    (r'^device_log_ids/$', DeviceLogIds, 'device_log_ids'),
)

urlpatterns = [
    url(regex, view.as_view(), name=name)
    for regex, view, name in _OPTION_ROUTES
]
bsd-3-clause
7b45b08d0ba036e155bf9aed525c399f
44.416667
105
0.723853
3.516129
false
false
true
false
onepercentclub/bluebottle
bluebottle/statistics/migrations/0009_auto_20200717_1201.py
1
1315
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2020-07-17 10:01 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('statistics', '0008_auto_20200717_1136'), ] operations = [ migrations.AlterField( model_name='databasestatistic', name='query', field=models.CharField(choices=[(b'manual', 'Manual input'), (b'people_involved', 'People involved'), (b'participants', 'Participants'), (b'activities_succeeded', 'Activities succeeded'), (b'assignments_succeeded', 'Tasks succeeded'), (b'events_succeeded', 'Events succeeded'), (b'fundings_succeeded', 'Funding activities succeeded'), (b'assignment_members', 'Task applicants'), (b'event_members', 'Event participants'), (b'assignments_online', 'Tasks online'), (b'events_online', 'Events online'), (b'fundings_online', 'Funding activities online'), (b'donations', 'Donations'), (b'donated_total', 'Donated total'), (b'pledged_total', 'Pledged total'), (b'amount_matched', 'Amount matched'), (b'activities_online', 'Activities Online'), (b'votes_cast', 'Votes casts'), (b'time_spent', 'Time spent'), (b'members', 'Number of members')], db_index=True, max_length=30, verbose_name='query'), ), ]
bsd-3-clause
30e03608a2f6bb83c4df35366ddd6894
64.75
900
0.670722
3.563686
false
false
false
false
dimagi/commcare-hq
fabfile.py
1
2336
# fabfile shim: real deploys moved to the commcare-cloud repo.  This script
# only walks the operator through migrating their checkout, then exits
# non-zero so no legacy fab task can run from here.
import os
import time

print()
print("Hey things have changed.")
print()
time.sleep(1)  # pauses make the banner readable instead of scrolling past
print("We now do deploys from the commcare-cloud directory or the control machine.")
print()
time.sleep(2)
if 'y' == input('Do you want instructions for how to migrate? [y/N]'):
    print()
    # Look for an existing checkout next to this repo, accepting either the
    # old repo name or the new one.
    for dir_name in ['commcarehq-ansible', 'commcare-cloud']:
        ansible_repo = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', dir_name))
        ansible_repo_exists = os.path.isdir(os.path.join(ansible_repo, '.git'))
        if ansible_repo_exists:
            break
    else:  # loop completed without break: no checkout found alongside this repo
        if 'y' != input('Do you have a local commcare-cloud '
                        '(formerly called commcarehq-ansible) repository already? [y/N]'):
            # Nothing exists yet: tell them how to clone and set up.
            print("""
Set up commcare-cloud
=========================

Put the commcarehq-ansible repo alongside this one like so:

    cd ..
    git clone https://github.com/dimagi/commcare-cloud.git
    cd commcare-cloud

Now make a virtualenv for ansible:

    mkvirtualenv ansible
""")
        else:
            # A checkout exists elsewhere: ask for a symlink and bail out,
            # since the remaining instructions need the local path to exist.
            print("""
Link commcare-cloud repo
============================

Symlink your commcare-cloud (or commcarehq-ansible) repo so that it lives alongside this one:

    ln -s <path/to/commcare-cloud> {ansible_repo}

When you have done that, run fab to see more instructions.
""".format(ansible_repo=ansible_repo))
            exit(1)
    if ansible_repo_exists:
        # NOTE(review): "alonside" typo preserved verbatim — this is a
        # runtime message and must stay byte-identical here.
        print("✓ You already have the commcare-cloud repo alonside this one: {}"
              .format(ansible_repo))
        print()
        time.sleep(1)
    print("""
Make your ansible environment fab-ready
=======================================

Enter the commcare-cloud repo and make sure you have the latest

    cd {ansible_repo}
    git pull

enter the env

    workon ansible

do the necessary pip installs

    pip install -r fab/requirements.txt

and make sure the necessary files are in the right place

    ./control/check_install.sh

Run a fab command!
==================

    fab production deploy

Remember that in steady state, you will need to workon the ansible virtualenv
and enter the commcare-cloud directory before you will be able to run a fab
command.
""".format(ansible_repo=ansible_repo))
    exit(1)
bsd-3-clause
ce9ba650e7725d211df75846d7566f8d
25.827586
97
0.607969
4.031088
false
false
false
false
dimagi/commcare-hq
corehq/ex-submodules/pillowtop/reindexer/change_providers/form.py
1
1748
from corehq.form_processor.change_publishers import change_meta_from_sql_form
from corehq.form_processor.models import XFormInstance
from corehq.util.pagination import paginate_function, ArgsListProvider
from pillowtop.feed.interface import Change
from pillowtop.reindexer.change_providers.interface import ChangeProvider


class SqlDomainXFormChangeProvider(ChangeProvider):
    """Change provider that replays every SQL XForm in a set of domains."""

    def __init__(self, domains, chunk_size=1000):
        # Materialize the iterable so it can be traversed more than once.
        self.domains = list(domains)
        self.chunk_size = chunk_size

    def iter_all_changes(self, start_from=None):
        """Yield a ``Change`` for every form in every configured domain.

        ``start_from`` is accepted for interface compatibility but unused
        by this implementation.
        """
        if not self.domains:
            return
        for id_batch in self._iter_form_id_chunks():
            for form in XFormInstance.objects.get_forms(id_batch):
                yield Change(
                    id=form.form_id,
                    sequence_id=None,
                    document=form.to_json(),
                    deleted=False,
                    metadata=change_meta_from_sql_form(form),
                    document_store=None,
                )

    def _iter_form_id_chunks(self):
        """Yield lists of form ids, each at most ``chunk_size`` long."""
        # One query spec per (domain, doc_type) combination.
        query_kwargs = [
            {'domain': domain, 'doc_type': doc_type}
            for domain in self.domains
            for doc_type in XFormInstance.DOC_TYPE_TO_STATE
        ]
        id_stream = paginate_function(
            XFormInstance.objects.get_form_ids_in_domain,
            ArgsListProvider(query_kwargs),
        )
        batch = []
        for form_id in id_stream:
            batch.append(form_id)
            if len(batch) >= self.chunk_size:
                yield batch
                batch = []
        if batch:  # flush the final partial batch
            yield batch


def get_domain_form_change_provider(domains):
    """Factory for callers that import a function rather than the class."""
    return SqlDomainXFormChangeProvider(domains)
bsd-3-clause
5e0c2b538ffa2d2d81d69fe9fa326169
33.96
77
0.616133
4.055684
false
false
false
false
dimagi/commcare-hq
corehq/apps/userreports/reports/sum_when_templates.py
1
7827
import re  # retained: other code in this file/package may rely on it being imported here

from dimagi.ext.jsonobject import (
    IntegerProperty,
    JsonObject,
    ListProperty,
    StringProperty,
)

from corehq.apps.userreports.specs import TypeProperty


class SumWhenTemplateSpec(JsonObject):
    """Declarative template for a conditional-sum report column.

    ``expression`` is a SQL fragment containing ``?`` placeholders;
    ``binds`` supplies the values bound to those placeholders, and
    ``then`` is the value contributed when the condition matches.
    """
    type = StringProperty(required=True)        # template identifier, e.g. "year_range"
    expression = StringProperty(required=True)  # SQL fragment with '?' placeholders
    binds = ListProperty()                      # values bound to the placeholders, in order
    then = IntegerProperty()

    def bind_count(self):
        """Return the number of ``?`` placeholders in ``expression``.

        ``str.count`` replaces the previous ``len(re.sub(r'[^?]', '', ...))``
        round trip: same result, simpler and faster.
        """
        return self.expression.count('?')


### Templates for testing

class YearRangeTemplateSpec(SumWhenTemplateSpec):
    type = TypeProperty('year_range')
    expression = "year >= ? and year < ?"


class UnderXMonthsTemplateSpec(SumWhenTemplateSpec):
    type = TypeProperty("under_x_months")
    expression = "age_at_registration < ?"


# Templates for Use

class AdultFemaleMigrantDeathSpec(SumWhenTemplateSpec):
    type = TypeProperty("adult_female_migrant_death")
    expression = "sex = 'F' AND resident IS DISTINCT FROM 1 AND age_at_death_yrs >= 11"


class AdultFemaleResidentDeathSpec(SumWhenTemplateSpec):
    type = TypeProperty("adult_female_resident_death")
    expression = "sex = 'F' AND resident = 1 AND age_at_death_yrs >= 11"


class AgeAtDeathRangeMigrantSpec(SumWhenTemplateSpec):
    type = TypeProperty("age_at_death_range_migrant")
    expression = "sex = ? AND resident IS DISTINCT FROM 1 AND date_death - dob BETWEEN ? AND ?"


class AgeAtDeathRangeResidentSpec(SumWhenTemplateSpec):
    type = TypeProperty("age_at_death_range_resident")
    expression = "sex = ? AND resident = 1 AND date_death - dob BETWEEN ? AND ?"


class CCSPhaseNullTemplateSpec(SumWhenTemplateSpec):
    type = TypeProperty("ccs_phase_null")
    expression = "ccs_phase IS NULL"


class CCSPhaseTemplateSpec(SumWhenTemplateSpec):
    type = TypeProperty("ccs_phase")
    expression = "ccs_phase = ?"


class ComplementaryFeedingTemplateSpec(SumWhenTemplateSpec):
    type = TypeProperty("complementary_feeding")
    expression = "is_cf = ?"
# Concrete "sum when" templates: each subclass fixes ``type`` to a unique
# identifier and ``expression`` to a SQL fragment whose ``?`` placeholders
# are filled from ``binds`` (see SumWhenTemplateSpec).
# NOTE(review): ``~`` appears to be the Postgres regex-match operator and
# ``IS DISTINCT FROM`` the null-safe comparison — confirm the target DB is
# Postgres.  Mixed-case keywords (``and`` vs ``AND``, ``in`` vs ``IN``) are
# preserved verbatim: the strings are runtime SQL and must not be touched.

class ClosedOnNullTemplateSpec(SumWhenTemplateSpec):
    # Case is still open (never closed).
    type = TypeProperty("closed_on_null")
    expression = "closed_on IS NULL"


# --- Deaths ---

class FemaleAgeAtDeathSpec(SumWhenTemplateSpec):
    type = TypeProperty("female_age_at_death")
    expression = "female_death_type IS NOT NULL AND female_death_type != '' AND age_at_death_yrs >= ?"


class FemaleDeathTypeMigrantSpec(SumWhenTemplateSpec):
    type = TypeProperty("female_death_type_migrant")
    expression = "female_death_type = ? AND resident IS DISTINCT FROM 1"


class FemaleDeathTypeResidentSpec(SumWhenTemplateSpec):
    type = TypeProperty("female_death_type_resident")
    expression = "female_death_type = ? AND resident = 1"


# --- Open cases: disability / female ---

class OpenDisabilityTypeSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_disability_type")
    expression = "closed_on IS NULL AND disability_type ~ ?"


class OpenFemaleSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female")
    expression = "closed_on IS NULL AND sex = 'F'"


class OpenFemaleDisabledSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_disabled")
    expression = "closed_on IS NULL AND sex = 'F' and disabled = 1"


class OpenFemaleHHCasteSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_hh_caste")
    expression = "closed_on IS NULL AND sex = 'F' and hh_caste = ?"


class OpenFemaleHHCasteNotSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_hh_caste_not")
    expression = "closed_on IS NULL AND sex = 'F' and hh_caste NOT IN (?, ?)"


class OpenFemaleHHMinoritySpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_hh_minority")
    expression = "closed_on IS NULL AND sex = 'F' and hh_minority = 1"


class OpenFemaleMigrantSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_migrant")
    expression = "closed_on IS NULL AND sex = 'F' AND resident != 1"


class OpenFemaleMigrantDistinctFromSpec(SumWhenTemplateSpec):
    # Null-safe variant of the migrant check above.
    type = TypeProperty("open_female_migrant_distinct_from")
    expression = "closed_on IS NULL AND sex = 'F' AND resident IS DISTINCT FROM 1"


class OpenFemaleResidentSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_female_resident")
    expression = "closed_on IS NULL AND sex = 'F' AND resident = 1"


# --- Open cases: male/other ---

class OpenMaleDisabledSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_disabled")
    expression = "closed_on IS NULL AND sex IN ('M', 'O') and disabled = 1"


class OpenMaleHHCasteSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_hh_caste")
    expression = "closed_on IS NULL AND sex IN ('M', 'O') and hh_caste = ?"


class OpenMaleHHCasteNotSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_hh_caste_not")
    expression = "closed_on IS NULL AND sex in ('M', 'O') and hh_caste NOT IN (?, ?)"


class OpenMaleHHMinoritySpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_hh_minority")
    expression = "closed_on IS NULL AND sex in ('M', 'O') and hh_minority = 1"


class OpenMaleMigrantSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_migrant")
    expression = "closed_on IS NULL AND sex IN ('M', 'O') AND resident != 1"


class OpenMaleMigrantDistinctFromSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_migrant_distinct_from")
    expression = "closed_on IS NULL AND sex IN ('M', 'O') AND resident IS DISTINCT FROM 1"


class OpenMaleResidentSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_male_resident")
    expression = "closed_on IS NULL AND sex IN ('M', 'O') AND resident = 1"


# --- Open cases: pregnancy ---

class OpenPregnantMigrantSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_pregnant_migrant")
    expression = "closed_on IS NULL AND is_pregnant = 1 and sex = 'F' AND resident != 1"


class OpenPregnantResidentSpec(SumWhenTemplateSpec):
    type = TypeProperty("open_pregnant_resident")
    expression = "closed_on IS NULL AND is_pregnant = 1 and sex = 'F' AND resident = 1"


# --- Referrals: reached facility, matching 1..5 health problems ---

class ReachedReferralHealthProblemSpec(SumWhenTemplateSpec):
    type = TypeProperty("reached_referral_health_problem")
    expression = "referral_reached_facility = ? AND referral_health_problem ~ ?"


class ReachedReferralHealthProblem2ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("reached_referral_health_problem_2_problems")
    expression = "referral_reached_facility = ? AND (referral_health_problem ~ ? OR referral_health_problem ~ ?)"


class ReachedReferralHealthProblem3ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("reached_referral_health_problem_3_problems")
    expression = "referral_reached_facility = ? AND (referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ?)"


class ReachedReferralHealthProblem5ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("reached_referral_health_problem_5_problems")
    expression = "referral_reached_facility = ? AND (referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ?)"


# --- Referrals: health problem match only (facility not checked) ---

class ReferralHealthProblemSpec(SumWhenTemplateSpec):
    type = TypeProperty("referral_health_problem")
    expression = "referral_health_problem ~ ?"


class ReferralHealthProblem2ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("referral_health_problem_2_problems")
    expression = "referral_health_problem ~ ? OR referral_health_problem ~ ?"


class ReferralHealthProblem3ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("referral_health_problem_3_problems")
    expression = "referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ?"


class ReferralHealthProblem5ProblemsSpec(SumWhenTemplateSpec):
    type = TypeProperty("referral_health_problem_5_problems")
    expression = "referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ? OR referral_health_problem ~ ?"
bsd-3-clause
a2e4f353f02d19ca0287b5d2ea32ecfb
34.739726
206
0.734125
3.334896
false
false
false
false
dimagi/commcare-hq
corehq/apps/export/det/schema_generator.py
1
8151
from django.utils.translation import gettext_lazy as _ from corehq.apps.data_dictionary.models import CaseProperty from corehq.apps.export.det.base import DETRow, DETTable, DETConfig from corehq.apps.export.det.exceptions import DETConfigError from corehq.apps.export.models import FormExportInstance, CaseExportInstance, CaseIndexExportColumn from corehq.apps.userreports import datatypes PROPERTIES_PREFIX = 'properties.' ID_FIELD = 'id' FORM_ID_SOURCE = 'id' CASE_ID_SOURCE = 'case_id' # maps Case fields to the API field names used in CommCareCaseResource CASE_API_PATH_MAP = { 'closed': 'closed', 'closed_on': 'date_closed', 'date_modified': 'date_modified', 'external_id': 'properties.external_id', 'opened_on': 'properties.date_opened', 'owner_id': 'properties.owner_id', '_id': 'id', 'name': 'properties.case_name', 'opened_by': 'opened_by', 'server_modified_on': 'server_date_modified', 'server_opened_on': 'server_date_opened', 'type': 'properties.case_type', 'user_id': 'user_id', } FORM_API_PATH_MAP = { 'xmlns': 'form.@xmlns', } MAP_VIA_STR2DATE = 'str2date' MAP_VIA_STR2NUM = 'str2num' class DefaultDETSchemaHelper(object): """ Helper to do datatype transformations, etc. 
during schema generation """ def get_path(self, input_column): return self.transform_path(input_column.item.readable_path) @staticmethod def transform_path(input_path): return input_path @staticmethod def get_map_via(export_item): return { datatypes.DATA_TYPE_DATETIME: MAP_VIA_STR2DATE, datatypes.DATA_TYPE_DATE: MAP_VIA_STR2DATE, datatypes.DATA_TYPE_INTEGER: MAP_VIA_STR2NUM, datatypes.DATA_TYPE_DECIMAL: MAP_VIA_STR2NUM, }.get(export_item.datatype, '') class CaseDETSchemaHelper(DefaultDETSchemaHelper): """ Schema helper for cases """ def __init__(self, dd_property_types): self.dd_property_types = dd_property_types def get_path(self, input_column): if isinstance(input_column, CaseIndexExportColumn): # this is an obscure but correct reference to the index reference ID # typically "parent", occasionally "host", rarely miscellaneous other things... # https://github.com/dimagi/commcare-hq/pull/29530/files#r613936070 index_ref_id = input_column.item.label.split('.')[0] return f'indices.{index_ref_id}.case_id' input_path = input_column.item.readable_path return CASE_API_PATH_MAP.get(input_path, f'{PROPERTIES_PREFIX}{input_path}') def get_map_via(self, export_item): explicit_type = super().get_map_via(export_item) if not explicit_type and export_item.readable_path in self.dd_property_types: return _dd_type_to_det_type(self.dd_property_types[export_item.readable_path]) return explicit_type class FormDETSchemaHelper(DefaultDETSchemaHelper): """ Schema helper for forms """ @staticmethod def transform_path(input_path): # either return hard-coded lookup or the path with no modifications return FORM_API_PATH_MAP.get(input_path, input_path) class RepeatDETSchemaHelper(DefaultDETSchemaHelper): """ Schema helper for form repeats """ def __init__(self, base_path): self.base_path = base_path def transform_path(self, input_path): # for repeats strip the base path from the input path return input_path.replace(f'{self.base_path}.', '') def generate_from_export_instance(export_instance, 
output_file): if isinstance(export_instance, CaseExportInstance): return generate_from_case_export_instance(export_instance, output_file) elif isinstance(export_instance, FormExportInstance): return generate_from_form_export_instance(export_instance, output_file) else: raise DETConfigError(_('Export instance type {name} not supported!').format( name=type(export_instance).__name__ )) def generate_from_case_export_instance(export_instance, output_file): assert isinstance(export_instance, CaseExportInstance) if not export_instance.selected_tables: raise DETConfigError(_('No Tables found in Export {name}').format(name=export_instance.name)) main_input_table = export_instance.selected_tables[0] main_output_table = DETTable( name=main_input_table.label, source='case', filter_name='type', filter_value=export_instance.case_type, rows=[], ) output = DETConfig(name=export_instance.name, tables=[main_output_table]) dd_property_types_by_name = _get_dd_property_types(export_instance.domain, export_instance.case_type) helper = CaseDETSchemaHelper(dd_property_types=dd_property_types_by_name) main_output_table.rows.append(DETRow(source_field='domain', field='domain')) _add_rows_for_table(main_input_table, main_output_table, helper=helper) _add_id_row_if_necessary(main_output_table, CASE_ID_SOURCE) # todo: add rows for other tables output.export_to_file(output_file) def _get_dd_property_types(domain, case_type): """ Get a dictionary of property types by name (from the data dictionary) for a given domain, case_type. e.g. 
{ "name": "plain", "location": "gps", "event_date": "date", } """ return dict( CaseProperty.objects.filter( case_type__domain=domain, case_type__name=case_type, ).values_list('name', 'data_type') ) def _dd_type_to_det_type(data_dictionary_datatype): return { 'date': MAP_VIA_STR2DATE, 'number': MAP_VIA_STR2NUM, }.get(data_dictionary_datatype, '') def generate_from_form_export_instance(export_instance, output_file): assert isinstance(export_instance, FormExportInstance) if not export_instance.selected_tables: raise DETConfigError(_('No Tables found in Export {name}').format(name=export_instance.name)) output = DETConfig(name=export_instance.name) for input_table in export_instance.selected_tables: if _is_main_form_table(input_table): output_table = DETTable( name=input_table.label, source='form', filter_name='xmlns', filter_value=export_instance.xmlns, rows=[], ) output_table.rows.append(DETRow(source_field='domain', field='domain')) _add_rows_for_table(input_table, output_table, helper=FormDETSchemaHelper()) _add_id_row_if_necessary(output_table, FORM_ID_SOURCE) else: output_table = DETTable( name=input_table.label, source=f'form.{input_table.readable_path}[*]', filter_name='xmlns', filter_value=export_instance.xmlns, rows=[], ) helper = RepeatDETSchemaHelper(base_path=input_table.readable_path) _add_rows_for_table(input_table, output_table, helper=helper) output.tables.append(output_table) output.export_to_file(output_file) def _is_main_form_table(table_configuration): return table_configuration.readable_path == '' def _add_id_row_if_necessary(output_table, source_value): # DET requires an "id" field to exist to use SQL export. # Insert one at the beginning of the table if it doesn't exist. 
if not any(row.field == ID_FIELD for row in output_table.rows): output_table.rows.insert(0, DETRow( source_field=source_value, field=ID_FIELD, )) def _add_rows_for_table(input_table, output_table, helper=None): if helper is None: helper = DefaultDETSchemaHelper() for column in input_table.selected_columns: det_row = _get_det_row_for_export_column(column, helper) output_table.rows.append(det_row) def _get_det_row_for_export_column(column, helper): return DETRow( source_field=helper.get_path(column), field=column.label, map_via=helper.get_map_via(column.item) )
bsd-3-clause
d358a5c63d20eba4351e5f6663d8c82c
34.75
105
0.658815
3.559389
false
false
false
false
dimagi/commcare-hq
corehq/apps/export/filters.py
1
5503
from corehq.apps.es import filters as esfilters from corehq.apps.es.cases import ( closed_range, is_closed, modified_range, opened_by, opened_range, owner, owner_type, user, server_modified_range, ) from corehq.apps.es.forms import app, submitted, user_id, user_type from corehq.apps.es.sms import received as sms_received from corehq.apps.export.esaccessors import get_groups_user_ids from corehq.pillows.utils import USER_TYPES def _assert_user_types(user_types): if isinstance(user_types, str): user_types = [user_types] for type_ in user_types: assert type_ in USER_TYPES, "Expected user type to be in {}, got {}".format(USER_TYPES, type_) class ExportFilter(object): """ Abstract base class for an export filter on a single case or form property """ def to_es_filter(self): """ Return an ES filter representing this filter """ raise NotImplementedError class OR(ExportFilter): def __init__(self, *args): self.operand_filters = args def to_es_filter(self): return esfilters.OR(*[f.to_es_filter() for f in self.operand_filters]) class AND(ExportFilter): def __init__(self, *args): self.operand_filters = args def to_es_filter(self): return esfilters.AND(*[f.to_es_filter() for f in self.operand_filters]) class NOT(ExportFilter): def __init__(self, _filter): self.operand_filter = _filter def to_es_filter(self): return esfilters.NOT(self.operand_filter.to_es_filter()) class TermFilter(ExportFilter): def __init__(self, term, value): self.term = term self.value = value def to_es_filter(self): return esfilters.term(self.term, self.value) class AppFilter(ExportFilter): """ Filter on app_id """ def __init__(self, app_id): self.app_id = app_id def to_es_filter(self): return app(self.app_id) class RangeExportFilter(ExportFilter): def __init__(self, gt=None, gte=None, lt=None, lte=None): self.gt = gt self.gte = gte self.lt = lt self.lte = lte class OwnerFilter(ExportFilter): """ Filter on owner_id """ def __init__(self, owner_id): self.owner_id = owner_id def to_es_filter(self): return 
owner(self.owner_id) class OwnerTypeFilter(ExportFilter): def __init__(self, owner_type): _assert_user_types(owner_type) self.owner_types = owner_type def to_es_filter(self): return owner_type(self.owner_types) class IsClosedFilter(ExportFilter): """ Filter on case closed property """ def __init__(self, is_closed): self.is_closed = is_closed def to_es_filter(self): return is_closed(self.is_closed) class NameFilter(TermFilter): def __init__(self, case_name): super(NameFilter, self).__init__('name', case_name) self.case_name = case_name class OpenedOnRangeFilter(RangeExportFilter): def to_es_filter(self): return opened_range(self.gt, self.gte, self.lt, self.lte) class OpenedByFilter(ExportFilter): def __init__(self, opened_by): self.opened_by = opened_by def to_es_filter(self): return opened_by(self.opened_by) class ModifiedOnRangeFilter(RangeExportFilter): def to_es_filter(self): return modified_range(self.gt, self.gte, self.lt, self.lte) class ServerModifiedOnRangeFilter(RangeExportFilter): def to_es_filter(self): return server_modified_range(self.gt, self.gte, self.lt, self.lte) class LastModifiedByFilter(ExportFilter): def __init__(self, last_modified_by): self.last_modified_by = last_modified_by def to_es_filter(self): return user(self.last_modified_by) class ClosedOnRangeFilter(RangeExportFilter): def to_es_filter(self): return closed_range(self.gt, self.gte, self.lt, self.lte) class ClosedByFilter(TermFilter): def __init__(self, closed_by): super(ClosedByFilter, self).__init__('closed_by', closed_by) class GroupFilter(ExportFilter): # Abstract base class base_filter = None def __init__(self, group_ids): if not isinstance(group_ids, list): group_ids = [group_ids] self.group_ids = group_ids def to_es_filter(self): user_ids = get_groups_user_ids(self.group_ids) return self.base_filter(user_ids).to_es_filter() class GroupOwnerFilter(GroupFilter): base_filter = OwnerFilter class GroupLastModifiedByFilter(GroupFilter): base_filter = LastModifiedByFilter class 
GroupClosedByFilter(GroupFilter): base_filter = ClosedByFilter class ReceivedOnRangeFilter(RangeExportFilter): def to_es_filter(self): return submitted(self.gt, self.gte, self.lt, self.lte) class FormSubmittedByFilter(ExportFilter): def __init__(self, submitted_by): self.submitted_by = submitted_by def to_es_filter(self): return user_id(self.submitted_by) class UserTypeFilter(ExportFilter): def __init__(self, user_types): _assert_user_types(user_types) self.user_types = user_types def to_es_filter(self): return user_type(self.user_types) class GroupFormSubmittedByFilter(GroupFilter): base_filter = FormSubmittedByFilter class SmsReceivedRangeFilter(RangeExportFilter): def to_es_filter(self): return sms_received(self.gt, self.gte, self.lt, self.lte)
bsd-3-clause
951c6d2cb71dee5af4eede8fe193c6aa
21.834025
102
0.654189
3.456658
false
false
false
false
onepercentclub/bluebottle
bluebottle/members/tests/test_unit.py
1
6456
import datetime from datetime import timedelta from unittest import mock from django.contrib.auth.password_validation import get_default_password_validators from django.utils.timezone import now from pytz import UTC from bluebottle.clients.models import Client from bluebottle.clients.utils import LocalTenant from bluebottle.members.models import MemberPlatformSettings from bluebottle.test.factory_models.accounts import BlueBottleUserFactory from bluebottle.test.utils import BluebottleTestCase from bluebottle.test.utils import override_properties from bluebottle.time_based.tests.factories import ( DateActivityFactory, DateActivitySlotFactory, DateParticipantFactory, SlotParticipantFactory ) class TestMonkeyPatchPasswordValidators(BluebottleTestCase): password_validators = [ { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': { 'min_length': 10, } }, ] def test_validators_taken_from_settings_by_default(self): validators = get_default_password_validators() self.assertEqual(validators[0].min_length, 8) def test_validators_taken_from_properties(self): with override_properties(AUTH_PASSWORD_VALIDATORS=self.password_validators): validators = get_default_password_validators() self.assertEqual(validators[0].min_length, 10) def test_validators_different_tenant(self): with override_properties(AUTH_PASSWORD_VALIDATORS=self.password_validators): validators = get_default_password_validators() self.assertEqual(validators[0].min_length, 10) with LocalTenant(Client.objects.get(client_name='test2')): validators = get_default_password_validators() self.assertEqual(validators[0].min_length, 8) class MemberTestCase(BluebottleTestCase): def setUp(self): self.user = BlueBottleUserFactory.create() def test_no_hours_spent(self): self.assertEqual( self.user.hours_spent, 0 ) self.assertEqual( self.user.hours_planned, 0 ) def test_hours_spent(self): activity = DateActivityFactory.create( slot_selection='free' ) slot1 = DateActivitySlotFactory.create( 
activity=activity, start=now() - timedelta(days=1), duration=timedelta(hours=3) ) slot2 = DateActivitySlotFactory.create( activity=activity, start=now() + timedelta(days=1), duration=timedelta(hours=2) ) participant = DateParticipantFactory.create( activity=activity, user=self.user ) SlotParticipantFactory.create( participant=participant, slot=slot1 ) SlotParticipantFactory.create( participant=participant, slot=slot2 ) slot1.states.finish(save=True) self.assertEqual( self.user.hours_planned, 2 ) self.assertEqual( self.user.hours_spent, 3 ) def asserTimeSpent(self, when, expected): from bluebottle.members import models with mock.patch.object(models, 'now', return_value=when): self.assertEqual( self.user.hours_spent, expected ) def test_hours_spent_fiscal_year(self): jan20 = datetime.datetime(2020, 1, 3, tzinfo=UTC) feb20 = datetime.datetime(2020, 2, 3, tzinfo=UTC) aug20 = datetime.datetime(2020, 8, 3, tzinfo=UTC) nov20 = datetime.datetime(2020, 11, 3, tzinfo=UTC) nov19 = datetime.datetime(2019, 11, 3, tzinfo=UTC) activity = DateActivityFactory.create( slot_selection='free' ) slot1 = DateActivitySlotFactory.create( activity=activity, start=jan20, duration=timedelta(hours=1) ) slot2 = DateActivitySlotFactory.create( activity=activity, start=feb20, duration=timedelta(hours=2) ) slot3 = DateActivitySlotFactory.create( activity=activity, start=aug20, duration=timedelta(hours=4) ) slot4 = DateActivitySlotFactory.create( activity=activity, start=nov20, duration=timedelta(hours=8) ) slot5 = DateActivitySlotFactory.create( activity=activity, start=nov19, duration=timedelta(hours=20) ) participant = DateParticipantFactory.create( activity=activity, user=self.user ) SlotParticipantFactory.create( participant=participant, slot=slot1 ) SlotParticipantFactory.create( participant=participant, slot=slot2 ) SlotParticipantFactory.create( participant=participant, slot=slot3 ) SlotParticipantFactory.create( participant=participant, slot=slot4 ) SlotParticipantFactory.create( 
participant=participant, slot=slot5 ) slot1.states.finish(save=True) slot2.states.finish(save=True) slot3.states.finish(save=True) slot4.states.finish(save=True) slot5.states.finish(save=True) platform_settings = MemberPlatformSettings.load() platform_settings.fiscal_month_offset = 0 platform_settings.save() sep20 = datetime.datetime(2020, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep20, 15) sep19 = datetime.datetime(2019, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep19, 20) platform_settings.fiscal_month_offset = -4 platform_settings.save() sep20 = datetime.datetime(2020, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep20, 8) sep19 = datetime.datetime(2019, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep19, 27) platform_settings.fiscal_month_offset = 2 platform_settings.save() sep20 = datetime.datetime(2020, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep20, 12) sep19 = datetime.datetime(2019, 9, 15, tzinfo=UTC) self.asserTimeSpent(sep19, 23)
bsd-3-clause
3cd9189d9c091512db33a12010661947
29.309859
85
0.615242
4.227898
false
true
false
false
onepercentclub/bluebottle
bluebottle/funding/migrations/0003_auto_20190604_1459.py
1
1369
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2019-06-04 12:59 from __future__ import unicode_literals import bluebottle.utils.fields from decimal import Decimal from django.db import migrations import djmoney.models.fields class Migration(migrations.Migration): dependencies = [ ('funding', '0002_auto_20190604_1458'), ] operations = [ migrations.AlterField( model_name='donation', name='amount', field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12), ), migrations.AlterField( model_name='donation', name='amount_currency', field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=50), ), migrations.AlterField( model_name='funding', name='target', field=bluebottle.utils.fields.MoneyField(currency_choices="[('EUR', u'Euro')]", decimal_places=2, default=Decimal('0.0'), max_digits=12), ), migrations.AlterField( model_name='funding', name='target_currency', field=djmoney.models.fields.CurrencyField(choices=[(b'EUR', 'Euro')], default='EUR', editable=False, max_length=50), ), ]
bsd-3-clause
65c56168c9f546a130b7baa506357831
35.026316
149
0.611395
3.792244
false
false
false
false
dimagi/commcare-hq
corehq/util/workbook_json/excel.py
1
10173
import io from zipfile import BadZipfile from tempfile import NamedTemporaryFile import openpyxl from openpyxl.utils.exceptions import InvalidFileException from django.core.files.uploadedfile import UploadedFile from django.utils.translation import gettext as _ class InvalidExcelFileException(Exception): pass class JSONReaderError(Exception): pass class HeaderValueError(Exception): pass class StringTypeRequiredError(Exception): pass class WorkbookJSONError(Exception): pass class IteratorJSONReader(object): """ >>> def normalize(it): ... r = [] ... for row in IteratorJSONReader(it): ... r.append(sorted(row.items())) ... return r >>> normalize([]) [] >>> normalize([['A', 'B', 'C'], ['1', '2', '3']]) [[('A', '1'), ('B', '2'), ('C', '3')]] >>> normalize([['A', 'data: key', 'user 1', 'user 2', 'is-ok?'], ... ['1', '2', '3', '4', 'yes']]) [[('A', '1'), ('data', {'key': '2'}), ('is-ok', True), ('user', ['3', '4'])]] """ def __init__(self, rows): # you can only call __iter__ once self._rows = iter(rows) try: self.headers = list(next(self._rows)) except StopIteration: self.headers = [] self.fieldnames = self.get_fieldnames() def row_to_json(self, row): obj = {} for value, header in zip(row, self.headers): self.set_field_value(obj, header, value) return obj def __iter__(self): try: for row in self._rows: yield self.row_to_json(row) finally: del self._rows def get_fieldnames(self): obj = {} for field, value in zip(self.headers, [''] * len(self.headers)): if not isinstance(field, str): raise HeaderValueError('Field %s is not a string.' 
% field) self.set_field_value(obj, field, value) return list(obj) @classmethod def set_field_value(cls, obj, field, value): if isinstance(value, bytes): value = value.decode('utf-8') if isinstance(value, str): value = value.strip() # try dict try: field, subfield = field.split(':') except Exception: pass else: field = field.strip() if field not in obj: obj[field] = {} cls.set_field_value(obj[field], subfield, value) return # try list try: field, _ = field.split() except Exception: pass else: dud = {} cls.set_field_value(dud, field, value) (field, value), = list(dud.items()) if field not in obj: obj[field] = [] elif not isinstance(obj[field], list): obj[field] = [obj[field]] if value not in (None, ''): obj[field].append(value) return # else flat # try boolean try: field, nothing = field.split('?') assert(nothing.strip() == '') except Exception: pass else: try: value = { 'yes': True, 'true': True, 'no': False, 'false': False, '': False, None: False, }[value.lower() if hasattr(value, 'lower') else value] except KeyError: raise JSONReaderError( 'Values for field %s must be "yes" or "no", not "%s"' % ( field, value) ) # set for any flat type field = field.strip() if field in obj: raise JSONReaderError( 'You have a repeat field: %s' % field ) obj[field] = value def get_workbook(file_or_filename): try: return WorkbookJSONReader(file_or_filename) except (HeaderValueError, InvalidExcelFileException) as e: raise WorkbookJSONError(_( "Upload failed! " "Please make sure you are using a valid Excel 2007 or later (.xlsx) file. " "Error details: {}." ).format(e)) except JSONReaderError as e: raise WorkbookJSONError(_( "Upload failed due to a problem with Excel columns. Error details: {}." ).format(e)) except HeaderValueError as e: raise WorkbookJSONError(_( "Upload encountered a data type error: {}." ).format(e)) except AttributeError as e: raise WorkbookJSONError(_( "Error processing Excel file: {}." 
).format(e)) def get_single_worksheet(file_or_filename, title=None): workbook = get_workbook(file_or_filename) try: worksheet = workbook.get_worksheet(title=title) except WorksheetNotFound: raise WorkbookJSONError(_( "Could not find sheet '{title}'." ).format(title=title) if title else _("Uploaded file does not contian any sheets.")) return worksheet class WorksheetNotFound(Exception): def __init__(self, title): self.title = title super(WorksheetNotFound, self).__init__() class WorksheetJSONReader(IteratorJSONReader): def __init__(self, worksheet, title=None): width = 0 self.title = title self.worksheet = worksheet try: header_row = next(self.worksheet.iter_rows()) except StopIteration: header_row = [] for cell in header_row: if cell.value is None: break else: width += 1 self.worksheet.calculate_dimension(force=True) def iterator(): def _convert_float(value): """ excel doesn't distinguish between 1 and 1.0 if it can be an integer assume it is """ if isinstance(value, float) and int(value) == value: return int(value) else: # Specifically check for None so that we can allow a value of 0 return value if value is not None else '' for row in self.worksheet.iter_rows(): cell_values = [ _convert_float(cell.value) for cell in row[:width] ] if not any(cell != '' for cell in cell_values): break yield cell_values super(WorksheetJSONReader, self).__init__(iterator()) class WorkbookJSONReader(object): def __init__(self, file_or_filename): check_types = (UploadedFile, io.RawIOBase, io.BufferedIOBase) if isinstance(file_or_filename, check_types): tmp = NamedTemporaryFile(mode='wb', suffix='.xlsx', delete=False) file_or_filename.seek(0) tmp.write(file_or_filename.read()) file_or_filename.seek(0) tmp.close() file_or_filename = tmp.name try: self.wb = openpyxl.load_workbook(file_or_filename, read_only=True, data_only=True) except (BadZipfile, InvalidFileException, KeyError) as e: raise InvalidExcelFileException(str(e)) self.worksheets_by_title = {} self.worksheets = [] for worksheet 
in self.wb.worksheets: try: ws = WorksheetJSONReader(worksheet, title=worksheet.title) except IndexError: raise JSONReaderError('This Excel file has unrecognised formatting. Please try downloading ' 'the lookup table first, and then add data to it.') self.worksheets_by_title[worksheet.title] = ws self.worksheets.append(ws) def get_worksheet(self, title=None, index=None): if title is not None and index is not None: raise TypeError("Can only get worksheet by title *or* index") if title: try: return self.worksheets_by_title[title] except KeyError: raise WorksheetNotFound(title=title) elif index: try: return self.worksheets[index] except IndexError: raise WorksheetNotFound(title=index) else: try: return self.worksheets[0] except IndexError: raise WorksheetNotFound(title=0) def flatten_json_to_path(obj, path=()): if isinstance(obj, dict): for key, value in obj.items(): for item in flatten_json_to_path(value, path + (key,)): yield item elif isinstance(obj, list): for key, value in enumerate(obj): for item in flatten_json_to_path(value, path + (key,)): yield item else: yield (path, obj) def format_header(path, value): # pretty sure making a string-builder would be slower than concatenation s = path[0] for p in path[1:]: if isinstance(p, str): s += f': {p}' elif isinstance(p, int): s += f' {p + 1}' if isinstance(value, bool): s += '?' value = 'yes' if value else 'no' return s, value def flatten_json(obj): for key, value in flatten_json_to_path(obj): yield format_header(key, value) def json_to_headers(obj): return [key for key, value in sorted(flatten_json(obj), key=lambda t: alphanumeric_sort_key(t[0]))] def alphanumeric_sort_key(key): """ Sort the given iterable in the way that humans expect. 
Thanks to http://stackoverflow.com/a/2669120/240553 """ import re convert = lambda text: int(text) if text.isdigit() else text return [convert(c) for c in re.split('([0-9]+)', key)] def enforce_string_type(value): if isinstance(value, str): return value if isinstance(value, int): return str(value) # Don't try to guess for decimal types how they should be converted to string raise StringTypeRequiredError()
bsd-3-clause
d30bdd75511f6fef8c5de4cace4b3438
29.734139
108
0.539172
4.229938
false
false
false
false
dimagi/commcare-hq
corehq/apps/notifications/forms.py
1
3004
from django import forms from django.contrib.postgres.forms import SimpleArrayField from django.urls import reverse from django.utils.translation import gettext as _ from django.utils.translation import gettext_lazy from crispy_forms import bootstrap as twbscrispy from crispy_forms import layout as crispy from crispy_forms.helper import FormHelper from corehq.apps.hqwebapp import crispy as hqcrispy from .models import NOTIFICATION_TYPES, Notification class NotificationCreationForm(forms.Form): content = forms.CharField( label=gettext_lazy('Content'), max_length=140, widget=forms.Textarea(attrs={"class": "vertical-resize"}), ) url = forms.URLField( label=gettext_lazy('URL') ) type = forms.ChoiceField( label=gettext_lazy("Type"), choices=NOTIFICATION_TYPES, ) domain_specific = forms.BooleanField( label=gettext_lazy("This notification is not for all domains"), required=False ) domains = SimpleArrayField( base_field=forms.CharField(), label=gettext_lazy("Domains"), widget=forms.Textarea(attrs={"class": "vertical-resize"}), help_text=gettext_lazy("Enter a comma separated list of domains for this notification. 
" "This is only required if you have checked the box above."), required=False ) def __init__(self, *args, **kwargs): from corehq.apps.notifications.views import ManageNotificationView super(NotificationCreationForm, self).__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_method = 'POST' self.helper.form_class = 'form-horizontal' self.helper.form_action = '#' self.helper.label_class = 'col-sm-3 col-md-2' self.helper.field_class = 'col-sm-9 col-md-8 col-lg-6' self.helper.layout = crispy.Layout( crispy.Field('content'), crispy.Field('url'), crispy.Field('type'), hqcrispy.B3MultiField( "Domain Specific", crispy.Field('domain_specific') ), crispy.Field('domains'), hqcrispy.FormActions( twbscrispy.StrictButton( _("Submit Information"), type="submit", css_class="btn btn-primary", name="submit", ), hqcrispy.LinkButton( _("Cancel"), reverse(ManageNotificationView.urlname), css_class="btn btn-default", name="cancel", ), ), ) def save(self): data = self.cleaned_data Notification(content=data.get('content'), url=data.get('url'), type=data.get('type'), domain_specific=data.get('domain_specific'), domains=data.get('domains')).save()
bsd-3-clause
14fd3f9adc06c215dc685a9a88e420af
34.341176
96
0.581225
4.303725
false
false
false
false
dimagi/commcare-hq
corehq/apps/case_search/filter_dsl.py
1
9437
import re from dataclasses import dataclass from django.utils.translation import gettext as _ from eulxml.xpath import parse as parse_xpath from eulxml.xpath.ast import ( BinaryExpression, FunctionCall, Step, UnaryExpression, serialize, ) from corehq.apps.case_search.dsl_utils import unwrap_value from corehq.apps.case_search.exceptions import ( CaseFilterError, TooManyRelatedCasesError, XPathFunctionException, ) from corehq.apps.case_search.xpath_functions import ( XPATH_QUERY_FUNCTIONS, ) from corehq.apps.es import filters from corehq.apps.es.case_search import ( CaseSearchES, case_property_query, case_property_range_query, reverse_index_case_query, ) @dataclass class SearchFilterContext: domain: str fuzzy: bool = False def print_ast(node): """Prints the AST provided by eulxml.xpath.parse Useful for debugging particular expressions """ def visit(node, indent): print("\t" * indent, node) # noqa if hasattr(node, 'left'): indent += 1 visit(node.left, indent) if hasattr(node, 'op'): print("\t" * indent, "##### {} #####".format(node.op)) # noqa if hasattr(node, 'right'): visit(node.right, indent) indent -= 1 visit(node, 0) MAX_RELATED_CASES = 500000 # Limit each related case lookup to return 500,000 cases to prevent timeouts OPERATOR_MAPPING = { 'and': filters.AND, 'or': filters.OR, } RANGE_OP_MAPPING = { '>': 'gt', '>=': 'gte', '<': 'lt', '<=': 'lte', } EQ = "=" NEQ = "!=" COMPARISON_OPERATORS = [EQ, NEQ] + list(RANGE_OP_MAPPING.keys()) ALL_OPERATORS = COMPARISON_OPERATORS + list(OPERATOR_MAPPING.keys()) def build_filter_from_ast(node, context): """Builds an ES filter from an AST provided by eulxml.xpath.parse If fuzzy is true, all equality operations will be treated as fuzzy. """ def _walk_ancestor_cases(node): """Return a query that will fulfill the filter on the related case. :param node: a node returned from eulxml.xpath.parse of the form `parent/grandparent/property = 'value'` Since ES has no way of performing joins, we filter down in stages: 1. 
Find the ids of all cases where the condition is met 2. Walk down the case hierarchy, finding all related cases with the right identifier to the ids found in (1). 3. Return the lowest of these ids as an related case query filter """ # fetch the ids of the highest level cases that match the case_property # i.e. all the cases which have `property = 'value'` ids = _parent_property_lookup(node) # get the related case path we need to walk, i.e. `parent/grandparent/property` n = node.left while _is_ancestor_case_lookup(n): # This walks down the tree and finds case ids that match each identifier # This is basically performing multiple "joins" to find related cases since ES # doesn't have a way to relate models together # Get the path to the related case, e.g. `parent/grandparent` # On subsequent run throughs, it walks down the tree (e.g. n = [parent, /, grandparent]) n = n.left identifier = serialize(n.right) # the identifier at this step, e.g. `grandparent` # get the ids of the cases that point at the previous level's cases # this has the potential of being a very large list ids = _child_case_lookup(ids, identifier=identifier) if not ids: break # after walking the full tree, get the final level we are interested in, i.e. 
`parent` final_identifier = serialize(n.left) return reverse_index_case_query(ids, final_identifier) def _parent_property_lookup(node): """given a node of the form `parent/foo = 'thing'`, return all case_ids where `foo = thing` """ es_filter = _comparison_raw(node.left.right, node.op, node.right, node) es_query = CaseSearchES().domain(context.domain).filter(es_filter) if es_query.count() > MAX_RELATED_CASES: new_query = '{} {} "{}"'.format(serialize(node.left.right), node.op, node.right) raise TooManyRelatedCasesError( _("The related case lookup you are trying to perform would return too many cases"), new_query ) return es_query.scroll_ids() def _child_case_lookup(case_ids, identifier): """returns a list of all case_ids who have parents `case_id` with the relationship `identifier` """ return CaseSearchES().domain(context.domain).get_child_cases(case_ids, identifier).scroll_ids() def _is_ancestor_case_lookup(node): """Returns whether a particular AST node is an ancestory case lookup e.g. `parent/host/thing = 'foo'` """ return hasattr(node, 'left') and hasattr(node.left, 'op') and node.left.op == '/' def _is_subcase_count(node): """Returns whether a particular AST node is a subcase lookup. 
This is needed for subcase-count since we need the full expression, not just the function.""" if not isinstance(node, BinaryExpression): return False return isinstance(node.left, FunctionCall) and node.left.name == 'subcase-count' def _comparison(node): """Returns the filter for a comparison operation (=, !=, >, <, >=, <=) """ return _comparison_raw(node.left, node.op, node.right, node) def _comparison_raw(case_property_name_raw, op, value_raw, node): if not isinstance(case_property_name_raw, Step): raise CaseFilterError( _("We didn't understand what you were trying to do with {}").format(serialize(node)), serialize(node) ) case_property_name = serialize(case_property_name_raw) value = unwrap_value(value_raw, context) if op in [EQ, NEQ]: query = case_property_query(case_property_name, value, fuzzy=context.fuzzy) if op == NEQ: query = filters.NOT(query) return query else: try: return case_property_range_query(case_property_name, **{RANGE_OP_MAPPING[op]: value}) except (TypeError, ValueError): raise CaseFilterError( _("The right hand side of a comparison must be a number or date. 
" "Dates must be surrounded in quotation marks"), serialize(node), ) def visit(node): if isinstance(node, FunctionCall): if node.name in XPATH_QUERY_FUNCTIONS: return XPATH_QUERY_FUNCTIONS[node.name](node, context) else: raise XPathFunctionException( _("'{name}' is not a valid standalone function").format(name=node.name), serialize(node) ) if not hasattr(node, 'op'): raise CaseFilterError( _("Your search query is required to have at least one boolean operator ({boolean_ops})").format( boolean_ops=", ".join(COMPARISON_OPERATORS), ), serialize(node) ) if _is_ancestor_case_lookup(node): # this node represents a filter on a property for a related case return _walk_ancestor_cases(node) if _is_subcase_count(node): return XPATH_QUERY_FUNCTIONS['subcase-count'](node, context) if node.op in COMPARISON_OPERATORS: # This node is a leaf return _comparison(node) if node.op in list(OPERATOR_MAPPING.keys()): # This is another branch in the tree return OPERATOR_MAPPING[node.op](visit(node.left), visit(node.right)) raise CaseFilterError( _("We don't know what to do with operator '{}'. Please try reformatting your query.".format(node.op)), serialize(node) ) return visit(node) def build_filter_from_xpath(domain, xpath, fuzzy=False): """Given an xpath expression this function will generate an Elasticsearch filter""" error_message = _( "We didn't understand what you were trying to do with {}. " "Please try reformatting your query. " "The operators we accept are: {}" ) context = SearchFilterContext(domain, fuzzy) try: return build_filter_from_ast(parse_xpath(xpath), context) except TypeError as e: text_error = re.search(r"Unknown text '(.+)'", str(e)) if text_error: # This often happens if there is a bad operator (e.g. 
a ~ b) bad_part = text_error.groups()[0] raise CaseFilterError(error_message.format(bad_part, ", ".join(ALL_OPERATORS)), bad_part) raise CaseFilterError(_("Malformed search query"), None) except RuntimeError as e: # eulxml passes us string errors from YACC lex_token_error = re.search(r"LexToken\((\w+),\w?'(.+)'", str(e)) if lex_token_error: bad_part = lex_token_error.groups()[1] raise CaseFilterError(error_message.format(bad_part, ", ".join(ALL_OPERATORS)), bad_part) raise CaseFilterError(_("Malformed search query"), None)
bsd-3-clause
caac651b5d33d7e1d27cae2e9e059e6f
35.719844
114
0.611317
4.005518
false
false
false
false
dimagi/commcare-hq
corehq/apps/export/tests/test_export_item.py
1
2922
from collections import namedtuple from django.test import SimpleTestCase from unittest import mock from corehq.apps.export.models import ( ExportColumn, ExportItem, MultipleChoiceItem, Option, ) from corehq.apps.export.models.new import MAIN_TABLE, PathNode MockRequest = namedtuple('MockRequest', 'domain') @mock.patch( 'corehq.apps.export.models.new.get_request_domain', return_value=MockRequest(domain='my-domain'), ) class TestExportItemGeneration(SimpleTestCase): app_id = '1234' def setUp(self): self.item = ExportItem( path=[PathNode(name='data'), PathNode(name='question1')], label='Question One', last_occurrences={self.app_id: 3}, ) def test_create_default_from_export_item(self, _): column = ExportColumn.create_default_from_export_item(MAIN_TABLE, self.item, {self.app_id: 3}) self.assertEqual(column.is_advanced, False) self.assertEqual(column.is_deleted, False) self.assertEqual(column.label, 'data.question1') self.assertEqual(column.selected, True) def test_create_default_from_export_item_deleted(self, _): column = ExportColumn.create_default_from_export_item(MAIN_TABLE, self.item, {self.app_id: 4}) self.assertEqual(column.is_advanced, False) self.assertEqual(column.is_deleted, True) self.assertEqual(column.label, 'data.question1') self.assertEqual(column.selected, False) def test_create_default_from_export_item_not_main_table(self, _): column = ExportColumn.create_default_from_export_item(['other_table'], self.item, {self.app_id: 3}) self.assertEqual(column.is_advanced, False) self.assertEqual(column.is_deleted, False) self.assertEqual(column.label, 'data.question1') self.assertEqual(column.selected, False) def test_wrap_export_item(self, _): path = [PathNode(name="foo"), PathNode(name="bar")] item = ExportItem(path=path) wrapped = ExportItem.wrap(item.to_json()) self.assertEqual(type(wrapped), type(item)) self.assertEqual(wrapped.to_json(), item.to_json()) def test_wrap_export_item_child(self, _): path = [PathNode(name="foo"), PathNode(name="bar")] item = 
MultipleChoiceItem(path=path, options=[Option(value="foo")]) wrapped = ExportItem.wrap(item.to_json()) self.assertEqual(type(wrapped), type(item)) self.assertEqual(wrapped.to_json(), item.to_json()) class CreateFromQuestionTests(SimpleTestCase): def test_removes_html_from_label(self): question = { 'label': '<span style="color:#ffffff">Enter a number</span>', 'value': '/data/enter_number', 'type': 'Int', } item = ExportItem.create_from_question(question, 'app_id', 'app_ersion', []) self.assertEqual(item.label, 'Enter a number')
bsd-3-clause
9f91845ce9e8fe5cc092d0abd0c0ddd4
35.525
107
0.659138
3.666248
false
true
false
false
dimagi/commcare-hq
corehq/apps/userreports/management/commands/delete_orphaned_ucrs.py
1
8076
from collections import defaultdict from textwrap import dedent from django.core.management.base import BaseCommand from sqlalchemy.exc import ProgrammingError from corehq.apps.domain.models import Domain from corehq.apps.userreports.models import ( DataSourceConfiguration, StaticDataSourceConfiguration, ) from corehq.apps.userreports.util import ( LEGACY_UCR_TABLE_PREFIX, UCR_TABLE_PREFIX, get_domain_for_ucr_table_name, get_table_name, ) from corehq.sql_db.connections import connection_manager class Command(BaseCommand): """ An orphaned UCR table is one where the related datasource no longer exists This command is designed to delete orphaned tables """ help = "Delete orphaned UCR tables" def add_arguments(self, parser): parser.add_argument( '--engine_id', action='store', help='Only check this DB engine', ) parser.add_argument( '--force-delete', action='store_true', default=False, help='Drop orphaned tables on active domains' ) parser.add_argument( '--domain', action='store', help='Drop orphaned tables for a specific domain' ) def handle(self, **options): orphaned_tables_by_engine_id = get_orphaned_tables_by_engine_id(options.get('engine_id')) ucrs_to_delete = get_deletable_ucrs(orphaned_tables_by_engine_id, force_delete=options['force_delete'], domain=options['domain']) tablenames_to_drop = confirm_deletion_with_user(ucrs_to_delete) if not tablenames_to_drop: exit(0) drop_tables(tablenames_to_drop) def confirm_deletion_with_user(ucrs_to_delete): if not ucrs_to_delete: print("There aren't any UCRs to delete.") return None tablenames_to_drop = defaultdict(list) for engine_id, ucr_infos in ucrs_to_delete.items(): print(f"The following UCRs will be deleted in the {engine_id} database:") for ucr_info in ucr_infos: print(f"\t{ucr_info['tablename']} with {ucr_info['row_count']} rows.") tablenames_to_drop[engine_id].append(ucr_info['tablename']) if not tablenames_to_drop: print("No orphaned tables were found") return None if get_input("Are you sure you want to run the 
delete operation? (y/n)") == 'y': return tablenames_to_drop return None def get_orphaned_tables_by_engine_id(engine_id=None): """ :param engine_id: optional parameter to only search within a specific database :return: {<engine_id>: [<tablename>, ...]} """ data_sources = get_all_data_sources() tables_by_engine_id = get_tables_for_data_sources(data_sources, engine_id) return get_tables_without_data_sources(tables_by_engine_id) def get_all_data_sources(): data_sources = list(DataSourceConfiguration.all()) data_sources.extend(list(StaticDataSourceConfiguration.all())) return data_sources def get_deletable_ucrs(orphaned_tables_by_id, force_delete=False, domain=None): """ Ensures tables are UCRs via inserted_at column :param orphaned_tables_by_id: {<engine_id>: [<tablename>, ...]} :param force_delete: if True, orphaned tables associated with active domains are marked as deletable :param domain: optional, but if specified will only gather ucrs from the specified domain :return: """ ucrs_to_delete = defaultdict(list) active_domains_with_orphaned_ucrs = set() for engine_id, tablenames in orphaned_tables_by_id.items(): engine = connection_manager.get_engine(engine_id) if not tablenames: continue for tablename in tablenames: domain_for_table = get_domain_for_ucr_table_name(tablename) if domain and domain != domain_for_table: continue if not force_delete and not is_domain_deleted(domain_for_table): active_domains_with_orphaned_ucrs.add(domain_for_table) continue with engine.begin() as connection: try: result = connection.execute(f'SELECT COUNT(*), MAX(inserted_at) FROM "{tablename}"') except ProgrammingError: print(f"\t{tablename}: no inserted_at column, probably not UCR") except Exception as e: print(f"\tAn error was encountered when attempting to read from {tablename}: {e}") else: row_count, idle_since = result.fetchone() ucrs_to_delete[engine_id].append({'tablename': tablename, 'row_count': row_count}) if not force_delete and active_domains_with_orphaned_ucrs: 
formatted_domains = '\n'.join(sorted(active_domains_with_orphaned_ucrs)) print(dedent(""" {} active domain(s) have orphaned ucrs. Use the '--domain' option to further inspect a specific domain. Use the '--force-delete' option if you are sure you want to delete all orphaned ucrs. The domains are: {} """).format(len(active_domains_with_orphaned_ucrs), formatted_domains)) return ucrs_to_delete def drop_tables(ucrs_to_delete): """ :param ucrs_to_delete: {'<engine_id>': [<tablename>, ...]} """ for engine_id, tablenames in ucrs_to_delete.items(): engine = connection_manager.get_engine(engine_id) if not tablenames: continue for tablename in tablenames: with engine.begin() as connection: connection.execute(f'DROP TABLE "{tablename}"') print(f'\tDeleted {tablename}') def is_domain_deleted(domain): """ Ensure that the domain exists in the deleted_domain view, AND not in the active domain view :param domain: :return: True if deleted, False if not """ deleted_domains = Domain.get_deleted_domain_names() active_domains = set(Domain.get_all_names()) return domain in deleted_domains and domain not in active_domains def get_tables_for_data_sources(data_sources, engine_id): """ :param data_sources: :param engine_id: optional parameter to limit results to one db engine :return: a dictionary in the form of {<engine_id>: [<tables>], ...} """ tables_by_engine_id = defaultdict(set) for data_source in data_sources: if engine_id and data_source.engine_id != engine_id: continue table_name = get_table_name(data_source.domain, data_source.table_id) tables_by_engine_id[data_source.engine_id].add(table_name) return tables_by_engine_id def get_tables_without_data_sources(tables_by_engine_id): """ :param tables_by_engine_id: :return: a dictionary in the form of {<engine_id>: [<tables], ...} """ tables_without_data_sources = defaultdict(list) for engine_id, expected_tables in tables_by_engine_id.items(): try: engine = connection_manager.get_engine(engine_id) except KeyError: print(f"Engine id 
{engine_id} does not exist anymore. Skipping.") continue with engine.begin() as connection: # Using string formatting rather than execute with %s syntax # is acceptable here because the strings we're inserting are static # and only templated for DRYness results = connection.execute(f""" SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND ( table_name LIKE '{UCR_TABLE_PREFIX}%%' OR table_name LIKE '{LEGACY_UCR_TABLE_PREFIX}%%' ); """).fetchall() tables_in_db = {r[0] for r in results} tables_without_data_sources[engine_id] = tables_in_db - expected_tables return tables_without_data_sources def get_input(message): return input(message)
bsd-3-clause
b9c7349e4455c33db778ffda42f34921
35.542986
111
0.629148
3.93184
false
false
false
false
dimagi/commcare-hq
corehq/apps/callcenter/queries.py
1
8169
from abc import ABCMeta, abstractmethod, abstractproperty from memoized import memoized from sqlalchemy import distinct, func from sqlalchemy.sql import and_, label, operators, or_, select from corehq.apps.callcenter.const import * class BaseQuery(metaclass=ABCMeta): @abstractproperty def sql_adapter(self): raise NotImplementedError @property @memoized def sql_table(self): return self.sql_adapter.get_table() def _run_query(self, query): with self.sql_adapter.session_helper.session_context() as session: data = list(session.execute(query)) self.sql_adapter.track_load(len(data)) return data class CaseQuery(BaseQuery): def __init__(self, domain, cc_case_type, owners_needing_data): self.owners_needing_data = owners_needing_data self.cc_case_type = cc_case_type self.domain = domain @abstractmethod def get_results(self, include_type_in_result, limit_types, start_date, end_date): raise NotImplementedError @property def sql_adapter(self): from corehq.apps.callcenter.data_source import get_sql_adapters_for_domain return get_sql_adapters_for_domain(self.domain).cases @property def owner_column(self): return self.sql_table.c.owner_id @property def type_column(self): return self.sql_table.c.type def columns(self, include_type_in_result, distinct_docs=False): doc_id = self.sql_table.c.doc_id if distinct_docs: doc_id = distinct(doc_id) columns = [ label('owner_id', self.owner_column), label('count', func.count(doc_id)), ] if include_type_in_result: columns.append( label('type', self.type_column) ) return columns def group_by(self, include_type_in_result): group_by = [self.owner_column] if include_type_in_result: group_by.append(self.type_column) return group_by def type_filter(self, limit_types): if limit_types: return operators.in_op(self.type_column, limit_types) else: return self.type_column != self.cc_case_type def _build_query(self, include_type_in_result, limit_types, where_clauses, distinct_docs=False): standard_where = [ self.type_filter(limit_types), 
operators.in_op(self.owner_column, self.owners_needing_data), ] all_where = where_clauses + standard_where query = select( self.columns(include_type_in_result, distinct_docs=distinct_docs) ).where(and_( *all_where )).group_by( *self.group_by(include_type_in_result) ) return query class CaseQueryOpenedClosed(CaseQuery): """ Count of cases where lower <= opened_on < upper cases_opened_{period} cases_opened_{case_type}_{period} Count of cases where lower <= closed_on < upper cases_closed_{period} cases_closed_{case_type}_{period} """ def __init__(self, domain, cc_case_type, owners_needing_data, opened=True): self.opened = opened super(CaseQueryOpenedClosed, self).__init__(domain, cc_case_type, owners_needing_data) self.opened_or_closed = 'opened' if opened else 'closed' @property def owner_column(self): return self.sql_table.c['{}_by'.format(self.opened_or_closed)] @property def date_column(self): return self.sql_table.c['{}_on'.format(self.opened_or_closed)] def get_results(self, include_type_in_result, limit_types, start_date, end_date): query = self._build_query(include_type_in_result, limit_types, [ self.date_column >= start_date, self.date_column < end_date, ]) return self._run_query(query) class CaseQueryTotal(CaseQuery): """ Count of cases where opened_on < upper and (closed == False or closed_on >= lower) cases_total_{period} cases_total_{case_type}_{period} """ def get_results(self, include_type_in_result, limit_types, start_date, end_date): query = self._build_query(include_type_in_result, limit_types, [ self.sql_table.c.opened_on < end_date, or_( self.sql_table.c.closed == 0, self.sql_table.c.closed_on >= start_date ) ]) return self._run_query(query) class CaseQueryActive(CaseQuery): """ Count of cases where lower <= case_action.date < upper cases_active_{period} cases_active_{case_type}_{period} """ @property def sql_adapter(self): from corehq.apps.callcenter.data_source import get_sql_adapters_for_domain return 
get_sql_adapters_for_domain(self.domain).case_actions def get_results(self, include_type_in_result, limit_types, start_date, end_date): query = self._build_query(include_type_in_result, limit_types, [ self.sql_table.c.date >= start_date, self.sql_table.c.date < end_date, ], distinct_docs=True) return self._run_query(query) class CaseQueryTotalLegacy(BaseQuery): """ Count of cases per user that are currently open (legacy indicator). """ def __init__(self, domain, cc_case_type, users_needing_data): self.users_needing_data = users_needing_data self.cc_case_type = cc_case_type self.domain = domain @property def sql_adapter(self): from corehq.apps.callcenter.data_source import get_sql_adapters_for_domain return get_sql_adapters_for_domain(self.domain).cases def get_results(self): query = select([ label('user_id', self.sql_table.c.owner_id), label('count', func.count(self.sql_table.c.doc_id)) ]).where(and_( self.sql_table.c.type != self.cc_case_type, self.sql_table.c.closed == 0, operators.in_op(self.sql_table.c.owner_id, self.users_needing_data), )).group_by( self.sql_table.c.owner_id ) return self._run_query(query) class FormQuery(BaseQuery): def __init__(self, domain, users_needing_data): self.domain = domain self.users_needing_data = users_needing_data @property def sql_adapter(self): from corehq.apps.callcenter.data_source import get_sql_adapters_for_domain return get_sql_adapters_for_domain(self.domain).forms class StandardFormQuery(FormQuery): """ Count of forms submitted by each user during the period (upper to lower) """ def get_results(self, start_date, end_date): query = select([ label('user_id', self.sql_table.c.user_id), label('count', func.count(self.sql_table.c.doc_id)) ]).where(and_( operators.ge(self.sql_table.c.time_end, start_date), operators.lt(self.sql_table.c.time_end, end_date), operators.in_op(self.sql_table.c.user_id, self.users_needing_data) )).group_by( self.sql_table.c.user_id ) return self._run_query(query) class CustomFormQuery(FormQuery): 
""" For specific forms add the number of forms completed during the time period (lower to upper) In some cases also add the average duration of the forms. """ def get_results(self, xmlns, indicator_type, start_date, end_date): if indicator_type == TYPE_DURATION: aggregation = func.avg(self.sql_table.c.duration) else: aggregation = func.count(self.sql_table.c.doc_id) query = select([ label('user_id', self.sql_table.c.user_id), label('count', aggregation) ]).where(and_( operators.ge(self.sql_table.c.time_end, start_date), operators.lt(self.sql_table.c.time_end, end_date), operators.in_op(self.sql_table.c.user_id, self.users_needing_data), self.sql_table.c.xmlns == xmlns, )).group_by( self.sql_table.c.user_id ) return self._run_query(query)
bsd-3-clause
b0a48c1829317209f001d95332279050
30.910156
100
0.615375
3.603441
false
false
false
false
dimagi/commcare-hq
corehq/apps/app_manager/const.py
1
2707
APP_V1 = '1.0' APP_V2 = '2.0' MAJOR_RELEASE_TO_VERSION = { "1": APP_V1, "2": APP_V2, } SCHEDULE_PHASE = 'current_schedule_phase' SCHEDULE_LAST_VISIT = 'last_visit_number_{}' SCHEDULE_LAST_VISIT_DATE = 'last_visit_date_{}' SCHEDULE_GLOBAL_NEXT_VISIT_DATE = 'next_visit_date' SCHEDULE_NEXT_DUE = 'next_due' SCHEDULE_TERMINATED = '-1' SCHEDULE_CURRENT_VISIT_NUMBER = 'current_visit_number' SCHEDULE_UNSCHEDULED_VISIT = 'unscheduled_visit' SCHEDULE_MAX_DATE = (2 ** 31) - 1 SCHEDULE_DATE_CASE_OPENED = 'date_opened' ATTACHMENT_PREFIX = 'attachment:' CASE_ID = 'case_id' USERCASE_TYPE = 'commcare-user' USERCASE_ID = 'usercase_id' USERCASE_PREFIX = 'user/' AUTO_SELECT_USER = 'user' AUTO_SELECT_FIXTURE = 'fixture' AUTO_SELECT_CASE = 'case' AUTO_SELECT_LOCATION = 'location' AUTO_SELECT_RAW = 'raw' AUTO_SELECT_USERCASE = 'usercase' RETURN_TO = 'return_to' AMPLIFIES_YES = 'yes' AMPLIFIES_NO = 'no' AMPLIFIES_NOT_SET = 'not_set' DEFAULT_MONTH_FILTER_PERIOD_LENGTH = 0 STOCK_QUESTION_TAG_NAMES = [ 'balance', 'transfer', ] MOBILE_UCR_VERSION_1 = '1.0' MOBILE_UCR_MIGRATING_TO_2 = '1.5' MOBILE_UCR_VERSION_2 = '2.0' MOBILE_UCR_VERSIONS = [MOBILE_UCR_VERSION_1, MOBILE_UCR_MIGRATING_TO_2, MOBILE_UCR_VERSION_2] DEFAULT_LOCATION_FIXTURE_OPTION = 'project_default' LOCATION_FIXTURE_OPTIONS = [ DEFAULT_LOCATION_FIXTURE_OPTION, 'both_fixtures', 'only_flat_fixture', 'only_hierarchical_fixture' ] SYNC_HIERARCHICAL_FIXTURE = ('both_fixtures', 'only_hierarchical_fixture') SYNC_FLAT_FIXTURES = ('both_fixtures', 'only_flat_fixture') TARGET_COMMCARE = 'commcare' TARGET_COMMCARE_LTS = 'commcare_lts' WORKFLOW_DEFAULT = 'default' # go to the app main screen WORKFLOW_ROOT = 'root' # go to the module select screen WORKFLOW_PARENT_MODULE = 'parent_module' # go to the parent module's screen WORKFLOW_MODULE = 'module' # go to the current module's screen WORKFLOW_PREVIOUS = 'previous_screen' # go to the previous screen (prior to entering the form) WORKFLOW_FORM = 'form' # go straight to another form or menu 
ALL_WORKFLOWS = [ WORKFLOW_DEFAULT, WORKFLOW_ROOT, WORKFLOW_PARENT_MODULE, WORKFLOW_MODULE, WORKFLOW_PREVIOUS, WORKFLOW_FORM, ] # allow all options as fallback except the one for form linking WORKFLOW_FALLBACK_OPTIONS = list(ALL_WORKFLOWS).remove(WORKFLOW_FORM) WORKFLOW_CASE_LIST = 'case_list' # Return back to the case list after registering a case REGISTRATION_FORM_WORFLOWS = [ WORKFLOW_DEFAULT, WORKFLOW_CASE_LIST, ] REGISTRY_WORKFLOW_LOAD_CASE = 'load_case' REGISTRY_WORKFLOW_SMART_LINK = 'smart_link' UPDATE_MODE_ALWAYS, UPDATE_MODE_EDIT = 'always', 'edit' MULTI_SELECT_MAX_SELECT_VALUE = 100 DEFAULT_PAGE_LIMIT = 10
bsd-3-clause
679267b7d7a08e02c6067c141217f5cd
27.494737
95
0.715922
2.688183
false
false
false
false
dimagi/commcare-hq
corehq/apps/reports/management/commands/project_stats_report.py
1
24286
from collections import defaultdict from datetime import date, datetime from django.core.management import BaseCommand from django.db import ProgrammingError, connections, models, router from django.db.models import Count, F, Func from django.db.models.aggregates import Avg, StdDev from dateutil.relativedelta import relativedelta from corehq.apps.data_analytics.models import MALTRow from corehq.apps.es import CaseES, FormES, UserES from corehq.apps.es.aggregations import ( DateHistogram, NestedAggregation, TermsAggregation, ) from corehq.apps.reports.standard.project_health import ( get_performance_threshold, ) from corehq.apps.userreports.models import ( DataSourceConfiguration, StaticDataSourceConfiguration, ) from corehq.apps.userreports.util import get_table_name from corehq.blobs.models import BlobMeta from corehq.form_processor.models import ( LedgerTransaction, LedgerValue, ) from corehq.form_processor.models.cases import CommCareCase from corehq.form_processor.utils.sql import fetchall_as_namedtuple from corehq.sql_db.connections import connection_manager from corehq.sql_db.util import ( estimate_row_count, get_db_aliases_for_partitioned_query, split_list_by_db_partition, ) from corehq.util.markup import ( CSVRowFormatter, SimpleTableWriter, TableRowFormatter, ) from casexml.apps.phone.models import SyncLogSQL RESOURCE_MODEL_STATS = [ 'total_users', 'monthly_forms_per_user', 'monthly_cases_per_user', 'monthly_user_form_stats_expanded', 'monthly_user_case_stats_expanded', 'monthly_user_cases_updated', 'case_index_ratio', 'attachments', 'forms_total', 'cases_total', 'case_transactions_per_form', 'case_transactions_total', 'synclogs_per_user', 'static_datasources', 'dynamic_datasources', 'datasources_info', 'devicelogs_per_user', 'ledger_updates_per_case', 'average_ledgers_per_case', 'case_type_stats', ] class ResourceModel(object): def __init__(self, dictionary): self._dictionary = dictionary def __setitem__(self, key, item): if key not in self._dictionary: 
raise KeyError("The key {} is not defined.".format(key)) self._dictionary[key] = item def __getitem__(self, key): return self._dictionary[key] resource_model_stats_dict = {key: '' for key in RESOURCE_MODEL_STATS} resource_model = ResourceModel(resource_model_stats_dict) db_aliases = get_db_aliases_for_partitioned_query() DB_ALIAS = db_aliases[0] PARTITIONS_COUNT = len(db_aliases) class Month(Func): function = 'EXTRACT' template = '%(function)s(MONTH from %(expressions)s)' output_field = models.IntegerField() class Year(Func): function = 'EXTRACT' template = '%(function)s(YEAR from %(expressions)s)' output_field = models.IntegerField() class Command(BaseCommand): help = """Print out project stats for use by the cluster utilization model\n https://drive.google.com/drive/folders/0Bz-nswrLHmApbExCOVJ6TkgzeDQ """ def add_arguments(self, parser): parser.add_argument('domain') parser.add_argument('-m', '--months', default=3, type=int, help="Months to average data over") parser.add_argument( '--include-current', action='store_true', default=False, help="Include the current month" ) parser.add_argument('--csv', action='store_true', default=False, help="Output as CSV") def handle(self, domain, months, csv, **options): self.domain = domain self.csv = csv self.date_start = (datetime.utcnow() - relativedelta(months=months)).date().replace(day=1) self.date_end = None if options['include_current'] else datetime.utcnow().date().replace(day=1) self.active_not_deleted_users = ( UserES(for_export=True) .domain(domain).values_list("_id", flat=True) ) self.stdout.write('Collecting data (this might take a while)') self.collect_doc_counts() self.collect_forms_per_user_per_month() self.collect_cases_created_per_user_per_month() self.collect_cases_updated_per_user_per_month() self.collect_case_transactions() self.collect_case_indices() self.collect_synclogs() self.collect_devicelogs() self.collect_ledgers_per_case() self.collect_attachment_sizes() self.collect_ucr_data() 
self.output_stats() def collect_doc_counts(self): form_es = FormES() form_es.remove_default_filters() resource_model['forms_total'] = form_es.domain(self.domain).count() resource_model['cases_total'] = CaseES().domain(self.domain).count() def collect_forms_per_user_per_month(self): performance_threshold = get_performance_threshold(self.domain) base_queryset = MALTRow.objects.filter( domain_name=self.domain, month__gte=self.date_start, ) if self.date_end: base_queryset.filter(month__lt=self.date_end) user_stat_from_malt = ( base_queryset.values('month') .annotate( num_users=Count('user_id'), avg_forms=Avg('num_of_forms'), std_dev=StdDev('num_of_forms') ) ) total_users = 0 total_average_forms = 0 months = 0 for stat in user_stat_from_malt: total_average_forms += stat['avg_forms'] total_users += stat['num_users'] months += 1 resource_model['total_users'] = total_users if months > 0: resource_model['monthly_forms_per_user'] = total_average_forms / months monthly_user_stats = user_stat_from_malt \ .filter(user_type__in=['CommCareUser']) \ .filter(user_id__in=self.active_not_deleted_users) \ .filter(num_of_forms__gte=performance_threshold) resource_model['monthly_user_form_stats_expanded'] = monthly_user_stats def collect_cases_created_per_user_per_month(self, case_type=None): query = ( CaseES(for_export=True).domain(self.domain) .opened_range(gte=self.date_start, lt=self.date_end) .aggregation( TermsAggregation('cases_per_user', 'owner_id', size=100) .aggregation(DateHistogram('cases_by_date', 'opened_on', DateHistogram.Interval.MONTH))) ) if case_type: query = query.case_type(case_type) results = query.size(0).run() stats = defaultdict(list) for bucket in results.aggregations.cases_per_user.buckets_list: for month, count in bucket.cases_by_date.counts_by_bucket().items(): stats[month].append(count) final_stats = [] total_average_cases_per_user = 0 n = 0 for month, case_count_list in sorted(list(stats.items()), key=lambda r: r[0]): average_cases_per_user = 
sum(case_count_list) // len(case_count_list) total_average_cases_per_user += average_cases_per_user n += 1 final_stats.append((month, average_cases_per_user)) if n > 0: resource_model['monthly_cases_per_user'] = total_average_cases_per_user / n resource_model['monthly_user_case_stats_expanded'] = final_stats def _print_table(self, headers, rows): if self.csv: row_formatter = CSVRowFormatter() else: row_formatter = TableRowFormatter( [20] * len(headers), ) SimpleTableWriter(self.stdout, row_formatter).write_table(headers, rows) self.stdout.write('') def _print_section_title(self, title_string): self.stdout.write('') self.stdout.write(f'{title_string.upper()}') self.stdout.write('=' * len(title_string)) def _print_value(self, name, *values): separator = ',' if self.csv else ': ' values = [str(val) for val in values] self.stdout.write('\n%s%s%s\n' % (name, separator, separator.join(values))) def collect_cases_updated_per_user_per_month(self): results = ( CaseES(for_export=True).domain(self.domain) .active_in_range(gte=self.date_start, lt=self.date_end) .aggregation(TermsAggregation('cases_per_user', 'owner_id', size=100).aggregation( NestedAggregation('actions', 'actions').aggregation( DateHistogram('cases_by_date', 'server_date', DateHistogram.Interval.MONTH) ) )).size(0).run()) stats = defaultdict(list) for bucket in results.aggregations.cases_per_user.buckets_list: for month, count in bucket.actions.cases_by_date.counts_by_bucket().items(): stats[month].append(count) final_stats = [] for month, case_count_list in sorted(list(stats.items()), key=lambda r: r[0]): final_stats.append((month, sum(case_count_list) // len(case_count_list))) resource_model['monthly_user_cases_updated'] = final_stats def collect_ledgers_per_case(self): case_ids = set() ledger_count = 0 results = ( LedgerValue.objects.using(DB_ALIAS).filter(domain=self.domain) .values('case_id') .annotate(ledger_count=Count('pk')) ) for result in results: case_ids = result['case_id'] ledger_count = 
result['ledger_count'] if not case_ids: return avg_ledgers_per_case = ledger_count / len(case_ids) case_types_result = CaseES(for_export=True) \ .domain(self.domain).case_ids(case_ids) \ .aggregation(TermsAggregation('types', 'type.exact')) \ .size(0).run() case_types = case_types_result.aggregations.types.keys resource_model['case_type_stats'] = [] for type_ in case_types: results = ( CommCareCase.objects.using(DB_ALIAS).filter(domain=self.domain, closed=True, type=type_) .annotate(lifespan=F('closed_on') - F('opened_on')) .annotate(avg_lifespan=Avg('lifespan')) .values('avg_lifespan', flat=True) ) case_type_data = { 'type': type_, 'count': CaseES().domain(self.domain).case_type(type_).count(), 'avg_lifespan': results[0]['avg_lifespan'], 'cases_per_user_pm': self._cases_created_per_user_per_month(type_), } resource_model['case_type_stats'].append(case_type_data) resource_model['average_ledgers_per_case'] = avg_ledgers_per_case stats = defaultdict(list) for db_name, case_ids_p in split_list_by_db_partition(case_ids): transactions_per_case_per_month = ( LedgerTransaction.objects.using(db_name).filter(case_id__in=case_ids) .annotate(m=Month('server_date'), y=Year('server_date')).values('case_id', 'y', 'm') .annotate(count=Count('id')) ) for row in transactions_per_case_per_month: month = date(row['y'], row['m'], 1) stats[month].append(row['count']) final_stats = [] for month, transaction_count_list in sorted(list(stats.items()), key=lambda r: r[0]): final_stats.append((month.isoformat(), sum(transaction_count_list) // len(transaction_count_list))) resource_model['ledger_updates_per_case'] = final_stats def _cases_created_per_user_per_month(self, case_type=None): query = ( CaseES(for_export=True).domain(self.domain) .opened_range(gte=self.date_start, lt=self.date_end) .aggregation( TermsAggregation('cases_per_user', 'owner_id', size=100) .aggregation(DateHistogram('cases_by_date', 'opened_on', DateHistogram.Interval.MONTH))) ) if case_type: query = 
query.case_type(case_type) results = query.size(0).run() stats = defaultdict(list) for bucket in results.aggregations.cases_per_user.buckets_list: for month, count in bucket.cases_by_date.counts_by_bucket().items(): stats[month].append(count) final_stats = [] for month, case_count_list in sorted(list(stats.items()), key=lambda r: r[0]): final_stats.append((month, sum(case_count_list) // len(case_count_list))) return final_stats def collect_attachment_sizes(self): with BlobMeta.get_cursor_for_partition_db(DB_ALIAS, readonly=True) as cursor: cursor.execute(""" SELECT meta.content_type, width_bucket(content_length, 0, 2900000, 10) AS bucket, min(content_length) as bucket_min, max(content_length) AS bucket_max, count(content_length) AS freq FROM blobs_blobmeta meta INNER JOIN form_processor_xforminstancesql ON meta.parent_id = form_processor_xforminstancesql.form_id WHERE content_length IS NOT NULL AND form_processor_xforminstancesql.domain = %s GROUP BY content_type, bucket ORDER BY content_type, bucket """, [self.domain]) result = [i for i in fetchall_as_namedtuple(cursor)] resource_model['attachments'] = result def collect_ucr_data(self): static_datasources = StaticDataSourceConfiguration.by_domain(self.domain) dynamic_datasources = DataSourceConfiguration.by_domain(self.domain) resource_model['static_datasources'] = len(static_datasources) resource_model['dynamic_datasources'] = len(dynamic_datasources) def _get_count(config): table_name = get_table_name(config.domain, config.table_id) db_name = connection_manager.get_django_db_alias(config.engine_id) query = ('SELECT * FROM "%s"' % table_name, []) try: return estimate_row_count(query, db_name) except ProgrammingError: return "Table not found" def _get_table_size(config): table_name = get_table_name(config.domain, config.table_id) db_name = connection_manager.get_django_db_alias(config.engine_id) db_cursor = connections[db_name].cursor() with db_cursor as cursor: try: cursor.execute("SELECT 
pg_total_relation_size('\"%s\"')" % table_name, []) bytes = cursor.fetchone()[0] return bytes except ProgrammingError: return "Table not found" rows = sorted([ ( datasource.display_name, _get_count(datasource), datasource.referenced_doc_type, _get_table_size(datasource) ) for datasource in static_datasources + dynamic_datasources ], key=lambda r: r[-1] if r[-1] != 'Table not found' else 0) resource_model['datasources_info'] = rows def collect_case_transactions(self): db_cursor = connections[DB_ALIAS].cursor() with db_cursor as cursor: cursor.execute(""" SELECT COUNT(*) as num_forms, sum(d.count) as num_updates FROM ( SELECT COUNT(*) as count FROM form_processor_casetransaction t JOIN form_processor_commcarecasesql c on t.case_id = c.case_id WHERE c.domain = %s GROUP BY form_id ) AS d """, [self.domain]) result = cursor.fetchall() forms, form_case_updates = (0, 0) if result: forms, form_case_updates = result[0] resource_model['case_transactions_per_form'] = round(form_case_updates / forms, 2) resource_model['case_transactions_total'] = form_case_updates * PARTITIONS_COUNT def collect_case_indices(self): db_cursor = connections[DB_ALIAS].cursor() with db_cursor as cursor: cursor.execute(""" SELECT COUNT(*) FROM form_processor_commcarecaseindexsql WHERE domain = %s; """, [self.domain]) (case_indices,) = cursor.fetchone() total_cases = resource_model['cases_total'] if total_cases > 0: # total_cases are already multiplied by PARTITIONS_COUNT, # so case_indices needs to also be multiplied in order to cancel out that factor resource_model['case_index_ratio'] = (case_indices * PARTITIONS_COUNT) / total_cases else: resource_model['case_index_ratio'] = None def collect_synclogs(self): db_name = router.db_for_read(SyncLogSQL) db_cursor = connections[db_name].cursor() with db_cursor as cursor: cursor.execute(""" SELECT COUNT(*), sum(d.count) FROM ( SELECT COUNT(*) AS count FROM phone_synclogsql WHERE domain = %s GROUP BY user_id ) AS d """, [self.domain]) result = 
cursor.fetchall() total_users, total_user_synclogs = (0, 0) if result: total_users, total_user_synclogs = result[0] if total_users > 0: resource_model['synclogs_per_user'] = total_user_synclogs / total_users else: resource_model['synclogs_per_user'] = 0 def collect_devicelogs(self): from phonelog.models import DeviceReportEntry device_log_data = DeviceReportEntry.objects.filter(domain=self.domain) \ .aggregate( num_authors=Count('user_id', distinct=True), num_device_logs=Count('id'), ) devicelogs_per_user = device_log_data['num_device_logs'] // device_log_data['num_authors'] \ if device_log_data['num_authors'] > 0 else 0 resource_model['devicelogs_per_user'] = devicelogs_per_user def output_stats(self): self._print_section_title('Docs count') self._output_docs_count() self._print_section_title('User stats') self._output_monthly_user_form_stats() self._output_monthly_user_case_stats() self._print_section_title('Case Indices') self._output_case_ratio_index() self._print_section_title('Case Transactions') self._output_case_transactions() self._print_section_title('Case Type Statistics') self._output_case_types_stats() self._print_section_title('Average Ledgers per case') self._output_ledgers_per_case() self._print_section_title('Ledger updates per case') self._output_ledger_updates_per_case() self._print_section_title('Sync logs') self.stdout.write('** Synclogs are pruned every so often, so this numbers might be misleading') self._output_synclogs() self._print_section_title('Device logs') self._output_devicelogs() self._print_section_title('Attachments') self._output_attachment_sizes() self._print_section_title('UCR') self._output_ucr() def _output_docs_count(self): total_forms = resource_model['forms_total'] self.stdout.write(f'Total forms: {total_forms}') total_cases = resource_model['cases_total'] self.stdout.write(f'Total cases: {total_cases}') def _output_monthly_user_form_stats(self): def _format_rows(query_): return [ (row['month'].isoformat(), 
row['num_users'], row['avg_forms'], row['std_dev']) for row in query_ ] user_stats = resource_model['monthly_user_form_stats_expanded'] headers = ['Month', 'Active Users', 'Average forms per user', 'Std Dev'] self._print_table( headers, _format_rows( user_stats ) ) monthly_forms_per_user = resource_model['monthly_forms_per_user'] self.stdout.write(f'Average forms per user per month: {monthly_forms_per_user}') self.stdout.write('') self.stdout.write('System user stats') self._print_table( headers, _format_rows(user_stats.filter(username='system')) ) def _output_monthly_user_case_stats(self, case_type=None): case_stats = resource_model['monthly_user_case_stats_expanded'] suffix = '' if case_type: suffix = '(case type: %s)' % case_type self.stdout.write('Cases created per user (estimate)') self._print_table(['Month', 'Cases created per user %s' % suffix], case_stats) case_updates = resource_model['monthly_user_cases_updated'] self.stdout.write('Cases updated per user (estimate)') self._print_table(['Month', 'Cases updated per user'], case_updates) monthly_cases_per_user = resource_model['monthly_cases_per_user'] self.stdout.write(f'Average cases per user per month: {monthly_cases_per_user}') def _output_case_ratio_index(self): case_index_ratio = round(resource_model['case_index_ratio'] or 0, 2) self.stdout.write(f'Ratio of cases to case indices: 1 : {case_index_ratio}') def _output_attachment_sizes(self): attachments = resource_model['attachments'] self.stdout.write('Form attachment sizes (bytes)') self._print_table( ['Content Type', 'Count', 'Bucket range', 'Bucket (1-10)'], [ (row.content_type, row.freq, '[%s-%s]' % (row.bucket_min, row.bucket_max), row.bucket) for row in attachments ] ) def _output_ucr(self): self.stdout.write(f"Static UCR data sources: {resource_model['static_datasources']}") self.stdout.write(f"Dynamic UCR data sources: {resource_model['dynamic_datasources']}") rows = resource_model['datasources_info'] self.stdout.write('') self.stdout.write('UCR 
datasource sizes') self._print_table( ['Datasource name', 'Row count (approximate)', 'Doc type', 'Size (bytes)'], rows ) def _output_case_transactions(self): case_transactions = resource_model['case_transactions_per_form'] self.stdout.write(f'Average cases updated per form: {case_transactions}') case_transactions_total = resource_model['case_transactions_total'] self.stdout.write(f'Total case transactions: {case_transactions_total}') def _output_synclogs(self): synclogs_per_user = resource_model['synclogs_per_user'] self.stdout.write(f'Synclogs per user: {synclogs_per_user}') def _output_devicelogs(self): synclogs_per_user = resource_model['devicelogs_per_user'] self.stdout.write(f'Device logs per user: {synclogs_per_user}') def _output_case_types_stats(self): for case_type_data in resource_model['case_type_stats']: case_type = case_type_data['type'] self._print_value('case_type', case_type, case_type_data['count']) avg_lifespan = case_type_data['avg_lifespan'] cases_per_user = case_type_data['cases_per_user_pm'] self._print_value('Average lifespan for "%s" cases' % case_type, avg_lifespan) suffix = '' if case_type: suffix = '(case type: %s)' % case_type self.stdout.write('Cases created per user (estimate)') self._print_table(['Month', 'Cases created per user %s' % suffix], cases_per_user) def _output_ledgers_per_case(self): avg_ledgers_per_case = resource_model['average_ledgers_per_case'] self._print_value('Average ledgers per case', avg_ledgers_per_case) def _output_ledger_updates_per_case(self): stats = resource_model['ledger_updates_per_case'] self._print_table(['Month', 'Ledgers updated per case'], stats)
bsd-3-clause
0908408f6927eb946aeac96cd0196122
37.065831
111
0.601169
3.819755
false
false
false
false
onepercentclub/bluebottle
bluebottle/deeds/serializers.py
1
5678
from rest_framework import serializers from rest_framework.validators import UniqueTogetherValidator from rest_framework_json_api.relations import ( ResourceRelatedField, SerializerMethodResourceRelatedField, SerializerMethodHyperlinkedRelatedField ) from bluebottle.activities.utils import ( BaseActivitySerializer, BaseActivityListSerializer, BaseContributorSerializer ) from bluebottle.bluebottle_drf2.serializers import PrivateFileSerializer from bluebottle.deeds.models import Deed, DeedParticipant from bluebottle.fsm.serializers import TransitionSerializer from bluebottle.time_based.permissions import CanExportParticipantsPermission from bluebottle.utils.serializers import ResourcePermissionField from bluebottle.utils.utils import reverse_signed class DeedSerializer(BaseActivitySerializer): permissions = ResourcePermissionField('deed-detail', view_args=('pk',)) links = serializers.SerializerMethodField() my_contributor = SerializerMethodResourceRelatedField( model=DeedParticipant, read_only=True, source='get_my_contributor' ) contributors = SerializerMethodHyperlinkedRelatedField( model=DeedParticipant, many=True, related_link_view_name='related-deed-participants', related_link_url_kwarg='activity_id' ) participants_export_url = PrivateFileSerializer( 'deed-participant-export', url_args=('pk', ), filename='participant.csv', permission=CanExportParticipantsPermission, read_only=True ) def get_my_contributor(self, instance): user = self.context['request'].user if user.is_authenticated: return instance.contributors.filter(user=user).instance_of(DeedParticipant).first() def get_links(self, instance): if instance.start and instance.end: return { 'ical': reverse_signed('deed-ical', args=(instance.pk, )), 'google': instance.google_calendar_link, } else: return {} class Meta(BaseActivitySerializer.Meta): model = Deed fields = BaseActivitySerializer.Meta.fields + ( 'my_contributor', 'contributors', 'start', 'end', 'enable_impact', 'target', 'links', 
'participants_export_url', ) class JSONAPIMeta(BaseActivitySerializer.JSONAPIMeta): resource_name = 'activities/deeds' included_resources = BaseActivitySerializer.JSONAPIMeta.included_resources + [ 'my_contributor', 'my_contributor.user', 'my_contributor.invite' ] included_serializers = dict( BaseActivitySerializer.included_serializers, **{ 'my_contributor': 'bluebottle.deeds.serializers.DeedParticipantSerializer', 'my_contributor.user': 'bluebottle.initiatives.serializers.MemberSerializer', 'my_contributor.invite': 'bluebottle.activities.utils.InviteSerializer', } ) class DeedListSerializer(BaseActivityListSerializer): permissions = ResourcePermissionField('deed-detail', view_args=('pk',)) class Meta(BaseActivityListSerializer.Meta): model = Deed fields = BaseActivityListSerializer.Meta.fields + ( 'start', 'end', ) class JSONAPIMeta(BaseActivityListSerializer.JSONAPIMeta): resource_name = 'activities/deeds' class DeedTransitionSerializer(TransitionSerializer): resource = ResourceRelatedField(queryset=Deed.objects.all()) included_serializers = { 'resource': 'bluebottle.deeds.serializers.DeedSerializer', } class JSONAPIMeta(object): included_resources = ['resource', ] resource_name = 'activities/deed-transitions' class DeedParticipantSerializer(BaseContributorSerializer): activity = ResourceRelatedField( queryset=Deed.objects.all() ) permissions = ResourcePermissionField('deed-participant-detail', view_args=('pk',)) class Meta(BaseContributorSerializer.Meta): model = DeedParticipant meta_fields = BaseContributorSerializer.Meta.meta_fields + ('permissions', ) validators = [ UniqueTogetherValidator( queryset=DeedParticipant.objects.all(), fields=('activity', 'user') ) ] class JSONAPIMeta(BaseContributorSerializer.JSONAPIMeta): resource_name = 'contributors/deeds/participants' included_resources = [ 'user', 'activity', 'activity.goals', 'invite' ] included_serializers = { 'user': 'bluebottle.initiatives.serializers.MemberSerializer', 'activity': 
'bluebottle.deeds.serializers.DeedSerializer', 'activity.goals': 'bluebottle.impact.serializers.ImpactGoalSerializer', 'invite': 'bluebottle.activities.utils.InviteSerializer', } class DeedParticipantListSerializer(DeedParticipantSerializer): pass class DeedParticipantTransitionSerializer(TransitionSerializer): resource = ResourceRelatedField(queryset=DeedParticipant.objects.all()) field = 'states' included_serializers = { 'resource': 'bluebottle.deeds.serializers.DeedParticipantSerializer', 'resource.activity': 'bluebottle.deeds.serializers.DeedSerializer', 'resource.activity.goals': 'bluebottle.impact.serializers.ImpactGoalSerializer', } class JSONAPIMeta(object): resource_name = 'contributors/deeds/participant-transitions' included_resources = [ 'resource', 'resource.activity', 'resource.activity.goals' ]
bsd-3-clause
f88e80ff2428b643192bcd840cc7482f
34.267081
95
0.686862
4.391338
false
false
false
false
dimagi/commcare-hq
corehq/feature_previews.py
1
7272
""" Feature Previews are built on top of toggle, so if you migrate a toggle to a feature preview, you shouldn't need to migrate the data, as long as the slug is kept intact. """ from django.conf import settings from django.utils.translation import gettext_lazy as _ from django_prbac.utils import has_privilege as prbac_has_privilege from memoized import memoized from corehq.apps.accounting.models import SoftwarePlanEdition from corehq.util.quickcache import quickcache from .privileges import LOOKUP_TABLES from .toggles import ( StaticToggle, NAMESPACE_DOMAIN, TAG_PREVIEW, all_toggles_by_name_in_scope, ECD_MIGRATED_DOMAINS, ECD_PREVIEW_ENTERPRISE_DOMAINS, ) class FeaturePreview(StaticToggle): """ FeaturePreviews should be used in conjunction with normal role based access. Check the FeaturePreview first since that's a faster operation. e.g. if feature_previews.BETA_FEATURE.enabled(domain) \ and has_privilege(request, privileges.BETA_FEATURE): # do cool thing for BETA_FEATURE """ def __init__(self, slug, label, description, help_link=None, privilege=None, save_fn=None, can_self_enable_fn=None): self.privilege = privilege # a function determining whether this preview can be enabled # according to the request object self.can_self_enable_fn = can_self_enable_fn super(FeaturePreview, self).__init__( slug, label, TAG_PREVIEW, description=description, help_link=help_link, save_fn=save_fn, namespaces=[NAMESPACE_DOMAIN] ) def has_privilege(self, request): has_privilege = True if self.privilege: has_privilege = prbac_has_privilege(request, self.privilege) can_self_enable = True if self.can_self_enable_fn: can_self_enable = self.can_self_enable_fn(request) return has_privilege and can_self_enable def all_previews(): return list(all_previews_by_name().values()) @memoized def all_previews_by_name(): return all_toggles_by_name_in_scope(globals(), toggle_class=FeaturePreview) def previews_dict(domain): by_name = all_previews_by_name() enabled = previews_enabled_for_domain(domain) 
return {by_name[name].slug: True for name in enabled if name in by_name} def preview_values_by_name(domain): """ Loads all feature previews into a dictionary for use in JS """ enabled_previews = previews_enabled_for_domain(domain) return { name: name in enabled_previews for name in all_previews_by_name().keys() } @quickcache(["domain"], timeout=24 * 60 * 60, skip_arg=lambda _: settings.UNIT_TESTING) def previews_enabled_for_domain(domain): """Return set of preview names that are enabled for the given domain""" return { name for name, preview in all_previews_by_name().items() if preview.enabled(domain) } CALC_XPATHS = FeaturePreview( slug='calc_xpaths', label=_('Custom Calculations in Case List'), description=_( "Specify a custom xpath expression to calculate a value " "in the case list or case detail screen."), help_link='https://confluence.dimagi.com/display/commcarepublic/Calculations+in+the+Case+List+and+Details' ) ENUM_IMAGE = FeaturePreview( slug='enum_image', label=_('Icons in Case List'), description=_( "Display a case property as an icon in the case list. " "For example, to show that a case is late, " 'display a red square instead of "late: yes".' ), help_link='https://help.commcarehq.org/display/commcarepublic/Adding+Icons+in+Case+List+and+Case+Detail+screen' ) CONDITIONAL_ENUM = FeaturePreview( slug='conditional_enum', label=_('Conditional ID Mapping in Case List'), description=_( "Specify a custom xpath expression to calculate a lookup key in the case list, case detail screen or " "case tile enum columns." ), ) SPLIT_MULTISELECT_CASE_EXPORT = FeaturePreview( slug='split_multiselect_case_export', label=_('Split multi-selects in case export'), description=_( "This setting allows users to split multi-select questions into multiple " "columns in case exports." 
) ) def enable_callcenter(domain_name, checked): from corehq.apps.domain.models import Domain domain_obj = Domain.get_by_name(domain_name) domain_obj.call_center_config.enabled = checked domain_obj.save() def can_enable_callcenter(request): # This will only allow domains to remove themselves from the # call center feature preview, but no new domains can currently activate # the preview. A request from product return CALLCENTER.enabled_for_request(request) CALLCENTER = FeaturePreview( slug='callcenter', label=_("Call Center"), description=_( 'The call center application setting allows an application to reference a ' 'mobile user as a case that can be monitored using CommCare. ' 'This allows supervisors to view their workforce within CommCare. ' 'From here they can do things like monitor workers with performance issues, ' 'update their case with possible reasons for poor performance, ' 'and offer guidance towards solutions.'), help_link='https://help.commcarehq.org/display/commcarepublic/How+to+set+up+a+Supervisor-Call+Center+Application', save_fn=enable_callcenter, can_self_enable_fn=can_enable_callcenter, ) # Only used in Vellum VELLUM_ADVANCED_ITEMSETS = FeaturePreview( slug='advanced_itemsets', label=_("Custom Single and Multiple Answer Questions"), description=_( "Allows display of custom lists, such as case sharing groups or locations as choices in Single Answer or " "Multiple Answer lookup Table questions. Configuring these questions requires specifying advanced logic. " "Available in form builder, as an additional option on the Lookup Table Data for lookup " "table questions." 
), privilege=LOOKUP_TABLES, ) def is_eligible_for_ecd_preview(request): if not (hasattr(request, 'plan') and hasattr(request, 'subscription') and hasattr(request, 'domain')): return False if request.subscription.is_trial: return False is_migrated = ECD_MIGRATED_DOMAINS.enabled_for_request(request) is_enterprise_eligible = ECD_PREVIEW_ENTERPRISE_DOMAINS.enabled_for_request(request) is_pro_or_advanced = request.plan.plan.edition in [ SoftwarePlanEdition.ADVANCED, SoftwarePlanEdition.PRO ] return is_migrated and (is_pro_or_advanced or is_enterprise_eligible) def clear_project_data_tab_cache(domain_name, _checked): from corehq.tabs.tabclasses import ProjectDataTab ProjectDataTab.clear_dropdown_cache_for_all_domain_users(domain_name) EXPLORE_CASE_DATA_PREVIEW = FeaturePreview( slug='explore_case_data_preview', label=_("Explore Case Data"), description=_( "This feature allows you to quickly explore your case data for " "ad-hoc data queries or to identify unclean data." ), can_self_enable_fn=is_eligible_for_ecd_preview, save_fn=clear_project_data_tab_cache, )
bsd-3-clause
e8d19c4e748897b75da08db66c37b530
33.140845
118
0.693619
3.7875
false
false
false
false
dimagi/commcare-hq
custom/inddex/reports/r3_nutrient_intake.py
1
4614
import textwrap from itertools import chain from corehq.apps.reports.filters.case_list import CaseListFilter from custom.inddex import filters from custom.inddex.food import FoodData from .utils import MultiTabularReport, format_row, na_for_None class NutrientIntakeReport(MultiTabularReport): name = 'Report 3 - Disaggregated Intake Data by Respondent and Aggregated Daily Intake Data by Respondent' slug = 'report_3_disaggr_intake_data_by_rspndnt_and_aggr_daily_intake_data_by_rspndnt' # yup, really export_only = True description = textwrap.dedent(""" This report provides information on the total quantity and total nutrient content for each individual food or recipe reported by each respondent in the recall. It also provides total daily energy and nutrient intakes for each respondent. This report cannot be previewed. Users must download the data to access the information. """) @property def fields(self): return [ CaseListFilter, filters.DateRangeFilter, filters.GenderFilter, filters.AgeRangeFilter, filters.PregnancyFilter, filters.BreastFeedingFilter, filters.SettlementAreaFilter, filters.SupplementsFilter, filters.FaoWhoGiftFoodGroupDescriptionFilter, filters.RecallStatusFilter, ] @property def data_providers(self): food_data = FoodData.from_request(self.domain, self.request) return [ IntakeData(food_data), DailyIntakeData(food_data), ] class IntakeData: title = 'Disaggregated Intake Data By Food' slug = 'disaggr_intake_data_by_rspndnt' _columns = [ 'unique_respondent_id', 'location_id', 'respondent_id', 'recall_case_id', 'opened_by_username', 'owner_name', 'visit_date', 'recall_status', 'gender', 'age_years_calculated', 'age_months_calculated', 'age_range', 'pregnant', 'breastfeeding', 'urban_rural', 'supplements', 'food_code', 'food_name', 'recipe_name', 'caseid', 'food_type', 'reference_food_code', 'base_term_food_code', 'include_in_analysis', 'fao_who_gift_food_group_code', 'fao_who_gift_food_group_description', 'user_food_group', 'is_ingredient', 
'ingredient_type', 'total_grams', 'conv_factor_gap_code', 'conv_factor_gap_desc', 'fct_gap_code', 'fct_gap_desc' ] def __init__(self, food_data): self._food_data = food_data self._nutrient_names = self._food_data.fixtures.nutrient_names @property def headers(self): return self._columns + list(self._nutrient_names) @property def rows(self): for row in self._food_data.rows: yield format_row(chain( (getattr(row, col) for col in self._columns), (na_for_None(row.get_nutrient_amt(name)) for name in self._nutrient_names), )) class DailyIntakeData: title = 'Aggregated Daily Intake By Respondent' slug = 'aggr_daily_intake_by_rspndnt' _metadata_columns = [ 'unique_respondent_id', 'location_id', 'respondent_id', 'recall_case_id', 'opened_by_username', 'owner_name', 'visit_date', 'recall_status', 'gender', 'age_years_calculated', 'age_months_calculated', 'age_range', 'pregnant', 'breastfeeding', 'urban_rural', 'supplements', ] def __init__(self, food_data): self._food_data = food_data self._nutrient_names = self._food_data.fixtures.nutrient_names @property def headers(self): return self._metadata_columns + list(self._nutrient_names) @property def rows(self): rows = {} for row in self._food_data.rows: nutrients = [row.get_nutrient_amt(name) for name in self._nutrient_names] key = (row.unique_respondent_id, row.visit_date) if key not in rows: rows[key] = { 'static_cols': [getattr(row, col) for col in self._metadata_columns], 'nutrients': nutrients } else: rows[key]['nutrients'] = map(_sum, zip(rows[key]['nutrients'], nutrients)) for row in rows.values(): yield format_row(chain(row['static_cols'], map(na_for_None, row['nutrients']))) def _sum(items): real_items = [item for item in items if item is not None] return sum(real_items) if real_items else None
bsd-3-clause
6ede8447f856743a584d0d853870dff6
33.954545
110
0.614434
3.538344
false
false
false
false
onepercentclub/bluebottle
bluebottle/looker/utils.py
1
3409
from future import standard_library from bluebottle.members.models import MemberPlatformSettings standard_library.install_aliases() from builtins import object import binascii from collections import OrderedDict import json import hmac import base64 from hashlib import sha1 import time import os from urllib.parse import urlencode, quote_plus from django.db import connection from django.conf import settings from bluebottle.clients import properties from bluebottle.analytics.models import AnalyticsPlatformSettings from bluebottle.utils.utils import get_current_host class LookerSSOEmbed(object): session_length = settings.LOOKER_SESSION_LENGTH models = ('Projects', ) permissions = ('see_user_dashboards', 'see_lookml_dashboards', 'access_data', 'see_looks', ) def __init__(self, user, type, id): self.user = user self._path = '/embed/{}s/{}'.format(type, id) @property def path(self): return '/login/embed/{}'.format(quote_plus(self._path)) @property def nonce(self): return binascii.hexlify(os.urandom(16)) @property def time(self): return int(time.time()) @property def looker_host(self): if hasattr(settings, 'LOOKER_HOST'): return settings.LOOKER_HOST else: return 'looker.{}'.format( get_current_host(False) ) def sign(self, params): attrs = ( 'looker_host', 'path', 'nonce', 'time', 'session_length', 'external_user_id', 'permissions', 'models', 'group_ids', 'external_group_id', 'user_attributes', 'access_filters' ) values = [params.get(attr, getattr(self, attr, None)) for attr in attrs] values = [value for value in values if value is not None] string_to_sign = "\n".join(values) signer = hmac.new( settings.LOOKER_SECRET.encode('utf-8'), string_to_sign.encode('utf-8').strip(), sha1 ) return base64.b64encode(signer.digest()).strip() @property def url(self): schema_name = connection.tenant.schema_name analytics_settings = AnalyticsPlatformSettings.objects.get() member_settings = MemberPlatformSettings.objects.get() params = OrderedDict([ ('nonce', self.nonce.decode()), ('time', 
self.time), ('session_length', self.session_length), ('external_user_id', '{}-{}'.format(schema_name, self.user.id)), ('permissions', self.permissions), ('models', self.models), ('access_filters', {}), ('first_name', self.user.first_name), ('last_name', self.user.last_name), ('group_ids', [3]), ('external_group_id', 'Back-office Users'), ('user_attributes', { 'tenant': schema_name, 'fiscal_month_offset': member_settings.fiscal_month_offset, 'user_base': analytics_settings.user_base, 'language': properties.LANGUAGE_CODE, }), ('force_logout_login', True), ]) json_params = OrderedDict((key, json.dumps(value)) for key, value in list(params.items())) json_params['signature'] = self.sign(json_params) return '{}{}?{}'.format( 'https://' + self.looker_host, self.path, urlencode(json_params) )
bsd-3-clause
fbd638db5bbdb5434cacb17eaac106e1
32.097087
98
0.605456
4.03432
false
false
false
false
dimagi/commcare-hq
corehq/apps/cloudcare/touchforms_api.py
1
4048
from corehq.apps.app_manager.suite_xml.sections.entries import EntriesHelper from corehq.apps.cloudcare import CLOUDCARE_DEVICE_ID from corehq.apps.users.models import CouchUser from corehq.form_processor.models import CommCareCase DELEGATION_STUB_CASE_TYPE = "cc_delegation_stub" class BaseSessionDataHelper(object): def __init__(self, domain, couch_user): self.domain = domain self.couch_user = couch_user def get_session_data(self, device_id=CLOUDCARE_DEVICE_ID): """ Get session data used by touchforms. """ session_data = { 'device_id': device_id, 'domain': self.domain, } session_data.update(get_user_contributions_to_touchforms_session(self.domain, self.couch_user)) return session_data def get_full_context(self, root_extras=None, session_extras=None): """ Get the entire touchforms context for a given user/app/module/form/case """ root_extras = root_extras or {} session_extras = session_extras or {} session_data = self.get_session_data() session_data.update(session_extras) xform_url = root_extras.get('formplayer_url') ret = { "session_data": session_data, "xform_url": xform_url, } ret.update(root_extras) return ret class CaseSessionDataHelper(BaseSessionDataHelper): def __init__(self, domain, couch_user, case_id_or_case, app, form, delegation=False): super(CaseSessionDataHelper, self).__init__(domain, couch_user) self.form = form self.app = app if case_id_or_case is None or isinstance(case_id_or_case, str): self.case_id = case_id_or_case self._case = None else: self.case_id = case_id_or_case.case_id self._case = case_id_or_case self._delegation = delegation @property def case(self): if not self._case: self._case = CommCareCase.objects.get_case(self.case_id, self.domain) return self._case @property def case_type(self): return self.case.type @property def _case_parent_id(self): """Only makes sense if the case is a delegation stub""" return self.case.get_index_map().get('parent')['case_id'] @property def delegation(self): if self._delegation and self.case_id: assert 
self.case_type == DELEGATION_STUB_CASE_TYPE return self._delegation def get_session_data(self, device_id=CLOUDCARE_DEVICE_ID): """ Get session data used by touchforms. """ session_data = super(CaseSessionDataHelper, self).get_session_data(device_id) if self.case_id: if self.delegation: session_data["delegation_id"] = self.case_id session_data["case_id"] = self._case_parent_id else: session_data[self.case_session_variable_name] = self.case_id if self.app: session_data["app_id"] = self.app.get_id session_data["app_version"] = self.app.version return session_data @property def case_session_variable_name(self): session_var = 'case_id' datums = EntriesHelper(self.app).get_datums_meta_for_form_generic(self.form) datums = [datum for datum in datums if datum.case_type == self.case_type] if len(datums) == 1: session_var = datums[0].id return session_var def get_user_contributions_to_touchforms_session(domain, couch_user_or_commconnect_case): return { 'username': couch_user_or_commconnect_case.raw_username, 'user_id': couch_user_or_commconnect_case.get_id, # This API is used by smsforms, so sometimes "couch_user" can be # a case, in which case there is no user_data. 'user_data': (couch_user_or_commconnect_case.get_user_session_data(domain) if isinstance(couch_user_or_commconnect_case, CouchUser) else {}), }
bsd-3-clause
2f3ec64dee22129f36f7c74d4d9ecead
35.142857
103
0.620059
3.572816
false
false
false
false
dimagi/commcare-hq
corehq/apps/reports/tests/test_supply_accessors.py
1
4004
import uuid from django.test import TestCase from casexml.apps.case.mock import CaseFactory from corehq.apps.commtrack.helpers import make_product from corehq.apps.hqcase.utils import submit_case_blocks from corehq.apps.reports.analytics.dbaccessors import ( get_aggregated_ledger_values, ) from corehq.form_processor.tests.utils import ( FormProcessorTestUtils, sharded, ) @sharded class LedgerDBAccessorTest(TestCase): @classmethod def setUpClass(cls): super(LedgerDBAccessorTest, cls).setUpClass() cls.domain = uuid.uuid4().hex FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain) cls.product_a = make_product(cls.domain, 'A Product', 'prodcode_a') cls.product_b = make_product(cls.domain, 'B Product', 'prodcode_b') @classmethod def tearDownClass(cls): cls.product_a.delete() cls.product_b.delete() FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain) super(LedgerDBAccessorTest, cls).tearDownClass() def setUp(self): super(LedgerDBAccessorTest, self).setUp() self.factory = CaseFactory(domain=self.domain) self.case_one = self.factory.create_case() self.case_two = self.factory.create_case() def tearDown(self): FormProcessorTestUtils.delete_all_ledgers(self.domain) super(LedgerDBAccessorTest, self).tearDown() def _submit_ledgers(self, ledger_blocks): return submit_case_blocks(ledger_blocks, self.domain)[0].form_id def _set_balance(self, balance, case_id, product_id): from corehq.apps.commtrack.tests.util import get_single_balance_block return self._submit_ledgers([ get_single_balance_block(case_id, product_id, balance) ]) def _check_result(self, expected, entry_ids=None): result = [] case_ids = [ self.case_one.case_id, self.case_two.case_id, ] for row in get_aggregated_ledger_values(self.domain, case_ids, 'stock', entry_ids): result.append(row._asdict()) self.assertItemsEqual(result, expected) def test_one_ledger(self): from corehq.apps.commtrack.tests.util import get_single_balance_block self._submit_ledgers([ 
get_single_balance_block(self.case_one.case_id, self.product_a._id, 1), ]) self._check_result([ {'entry_id': self.product_a._id, 'balance': 1} ]) def test_two_ledger(self): from corehq.apps.commtrack.tests.util import get_single_balance_block self._submit_ledgers([ get_single_balance_block(self.case_one.case_id, self.product_a._id, 1), get_single_balance_block(self.case_two.case_id, self.product_a._id, 3), ]) self._check_result([ {'entry_id': self.product_a._id, 'balance': 4} ]) def test_multiple_entries(self): from corehq.apps.commtrack.tests.util import get_single_balance_block self._submit_ledgers([ get_single_balance_block(self.case_one.case_id, self.product_a._id, 1), get_single_balance_block(self.case_two.case_id, self.product_a._id, 3), get_single_balance_block(self.case_one.case_id, self.product_b._id, 3), ]) self._check_result([ {'entry_id': self.product_b._id, 'balance': 3}, {'entry_id': self.product_a._id, 'balance': 4}, ]) def test_filter_entries(self): from corehq.apps.commtrack.tests.util import get_single_balance_block self._submit_ledgers([ get_single_balance_block(self.case_one.case_id, self.product_a._id, 1), get_single_balance_block(self.case_two.case_id, self.product_a._id, 3), get_single_balance_block(self.case_one.case_id, self.product_b._id, 3), ]) self._check_result([ {'entry_id': self.product_a._id, 'balance': 4}, ], entry_ids=[self.product_a._id])
bsd-3-clause
40b10ca54b3b345f13e51d06e05b1eec
36.773585
91
0.636613
3.42515
false
true
false
false
onepercentclub/bluebottle
bluebottle/collect/serializers.py
1
7318
from rest_framework import serializers from rest_framework.serializers import ModelSerializer from rest_framework_json_api.relations import ( ResourceRelatedField, SerializerMethodResourceRelatedField, SerializerMethodHyperlinkedRelatedField ) from bluebottle.activities.utils import ( BaseActivitySerializer, BaseActivityListSerializer, BaseContributorSerializer ) from bluebottle.activities.models import Organizer from bluebottle.bluebottle_drf2.serializers import PrivateFileSerializer from bluebottle.collect.models import CollectActivity, CollectContributor, CollectType from bluebottle.fsm.serializers import TransitionSerializer from bluebottle.time_based.permissions import CanExportParticipantsPermission from bluebottle.utils.serializers import ResourcePermissionField from bluebottle.utils.utils import reverse_signed class CollectActivitySerializer(BaseActivitySerializer): permissions = ResourcePermissionField('collect-activity-detail', view_args=('pk',)) links = serializers.SerializerMethodField() collect_type = ResourceRelatedField( queryset=CollectType.objects, required=False, allow_null=True, ) my_contributor = SerializerMethodResourceRelatedField( model=CollectContributor, read_only=True, source='get_my_contributor' ) contributors = SerializerMethodHyperlinkedRelatedField( model=CollectContributor, many=True, related_link_view_name='related-collect-contributors', related_link_url_kwarg='activity_id' ) contributors_export_url = PrivateFileSerializer( 'collect-contributors-export', url_args=('pk', ), filename='contributors.csv', permission=CanExportParticipantsPermission, read_only=True ) def get_links(self, instance): if instance.start and instance.end: return { 'ical': reverse_signed('collect-ical', args=(instance.pk, )), 'google': instance.google_calendar_link, } else: return {} def get_my_contributor(self, instance): user = self.context['request'].user if user.is_authenticated: return 
instance.contributors.filter(user=user).instance_of(CollectContributor).first() def get_contributor_count(self, instance): return instance.contributors.not_instance_of(Organizer).filter( status__in=['accepted', 'succeeded', 'activity_refunded'], user__isnull=False ).count() class Meta(BaseActivitySerializer.Meta): model = CollectActivity fields = BaseActivitySerializer.Meta.fields + ( 'my_contributor', 'contributors', 'start', 'end', 'realized', 'contributors_export_url', 'location', 'location_hint', 'collect_type', 'target', 'realized', 'enable_impact', 'links' ) class JSONAPIMeta(BaseActivitySerializer.JSONAPIMeta): resource_name = 'activities/collects' included_resources = BaseActivitySerializer.JSONAPIMeta.included_resources + [ 'my_contributor', 'my_contributor.invite', 'location', 'collect_type' 'goals', 'goals.type', ] included_serializers = dict( BaseActivitySerializer.included_serializers, **{ 'my_contributor': 'bluebottle.collect.serializers.CollectContributorSerializer', 'location': 'bluebottle.geo.serializers.GeolocationSerializer', 'collect_type': 'bluebottle.collect.serializers.CollectTypeSerializer', 'my_contributor.invite': 'bluebottle.activities.utils.InviteSerializer', } ) class CollectActivityListSerializer(BaseActivityListSerializer): permissions = ResourcePermissionField('collect-activity-detail', view_args=('pk',)) collect_type = ResourceRelatedField( queryset=CollectType.objects, required=False, allow_null=True, ) class Meta(BaseActivityListSerializer.Meta): model = CollectActivity fields = BaseActivityListSerializer.Meta.fields + ( 'start', 'end', 'location', 'realized', 'collect_type', 'location', ) class JSONAPIMeta(BaseActivityListSerializer.JSONAPIMeta): resource_name = 'activities/collects' included_resources = BaseActivityListSerializer.JSONAPIMeta.included_resources + [ 'collect_type', 'location', ] included_serializers = dict( BaseActivityListSerializer.included_serializers, **{ 'collect_type': 
'bluebottle.collect.serializers.CollectTypeSerializer', 'location': 'bluebottle.geo.serializers.GeolocationSerializer', } ) class CollectActivityTransitionSerializer(TransitionSerializer): resource = ResourceRelatedField(queryset=CollectActivity.objects.all()) included_serializers = { 'resource': 'bluebottle.collect.serializers.CollectActivitySerializer', } class JSONAPIMeta(object): included_resources = ['resource', ] resource_name = 'activities/collect-transitions' class CollectContributorSerializer(BaseContributorSerializer): activity = ResourceRelatedField( queryset=CollectActivity.objects.all() ) permissions = ResourcePermissionField('collect-contributor-detail', view_args=('pk',)) class Meta(BaseContributorSerializer.Meta): model = CollectContributor meta_fields = BaseContributorSerializer.Meta.meta_fields + ('permissions', ) fields = BaseContributorSerializer.Meta.fields class JSONAPIMeta(BaseContributorSerializer.JSONAPIMeta): resource_name = 'contributors/collect/contributors' included_resources = [ 'user', 'activity', 'invite', ] included_serializers = { 'user': 'bluebottle.initiatives.serializers.MemberSerializer', 'activity': 'bluebottle.collect.serializers.CollectActivitySerializer', 'invite': 'bluebottle.activities.utils.InviteSerializer', } class CollectContributorListSerializer(CollectContributorSerializer): pass class CollectContributorTransitionSerializer(TransitionSerializer): resource = ResourceRelatedField(queryset=CollectContributor.objects.all()) field = 'states' included_serializers = { 'resource': 'bluebottle.collect.serializers.CollectContributorSerializer', 'resource.activity': 'bluebottle.collect.serializers.CollectActivitySerializer', 'resource.activity.goals': 'bluebottle.impact.serializers.ImpactGoalSerializer', } class JSONAPIMeta(object): resource_name = 'contributors/collect/contributor-transitions' included_resources = [ 'resource', 'resource.activity', 'resource.activity.goals' ] class CollectTypeSerializer(ModelSerializer): 
class Meta(object): model = CollectType fields = ('id', 'name', 'unit', 'unit_plural') class JSONAPIMeta(object): resource_name = 'activities/collect-types'
bsd-3-clause
125da27cd0a4f22d36e14dff6213c4b3
33.682464
98
0.674638
4.605412
false
false
false
false
onepercentclub/bluebottle
bluebottle/test/factory_models/geo.py
1
2010
from builtins import object import factory from django.contrib.gis.geos import Point from bluebottle.geo.models import ( Country, SubRegion, Region, Location, LocationGroup, Place, Geolocation) class RegionFactory(factory.DjangoModelFactory): class Meta(object): model = Region name = factory.Sequence(lambda n: 'Region{0}'.format(n)) class SubRegionFactory(factory.DjangoModelFactory): class Meta(object): model = SubRegion name = factory.Sequence(lambda n: 'SubRegion{0}'.format(n)) region = factory.SubFactory(RegionFactory) class CountryFactory(factory.DjangoModelFactory): class Meta(object): model = Country name = factory.Faker('country') alpha2_code = factory.Faker('country_code') subregion = factory.SubFactory(SubRegionFactory) class LocationGroupFactory(factory.DjangoModelFactory): class Meta(object): model = LocationGroup name = factory.Sequence(lambda n: 'LocationGroup_{0}'.format(n)) class LocationFactory(factory.DjangoModelFactory): class Meta(object): model = Location name = factory.Sequence(lambda n: 'Location_{0}'.format(n)) position = Point(52.5, 13.4) country = factory.SubFactory(CountryFactory) group = factory.SubFactory(LocationGroupFactory) class PlaceFactory(factory.DjangoModelFactory): class Meta(object): model = Place exclude = ['content_object'] position = Point(52.5, 13.4) country = factory.SubFactory(CountryFactory) class GeolocationFactory(factory.DjangoModelFactory): class Meta(object): model = Geolocation street = factory.Faker('street_name') street_number = factory.Faker('building_number') locality = factory.Faker('city') position = Point(13.4, 52.5) country = factory.SubFactory(CountryFactory) formatted_address = factory.LazyAttribute( lambda o: '{} {} {} {}'.format( o.street, o.street_number, o.locality, o.country.name ) )
bsd-3-clause
18014779f4480bce6ec3f5a2dfaa59e0
26.162162
68
0.694527
3.918129
false
false
false
false
dimagi/commcare-hq
corehq/form_processor/migrations/0039_auto_20151130_1748.py
1
1284
from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('form_processor', '0038_form_functions'), ] operations = [ migrations.CreateModel( name='LedgerValue', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('entry_id', models.CharField(max_length=100, db_index=True)), ('section_id', models.CharField(max_length=100, db_index=True)), ('balance', models.IntegerField(default=0)), ('last_modified', models.DateTimeField(auto_now=True)), ('case', models.ForeignKey(to='form_processor.CommCareCaseSQL', to_field='case_id', on_delete=models.CASCADE)), ], options={ }, bases=(models.Model,), ), migrations.AlterField( model_name='casetransaction', name='type', field=models.PositiveSmallIntegerField(choices=[(0, 'form'), (1, 'rebuild_with_reason'), (2, 'user_requested_rebuild'), (3, 'user_archived_rebuild'), (4, 'form_archive_rebuild'), (5, 'form_edit_rebuild'), (6, 'ledger')]), preserve_default=True, ), ]
bsd-3-clause
93a0a4cbe704203dbd9567a9375ad49e
40.419355
233
0.566978
4.089172
false
false
false
false
dimagi/commcare-hq
corehq/apps/es/tests/test_registry.py
1
4626
from django.test import SimpleTestCase from ..exceptions import ESRegistryError from ..registry import ( _ES_INFO_REGISTRY, _ALIAS_REF_COUNTS, register, deregister, verify_alias, verify_registered, registry_entry, get_registry, ) from .utils import es_test class INDEX_INFO: alias = "alias" @es_test class TestRegistry(SimpleTestCase): def setUp(self): super().setUp() self._save_reg = _ES_INFO_REGISTRY.copy() self._save_als = _ALIAS_REF_COUNTS.copy() _ES_INFO_REGISTRY.clear() _ALIAS_REF_COUNTS.clear() def tearDown(self): _ES_INFO_REGISTRY.clear() _ALIAS_REF_COUNTS.clear() _ES_INFO_REGISTRY.update(self._save_reg) _ALIAS_REF_COUNTS.update(self._save_als) super().tearDown() def _register_one_and_check(self, info, cname): if cname is None: _cname = info.alias else: _cname = cname alias_ref_count = _ALIAS_REF_COUNTS.get(info.alias, 0) self.assertNotIn(_cname, _ES_INFO_REGISTRY) if cname is None: register(info) else: register(info, cname) entry = _ES_INFO_REGISTRY[_cname] self.assertIs(entry, info) self.assertEqual(_ALIAS_REF_COUNTS[info.alias], alias_ref_count + 1) def _deregister_one_and_check(self, info_or_cname, info): if info is None: alias = cname = info_or_cname.alias else: alias = info.alias if info_or_cname is None: info_or_cname = cname = alias else: cname = info_or_cname self.assertIn(cname, _ES_INFO_REGISTRY) alias_ref_count = _ALIAS_REF_COUNTS[alias] self.assertGreaterEqual(alias_ref_count, 1) deregister(info_or_cname) self.assertNotIn(cname, _ES_INFO_REGISTRY) self.assertEqual(_ALIAS_REF_COUNTS.get(alias, 0), alias_ref_count - 1) def _check_reg_lens(self, reg, als): self.assertEqual(len(_ES_INFO_REGISTRY), reg) self.assertEqual(len(_ALIAS_REF_COUNTS), als) def test_register_one_by_alias(self): self._check_reg_lens(0, 0) self._register_one_and_check(INDEX_INFO, None) self._check_reg_lens(1, 1) def test_register_one_by_cname(self): self._check_reg_lens(0, 0) self._register_one_and_check(INDEX_INFO, "canonical") self._check_reg_lens(1, 1) def 
test_register_multiple(self): self._check_reg_lens(0, 0) self._register_one_and_check(INDEX_INFO, None) self._check_reg_lens(1, 1) self._register_one_and_check(INDEX_INFO, "canonical") self._check_reg_lens(2, 1) def test_deregister_one_by_alias(self): register(INDEX_INFO) self._check_reg_lens(1, 1) self._deregister_one_and_check(INDEX_INFO, None) self.assertEqual(len(_ES_INFO_REGISTRY), 0) self.assertEqual(len(_ALIAS_REF_COUNTS), 0) def test_deregister_one_by_info(self): register(INDEX_INFO) self._check_reg_lens(1, 1) self._deregister_one_and_check(None, INDEX_INFO) self._check_reg_lens(0, 0) def test_deregister_one_by_cname(self): register(INDEX_INFO, "canonical") self._check_reg_lens(1, 1) self._deregister_one_and_check("canonical", INDEX_INFO) self._check_reg_lens(0, 0) def test_deregister_multiple(self): register(INDEX_INFO) register(INDEX_INFO, "canonical") self._check_reg_lens(2, 1) self._deregister_one_and_check("canonical", INDEX_INFO) self._check_reg_lens(1, 1) self._deregister_one_and_check(None, INDEX_INFO) self._check_reg_lens(0, 0) def test_verify_alias(self): with self.assertRaises(ESRegistryError): verify_alias(INDEX_INFO.alias) register(INDEX_INFO) verify_alias(INDEX_INFO.alias) # should not raise ESRegistryError def test_verify_registered(self): with self.assertRaises(ESRegistryError): verify_registered(INDEX_INFO) register(INDEX_INFO) verify_registered(INDEX_INFO) # should not raise ESRegistryError def test_registry_entry(self): with self.assertRaises(ESRegistryError): verify_registered(INDEX_INFO) register(INDEX_INFO) self.assertIs(registry_entry(INDEX_INFO.alias), INDEX_INFO) def test_get_registry(self): self.assertDictEqual(get_registry(), {}) register(INDEX_INFO) self.assertDictEqual(get_registry(), {INDEX_INFO.alias: INDEX_INFO})
bsd-3-clause
c01bb0c06c28c7affcbef77c01d370b7
32.280576
78
0.615218
3.454817
false
true
false
false
dimagi/commcare-hq
custom/samveg/case_importer/validators.py
1
8105
"""Sheet- and row-level validators for the Samveg case importer.

``BaseSheetValidator`` subclasses check a whole uploaded spreadsheet
(headers); ``BaseRowOperation`` subclasses check one row at a time and
return ``(fields_to_update, error_messages)``.
"""
import datetime
import re
from collections import Counter, defaultdict

from django.utils.translation import gettext as _

from corehq.apps.case_importer.util import EXTERNAL_ID
from corehq.apps.domain.models import OperatorCallLimitSettings
from corehq.util.dates import get_previous_month_date_range, iso_string_to_date
from custom.samveg.case_importer.exceptions import (
    CallNotInLastMonthError,
    CallValueInvalidError,
    CallValuesMissingError,
    MobileNumberInvalidError,
    OwnerNameMissingError,
    RequiredValueMissingError,
    UnexpectedFileError,
    UnexpectedSkipCallValidatorValueError,
    UploadLimitReachedError,
)
from custom.samveg.case_importer.operations import BaseRowOperation
from custom.samveg.const import (
    MOBILE_NUMBER,
    OWNER_NAME,
    RCH_BENEFICIARY_IDENTIFIER,
    RCH_REQUIRED_COLUMNS,
    REQUIRED_COLUMNS,
    SKIP_CALL_VALIDATOR,
    SKIP_CALL_VALIDATOR_YES,
    SNCU_BENEFICIARY_IDENTIFIER,
    SNCU_REQUIRED_COLUMNS,
)


class BaseSheetValidator:

    @classmethod
    def run(cls, spreadsheet):
        """Validate spreadsheet.

        :param spreadsheet: Spreadsheet object provided by
            corehq.apps.case_importer.tracking.case_upload_tracker.CaseUpload.get_spreadsheet
        :return: List of error messages.
        """
        return []


class RequiredColumnsValidator(BaseSheetValidator):
    """Checks that all columns required for the detected sheet type exist."""

    @classmethod
    def run(cls, spreadsheet):
        errors = []
        errors.extend(cls._validate_required_columns(spreadsheet))
        return errors

    @classmethod
    def _validate_required_columns(cls, spreadsheet):
        """Return error messages for missing required header columns.

        The sheet type (RCH vs SNCU) is inferred from which beneficiary
        identifier column is present; an unrecognizable sheet short-circuits
        with a single error message.
        """
        columns = spreadsheet.get_header_columns()
        error_messages = []
        try:
            required_columns = get_required_columns(columns)
        except UnexpectedFileError:
            return [_(
                'Unexpected sheet uploaded. Either {rch_identifier} or {sncu_identifier} should be present'
            ).format(
                rch_identifier=RCH_BENEFICIARY_IDENTIFIER,
                sncu_identifier=SNCU_BENEFICIARY_IDENTIFIER
            )]
        missing_columns = set(required_columns) - set(columns)
        if missing_columns:
            error_messages.append(_('Missing columns {column_names}').format(
                column_names=", ".join(missing_columns)
            ))
        return error_messages


class CallColumnsValidator(BaseSheetValidator):
    """Checks that the sheet has at least one CallN header column."""

    @classmethod
    def run(cls, spreadsheet):
        errors = []
        errors.extend(cls._validate_call_columns(spreadsheet))
        return errors

    @classmethod
    def _validate_call_columns(cls, spreadsheet):
        # at least one call column, Call1-6
        columns = spreadsheet.get_header_columns()
        error_messages = []
        call_regex = re.compile(r'^Call[1-6]$')
        if not any(call_regex.match(column_name) for column_name in columns):
            error_messages.append(
                _('Need at least one Call column for Calls 1-6')
            )
        return error_messages


class RequiredValueValidator(BaseRowOperation):
    """Row operation: every required column (plus external_id) has a value."""

    def __init__(self, row_num=None, raw_row=None, **kwargs):
        super(RequiredValueValidator, self).__init__(**kwargs)
        self.row_num = row_num
        self.raw_row = raw_row

    def run(self):
        self.error_messages.extend(self._validate_required_columns())
        return self.fields_to_update, self.error_messages

    def _validate_required_columns(self):
        """Return a RequiredValueMissingError if any required field is falsy.

        Note that a single error is emitted (listing all required columns),
        not one error per missing value.
        """
        error_messages = []
        missing_values = set()
        columns = set(self.raw_row.keys())
        required_columns = get_required_columns(columns)
        required_columns.append(EXTERNAL_ID)
        for required_column in required_columns:
            if not self.fields_to_update.get(required_column):
                missing_values.add(required_column)
        if missing_values:
            error_messages.append(
                RequiredValueMissingError(
                    message=_('Required columns are {column_names}').format(
                        column_names=', '.join(required_columns)
                    )
                )
            )
        return error_messages


class CallValidator(BaseRowOperation):
    """Row operation: latest call value parses as a date in last month."""

    def __init__(self, raw_row=None, **kwargs):
        super(CallValidator, self).__init__(**kwargs)
        self.raw_row = raw_row

    def run(self):
        if self.raw_row.get(SKIP_CALL_VALIDATOR):
            # skip the row
            # add error message if the value isn't the only expected value
            if self.raw_row.get(SKIP_CALL_VALIDATOR) != SKIP_CALL_VALIDATOR_YES:
                self.error_messages.append(
                    UnexpectedSkipCallValidatorValueError()
                )
            return self.fields_to_update, self.error_messages
        call_date = None
        # NOTE(review): the ``_`` target below shadows gettext's ``_`` for
        # the remainder of this method (no translation calls follow, so
        # behavior is unaffected).
        call_value, _ = _get_latest_call_value_and_number(self.fields_to_update)
        if not call_value:
            self.error_messages.append(
                CallValuesMissingError()
            )
        else:
            try:
                call_date = iso_string_to_date(call_value)
            except ValueError:
                self.error_messages.append(
                    CallValueInvalidError()
                )
        if call_date:
            # Valid only if the call date falls in the previous calendar
            # month (compared by normalizing to the first of the month).
            last_month_first_day, _ = get_previous_month_date_range(datetime.date.today())
            if call_date.replace(day=1) != last_month_first_day:
                self.error_messages.append(CallNotInLastMonthError())
        return self.fields_to_update, self.error_messages


class FormatValidator(BaseRowOperation):
    """Row operation: mobile number, when present, must be 10 digits long."""

    def run(self):
        mobile_number = self.fields_to_update.get(MOBILE_NUMBER)
        if mobile_number and len(str(mobile_number)) != 10:
            self.error_messages.append(MobileNumberInvalidError())
        return self.fields_to_update, self.error_messages


class UploadLimitValidator(BaseRowOperation):
    """Row operation: per-owner, per-call-number upload limit.

    Counts accepted rows per (owner, CallN) in ``import_context`` and
    rejects rows once the domain's OperatorCallLimitSettings.call_limit
    is reached.
    """

    def __init__(self, import_context=None, domain=None, **kwargs):
        super(UploadLimitValidator, self).__init__(**kwargs)
        self.import_context = import_context
        self.domain = domain

    def run(self):
        owner_name = self.fields_to_update.get(OWNER_NAME)
        _, call_number = _get_latest_call_value_and_number(self.fields_to_update)
        if owner_name and call_number:
            if self._upload_limit_reached(owner_name, call_number):
                self.error_messages.append(UploadLimitReachedError())
            else:
                # Only rows under the limit are counted.
                self._update_counter(owner_name, call_number)
        else:
            if not owner_name:
                self.error_messages.append(OwnerNameMissingError())
            if not call_number:
                self.error_messages.append(CallValuesMissingError())
        return self.fields_to_update, self.error_messages

    def _upload_limit_reached(self, owner_name, call_number):
        # NOTE(review): get_or_create hits the DB on every row — presumably
        # acceptable here; verify against import volume if this becomes hot.
        setting_obj, _ = OperatorCallLimitSettings.objects.get_or_create(domain=self.domain)
        return self._counter()[owner_name][f"Call{call_number}"] >= setting_obj.call_limit

    def _update_counter(self, owner_name, call_number):
        self._counter()[owner_name][f"Call{call_number}"] += 1

    def _counter(self):
        # Lazily create the shared owner -> Counter mapping in the
        # import-wide context dict.
        if 'counter' not in self.import_context:
            self.import_context['counter'] = defaultdict(Counter)
        return self.import_context['counter']


def get_required_columns(columns):
    """Return required columns for the sheet type implied by ``columns``.

    :raises UnexpectedFileError: if neither the RCH nor the SNCU
        beneficiary identifier column is present.
    """
    if RCH_BENEFICIARY_IDENTIFIER in columns:
        sheet_specific_columns = RCH_REQUIRED_COLUMNS
    elif SNCU_BENEFICIARY_IDENTIFIER in columns:
        sheet_specific_columns = SNCU_REQUIRED_COLUMNS
    else:
        raise UnexpectedFileError
    return REQUIRED_COLUMNS + sheet_specific_columns


def _get_latest_call_value_and_number(fields_to_update):
    # A row is assumed to have call columns named, Call1 till Call6
    # return latest call's value and call number
    # (the highest-numbered non-empty CallN wins; (None, None) if none set)
    latest_call_value = None
    latest_call_number = None
    for i in range(1, 7):
        if fields_to_update.get(f"Call{i}"):
            latest_call_value = fields_to_update[f"Call{i}"]
            latest_call_number = i
    return latest_call_value, latest_call_number
bsd-3-clause
8b5d0033828f1bf467043d8fa6f3186c
34.393013
107
0.639605
4.034345
false
false
false
false
onepercentclub/bluebottle
bluebottle/members/migrations/0005_auto_20160830_0902.py
1
1277
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-08-30 07:02 from __future__ import unicode_literals from django.db import migrations def add_export_permission(apps, schema_editor): ContentType = apps.get_model("contenttypes", "ContentType") Group = apps.get_model("auth", "Group") Permission = apps.get_model("auth", "Permission") try: content_type = ContentType.objects.get(app_label='sites', model='site') perm, created = Permission.objects.get_or_create(codename='export', name='Can export platform data', content_type=content_type) staff = Group.objects.get(name='Staff') staff.permissions.add(perm) except ContentType.DoesNotExist: pass def remove_export_permission(apps, schema_editor): Permission = apps.get_model("auth", "Permission") Permission.objects.filter(codename='export', name='Can export platform data').delete() class Migration(migrations.Migration): dependencies = [ ('members', '0004_member_verified'), ] operations = [ migrations.RunPython(add_export_permission, remove_export_permission) ]
bsd-3-clause
bc0e201d78943876a1a816c495f93dfd
33.513514
89
0.607674
4.373288
false
false
false
false
dimagi/commcare-hq
corehq/apps/domain/tests/test_delete_domain.py
1
48691
import random import uuid from contextlib import ExitStack from datetime import date, datetime, timedelta from decimal import Decimal from io import BytesIO from django.contrib.auth.models import User from django.core.management import call_command from django.db.transaction import TransactionManagementError from django.test import TestCase from dateutil.relativedelta import relativedelta from unittest.mock import patch from casexml.apps.phone.models import SyncLogSQL from couchforms.models import UnfinishedSubmissionStub from corehq.apps.accounting.models import ( BillingAccount, CreditLine, DefaultProductPlan, FeatureType, SoftwarePlanEdition, Subscription, ) from corehq.apps.aggregate_ucrs.models import ( AggregateTableDefinition, PrimaryColumn, SecondaryColumn, SecondaryTableDefinition, ) from corehq.apps.app_manager.models import ( AppReleaseByLocation, GlobalAppConfig, LatestEnabledBuildProfiles, ) from corehq.apps.app_manager.suite_xml.post_process.resources import ( ResourceOverride, ) from corehq.apps.case_importer.tracking.models import ( CaseUploadFormRecord, CaseUploadRecord, ) from corehq.apps.case_search.models import ( CaseSearchConfig, FuzzyProperties, IgnorePatterns, ) from corehq.apps.cloudcare.dbaccessors import get_application_access_for_domain from corehq.apps.cloudcare.models import ApplicationAccess from corehq.apps.commtrack.models import CommtrackConfig from corehq.apps.consumption.models import DefaultConsumption from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition from corehq.apps.data_analytics.models import GIRRow, MALTRow from corehq.apps.data_dictionary.models import CaseProperty, CasePropertyAllowedValue, CaseType from corehq.apps.data_interfaces.models import ( AutomaticUpdateRule, CaseRuleAction, CaseRuleCriteria, CaseRuleSubmission, DomainCaseRuleRun, ) from corehq.apps.domain.deletion import DOMAIN_DELETE_OPERATIONS from corehq.apps.domain.models import Domain, TransferDomainRequest from 
corehq.apps.export.models.new import DataFile, EmailExportWhenDoneRequest from corehq.apps.fixtures.models import ( LookupTable, LookupTableRow, LookupTableRowOwner, OwnerType, ) from corehq.apps.ivr.models import Call from corehq.apps.locations.models import ( LocationFixtureConfiguration, LocationType, SQLLocation, make_location, ) from corehq.apps.ota.models import MobileRecoveryMeasure, SerialIdBucket from corehq.apps.products.models import Product, SQLProduct from corehq.apps.registration.models import RegistrationRequest from corehq.apps.reminders.models import EmailUsage from corehq.apps.reports.models import ( TableauConnectedApp, TableauServer, TableauVisualization, ) from corehq.apps.sms.models import ( SMS, DailyOutboundSMSLimitReached, ExpectedCallback, Keyword, KeywordAction, MessagingEvent, MessagingSubEvent, MobileBackendInvitation, PhoneNumber, QueuedSMS, SQLLastReadMessage, SQLMobileBackend, SQLMobileBackendMapping, ) from corehq.apps.smsforms.models import SQLXFormsSession from corehq.apps.translations.models import SMSTranslations, TransifexBlacklist from corehq.apps.userreports.models import AsyncIndicator from corehq.apps.users.audit.change_messages import UserChangeMessage from corehq.apps.users.models import ( DomainRequest, Invitation, PermissionInfo, HqPermissions, RoleAssignableBy, RolePermission, UserRole, UserHistory, WebUser, ) from corehq.apps.users.util import SYSTEM_USER_ID from corehq.apps.zapier.consts import EventTypes from corehq.apps.zapier.models import ZapierSubscription from corehq.blobs import CODES, NotFound, get_blob_db from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state from corehq.form_processor.models import CommCareCase, XFormInstance from corehq.form_processor.tests.utils import ( create_case, create_form_for_test, ) from corehq.motech.models import ConnectionSettings, RequestLog from corehq.motech.repeaters.const import RECORD_SUCCESS_STATE from corehq.motech.repeaters.models import ( 
Repeater, SQLCaseRepeater, SQLRepeater, SQLRepeatRecord, SQLRepeatRecordAttempt, ) from settings import HQ_ACCOUNT_ROOT class TestDeleteDomain(TestCase): def _create_data(self, domain_name, i): product = Product(domain=domain_name, name='test-{}'.format(i)) product.save() location = make_location( domain=domain_name, site_code='testcode-{}'.format(i), name='test-{}'.format(i), location_type='facility' ) location.save() SMS.objects.create(domain=domain_name) Call.objects.create(domain=domain_name) SQLLastReadMessage.objects.create(domain=domain_name) ExpectedCallback.objects.create(domain=domain_name) PhoneNumber.objects.create(domain=domain_name, is_two_way=False, pending_verification=False) event = MessagingEvent.objects.create( domain=domain_name, date=datetime.utcnow(), source=MessagingEvent.SOURCE_REMINDER, content_type=MessagingEvent.CONTENT_SMS, status=MessagingEvent.STATUS_COMPLETED ) MessagingSubEvent.objects.create( parent=event, domain=domain_name, date=datetime.utcnow(), recipient_type=MessagingEvent.RECIPIENT_CASE, content_type=MessagingEvent.CONTENT_SMS, status=MessagingEvent.STATUS_COMPLETED ) backend = SQLMobileBackend.objects.create(domain=domain_name, is_global=False) SQLMobileBackendMapping.objects.create( domain=domain_name, backend_type=SQLMobileBackend.SMS, prefix=str(i), backend=backend ) MobileBackendInvitation.objects.create(domain=domain_name, backend=backend) def setUp(self): super(TestDeleteDomain, self).setUp() self.domain = Domain(name="test", is_active=True) self.domain.save() self.addCleanup(ensure_deleted, self.domain) self.domain.convert_to_commtrack() self.current_subscription = Subscription.new_domain_subscription( BillingAccount.get_or_create_account_by_domain(self.domain.name, created_by='tests')[0], self.domain.name, get_product_plan_version(), date_start=date.today() - relativedelta(days=1), ) self.domain2 = Domain(name="test2", is_active=True) self.domain2.save() self.addCleanup(ensure_deleted, self.domain2) 
self.domain2.convert_to_commtrack() LocationType.objects.create( domain='test', name='facility', ) LocationType.objects.create( domain='test2', name='facility', ) LocationType.objects.create( domain='test', name='facility2', ) LocationType.objects.create( domain='test2', name='facility2', ) def _assert_sql_counts(self, domain, number): self.assertEqual(SQLLocation.objects.filter(domain=domain).count(), number) self.assertEqual(SQLProduct.objects.filter(domain=domain).count(), number) self.assertEqual(LocationType.objects.filter(domain=domain).count(), number) self.assertEqual(SMS.objects.filter(domain=domain).count(), number) self.assertEqual(Call.objects.filter(domain=domain).count(), number) self.assertEqual(SQLLastReadMessage.objects.filter(domain=domain).count(), number) self.assertEqual(ExpectedCallback.objects.filter(domain=domain).count(), number) self.assertEqual(PhoneNumber.objects.filter(domain=domain).count(), number) self.assertEqual(MessagingEvent.objects.filter(domain=domain).count(), number) self.assertEqual(MessagingSubEvent.objects.filter(parent__domain=domain).count(), number) self.assertEqual(SQLMobileBackend.objects.filter(domain=domain).count(), number) self.assertEqual(SQLMobileBackendMapping.objects.filter(domain=domain).count(), number) self.assertEqual(MobileBackendInvitation.objects.filter(domain=domain).count(), number) def test_sql_objects_deletion(self): for i in range(2): self._create_data('test', i) self._create_data('test2', i) self._assert_sql_counts('test', 2) self._assert_sql_counts('test2', 2) self.domain.delete() self._assert_sql_counts('test', 0) self._assert_sql_counts('test2', 2) def test_active_subscription_terminated(self): self.domain.delete() terminated_subscription = Subscription.visible_objects.get(subscriber__domain=self.domain.name) self.assertFalse(terminated_subscription.is_active) self.assertIsNotNone(terminated_subscription.date_end) def test_accounting_future_subscription_suppressed(self): 
self.current_subscription.date_end = self.current_subscription.date_start + relativedelta(days=5) self.current_subscription.save() next_subscription = Subscription.new_domain_subscription( self.current_subscription.account, self.domain.name, DefaultProductPlan.get_default_plan_version(edition=SoftwarePlanEdition.PRO), date_start=self.current_subscription.date_end, ) self.domain.delete() self.assertTrue( Subscription.visible_and_suppressed_objects.get( id=next_subscription.id ).is_hidden_to_ops ) def test_active_subscription_credits_transferred_to_account(self): credit_amount = random.randint(1, 10) CreditLine.add_credit( credit_amount, feature_type=FeatureType.SMS, subscription=self.current_subscription, ) self.domain.delete() # Check that get_credits_by_subscription_and_features does not return the old deactivated credit line subscription_credits = CreditLine.get_credits_by_subscription_and_features( self.current_subscription, feature_type=FeatureType.SMS, ) self.assertEqual(len(subscription_credits), 0) # Check that old credit line has been tranferred to accoun account_credits = CreditLine.get_credits_for_account( self.current_subscription.account, feature_type=FeatureType.SMS, ) self.assertEqual(len(account_credits), 1) self.assertEqual(account_credits[0].balance, Decimal(credit_amount)) @patch('corehq.apps.accounting.models.DomainDowngradeActionHandler.get_response') def test_downgraded(self, mock_get_response): mock_get_response.return_value = True self.domain.delete() self.assertEqual(len(mock_get_response.call_args_list), 1) def _test_case_deletion(self): for domain_name in [self.domain.name, self.domain2.name]: create_case(domain_name, save=True) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(domain_name)), 1) self.domain.delete() self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.domain.name)), 0) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.domain2.name)), 1) def test_case_deletion_sql(self): 
self._test_case_deletion() def test_form_deletion(self): form_states = [state_tuple[0] for state_tuple in XFormInstance.STATES] for domain_name in [self.domain.name, self.domain2.name]: for form_state in form_states: create_form_for_test(domain_name, state=form_state) for doc_type in doc_type_to_state: self.assertEqual( len(XFormInstance.objects.get_form_ids_in_domain(domain_name, doc_type=doc_type)), 1 ) self.domain.delete() for doc_type in doc_type_to_state: self.assertEqual( len(XFormInstance.objects.get_form_ids_in_domain(self.domain.name, doc_type=doc_type)), 0 ) self.assertEqual( len(XFormInstance.objects.get_form_ids_in_domain(self.domain2.name, doc_type=doc_type)), 1 ) def _assert_queryset_count(self, queryset_list, count): for queryset in queryset_list: self.assertEqual(queryset.count(), count) def _assert_aggregate_ucr_count(self, domain_name, count): self._assert_queryset_count([ AggregateTableDefinition.objects.filter(domain=domain_name), PrimaryColumn.objects.filter(table_definition__domain=domain_name), SecondaryTableDefinition.objects.filter(table_definition__domain=domain_name), SecondaryColumn.objects.filter(table_definition__table_definition__domain=domain_name), ], count) def test_aggregate_ucr_delete(self): for domain_name in [self.domain.name, self.domain2.name]: aggregate_table_definition = AggregateTableDefinition.objects.create( domain=domain_name, primary_data_source_id=uuid.uuid4(), table_id=uuid.uuid4().hex, ) secondary_table_definition = SecondaryTableDefinition.objects.create( table_definition=aggregate_table_definition, data_source_id=uuid.uuid4(), ) PrimaryColumn.objects.create(table_definition=aggregate_table_definition) SecondaryColumn.objects.create(table_definition=secondary_table_definition) self._assert_aggregate_ucr_count(domain_name, 1) self.domain.delete() self._assert_aggregate_ucr_count(self.domain.name, 0) self._assert_aggregate_ucr_count(self.domain2.name, 1) self.assertEqual(SecondaryTableDefinition.objects.count(), 1) 
self.assertEqual( SecondaryTableDefinition.objects.filter(table_definition__domain=self.domain2.name).count(), 1 ) self.assertEqual(PrimaryColumn.objects.count(), 1) self.assertEqual(PrimaryColumn.objects.filter(table_definition__domain=self.domain2.name).count(), 1) self.assertEqual(SecondaryColumn.objects.count(), 1) self.assertEqual( SecondaryColumn.objects.filter(table_definition__table_definition__domain=self.domain2.name).count(), 1 ) def _assert_case_importer_counts(self, domain_name, count): self._assert_queryset_count([ CaseUploadFormRecord.objects.filter(case_upload_record__domain=domain_name), CaseUploadRecord.objects.filter(domain=domain_name), ], count) def test_case_importer(self): for domain_name in [self.domain.name, self.domain2.name]: case_upload_record = CaseUploadRecord.objects.create( domain=domain_name, task_id=uuid.uuid4().hex, upload_id=uuid.uuid4().hex, ) CaseUploadFormRecord.objects.create( case_upload_record=case_upload_record, form_id=uuid.uuid4().hex, ) self._assert_case_importer_counts(domain_name, 1) self.domain.delete() self._assert_case_importer_counts(self.domain.name, 0) self._assert_case_importer_counts(self.domain2.name, 1) self.assertEqual(CaseUploadFormRecord.objects.count(), 1) self.assertEqual( CaseUploadFormRecord.objects.filter(case_upload_record__domain=self.domain2.name).count(), 1 ) def _assert_app_manager_counts(self, domain_name, count): self._assert_queryset_count([ AppReleaseByLocation.objects.filter(domain=domain_name), LatestEnabledBuildProfiles.objects.filter(domain=domain_name), GlobalAppConfig.objects.filter(domain=domain_name), ResourceOverride.objects.filter(domain=domain_name), ], count) def test_app_manager(self): for domain_name in [self.domain.name, self.domain2.name]: location = make_location( domain=domain_name, site_code='testcode', name='test', location_type='facility' ) location.save() AppReleaseByLocation.objects.create(domain=domain_name, app_id='123', build_id='456', version=23, location=location) 
with patch('corehq.apps.app_manager.models.GlobalAppConfig.by_app_id'): LatestEnabledBuildProfiles.objects.create(domain=domain_name, app_id='123', build_id='456', version=10) GlobalAppConfig.objects.create(domain=domain_name, app_id='123') ResourceOverride.objects.create(domain=domain_name, app_id='123', root_name='test', pre_id='456', post_id='789') self._assert_app_manager_counts(domain_name, 1) self.domain.delete() self._assert_app_manager_counts(self.domain.name, 0) self._assert_app_manager_counts(self.domain2.name, 1) location.delete() def _assert_case_search_counts(self, domain_name, count): self._assert_queryset_count([ CaseSearchConfig.objects.filter(domain=domain_name), FuzzyProperties.objects.filter(domain=domain_name), IgnorePatterns.objects.filter(domain=domain_name), ], count) def test_case_search(self): for domain_name in [self.domain.name, self.domain2.name]: CaseSearchConfig.objects.create(domain=domain_name) FuzzyProperties.objects.create(domain=domain_name) IgnorePatterns.objects.create(domain=domain_name) self._assert_case_search_counts(domain_name, 1) self.domain.delete() self._assert_case_search_counts(self.domain.name, 0) self._assert_case_search_counts(self.domain2.name, 1) def _assert_cloudcare_counts(self, domain_name, count): self._assert_queryset_count([ ApplicationAccess.objects.filter(domain=domain_name), ], count) def test_cloudcare(self): for domain_name in [self.domain.name, self.domain2.name]: get_application_access_for_domain(domain_name) self.domain.delete() self._assert_cloudcare_counts(self.domain.name, 0) self._assert_cloudcare_counts(self.domain2.name, 1) def _assert_consumption_counts(self, domain_name, count): self._assert_queryset_count([ DefaultConsumption.objects.filter(domain=domain_name), ], count) def test_consumption(self): for domain_name in [self.domain.name, self.domain2.name]: DefaultConsumption.objects.create(domain=domain_name) self.domain.delete() self._assert_consumption_counts(self.domain.name, 0) 
self._assert_consumption_counts(self.domain2.name, 1) def _assert_custom_data_fields_counts(self, domain_name, count): self._assert_queryset_count([ CustomDataFieldsDefinition.objects.filter(domain=domain_name), ], count) def test_custom_data_fields(self): for domain_name in [self.domain.name, self.domain2.name]: CustomDataFieldsDefinition.get_or_create(domain_name, 'UserFields') self.domain.delete() self._assert_custom_data_fields_counts(self.domain.name, 0) self._assert_custom_data_fields_counts(self.domain2.name, 1) def _assert_data_analytics_counts(self, domain_name, count): self._assert_queryset_count([ GIRRow.objects.filter(domain_name=domain_name), MALTRow.objects.filter(domain_name=domain_name), ], count) def test_data_analytics(self): for domain_name in [self.domain.name, self.domain2.name]: GIRRow.objects.create( domain_name=domain_name, month=date.today(), start_date=date.today(), wams_current=1, active_users=1, using_and_performing=1, not_performing=1, inactive_experienced=1, inactive_not_experienced=1, not_experienced=1, not_performing_not_experienced=1, active_ever=1, possibly_exp=1, ever_exp=1, exp_and_active_ever=1, active_in_span=1, eligible_forms=1, ) MALTRow.objects.create( domain_name=domain_name, month=date.today(), num_of_forms=1, ) self._assert_data_analytics_counts(domain_name, 1) self.domain.delete() self._assert_data_analytics_counts(self.domain.name, 0) self._assert_data_analytics_counts(self.domain2.name, 1) def _assert_data_dictionary_counts(self, domain_name, count): self._assert_queryset_count([ CaseType.objects.filter(domain=domain_name), CaseProperty.objects.filter(case_type__domain=domain_name), CasePropertyAllowedValue.objects.filter(case_property__case_type__domain=domain_name), ], count) def test_data_dictionary(self): for domain_name in [self.domain.name, self.domain2.name]: case_type = CaseType.objects.create(domain=domain_name, name='case_type') prop = CaseProperty.objects.create(case_type=case_type, name='case_property', 
data_type='select') CasePropertyAllowedValue.objects.create(case_property=prop, allowed_value="True") self._assert_data_dictionary_counts(domain_name, 1) self.domain.delete() self._assert_data_dictionary_counts(self.domain.name, 0) self._assert_data_dictionary_counts(self.domain2.name, 1) def _assert_data_interfaces(self, domain_name, count): self._assert_queryset_count([ AutomaticUpdateRule.objects.filter(domain=domain_name), CaseRuleAction.objects.filter(rule__domain=domain_name), CaseRuleCriteria.objects.filter(rule__domain=domain_name), CaseRuleSubmission.objects.filter(domain=domain_name), DomainCaseRuleRun.objects.filter(domain=domain_name), ], count) def test_data_interfaces(self): for domain_name in [self.domain.name, self.domain2.name]: automatic_update_rule = AutomaticUpdateRule.objects.create(domain=domain_name) CaseRuleAction.objects.create(rule=automatic_update_rule) CaseRuleCriteria.objects.create(rule=automatic_update_rule) CaseRuleSubmission.objects.create( created_on=datetime.utcnow(), domain=domain_name, form_id=uuid.uuid4().hex, rule=automatic_update_rule, ) DomainCaseRuleRun.objects.create(domain=domain_name, started_on=datetime.utcnow()) self._assert_data_interfaces(domain_name, 1) self.domain.delete() self._assert_data_interfaces(self.domain.name, 0) self._assert_data_interfaces(self.domain2.name, 1) self.assertEqual(CaseRuleAction.objects.count(), 1) self.assertEqual(CaseRuleAction.objects.filter(rule__domain=self.domain2.name).count(), 1) self.assertEqual(CaseRuleCriteria.objects.count(), 1) self.assertEqual(CaseRuleCriteria.objects.filter(rule__domain=self.domain2.name).count(), 1) def _assert_domain_counts(self, domain_name, count): self._assert_queryset_count([ TransferDomainRequest.objects.filter(domain=domain_name), ], count) def test_delete_domain(self): for domain_name in [self.domain.name, self.domain2.name]: TransferDomainRequest.objects.create(domain=domain_name, to_username='to', from_username='from') 
self._assert_domain_counts(domain_name, 1) self.domain.delete() self._assert_domain_counts(self.domain.name, 0) self._assert_domain_counts(self.domain2.name, 1) def _assert_export_counts(self, domain_name, count): self._assert_queryset_count([ DataFile.meta_query(domain_name), EmailExportWhenDoneRequest.objects.filter(domain=domain_name), ], count) def test_export_delete(self): blobdb = get_blob_db() data_files = [] for domain_name in [self.domain.name, self.domain2.name]: data_files.append(DataFile.save_blob( BytesIO((domain_name + " csv").encode('utf-8')), domain=domain_name, filename="data.csv", description="data file", content_type="text/csv", delete_after=datetime.utcnow() + timedelta(minutes=10), )) EmailExportWhenDoneRequest.objects.create(domain=domain_name) self._assert_export_counts(domain_name, 1) self.domain.delete() with self.assertRaises(NotFound): blobdb.get(key=data_files[0].blob_id, type_code=CODES.data_file) with blobdb.get(key=data_files[1].blob_id, type_code=CODES.data_file) as f: self.assertEqual(f.read(), (self.domain2.name + " csv").encode('utf-8')) self._assert_export_counts(self.domain.name, 0) self._assert_export_counts(self.domain2.name, 1) def _assert_location_counts(self, domain_name, count): self._assert_queryset_count([ LocationFixtureConfiguration.objects.filter(domain=domain_name) ], count) def test_location_delete(self): for domain_name in [self.domain.name, self.domain2.name]: LocationFixtureConfiguration.objects.create(domain=domain_name) self._assert_location_counts(domain_name, 1) self.domain.delete() self._assert_location_counts(self.domain.name, 0) self._assert_location_counts(self.domain2.name, 1) def _assert_ota_counts(self, domain_name, count): self._assert_queryset_count([ MobileRecoveryMeasure.objects.filter(domain=domain_name), SerialIdBucket.objects.filter(domain=domain_name), ], count) def test_ota_delete(self): for domain_name in [self.domain.name, self.domain2.name]: 
MobileRecoveryMeasure.objects.create(domain=domain_name) SerialIdBucket.objects.create(domain=domain_name) self._assert_ota_counts(domain_name, 1) self.domain.delete() self._assert_ota_counts(self.domain.name, 0) self._assert_ota_counts(self.domain2.name, 1) def _assert_reports_counts(self, domain_name, count): self._assert_queryset_count([ TableauServer.objects.filter(domain=domain_name), TableauVisualization.objects.filter(domain=domain_name), TableauConnectedApp.objects.filter(server__domain=domain_name), ], count) def test_reports_delete(self): for domain_name in [self.domain.name, self.domain2.name]: server = TableauServer.objects.create( domain=domain_name, server_type='server', server_name='my_server', target_site='my_site', ) TableauVisualization.objects.create( domain=domain_name, server=server, view_url='my_url', ) TableauConnectedApp.objects.create( app_client_id='qwer1234', secret_id='asdf5678', server=server, ) self._assert_reports_counts(domain_name, 1) self.domain.delete() self._assert_reports_counts(self.domain.name, 0) self._assert_reports_counts(self.domain2.name, 1) def _assert_phone_counts(self, domain_name, count): self._assert_queryset_count([ SyncLogSQL.objects.filter(domain=domain_name) ], count) def test_phone_delete(self): for domain_name in [self.domain.name, self.domain2.name]: SyncLogSQL.objects.create( domain=domain_name, doc={}, synclog_id=uuid.uuid4(), user_id=uuid.uuid4(), ) self._assert_phone_counts(domain_name, 1) self.domain.delete() self._assert_phone_counts(self.domain.name, 0) self._assert_phone_counts(self.domain2.name, 1) def _assert_registration_count(self, domain_name, count): self._assert_queryset_count([ RegistrationRequest.objects.filter(domain=domain_name), ], count) def test_registration_delete(self): for domain_name in [self.domain.name, self.domain2.name]: RegistrationRequest.objects.create( domain=domain_name, activation_guid=uuid.uuid4().hex, request_time=datetime.utcnow(), request_ip='12.34.567.8' ) 
self._assert_registration_count(domain_name, 1) self.domain.delete() self._assert_registration_count(self.domain.name, 0) self._assert_registration_count(self.domain2.name, 1) def _assert_reminders_counts(self, domain_name, count): self._assert_queryset_count([ EmailUsage.objects.filter(domain=domain_name), ], count) def test_reminders_delete(self): for domain_name in [self.domain.name, self.domain2.name]: EmailUsage.objects.create(domain=domain_name, month=7, year=2018) self._assert_reminders_counts(domain_name, 1) self.domain.delete() self._assert_reminders_counts(self.domain.name, 0) self._assert_reminders_counts(self.domain2.name, 1) def _assert_sms_counts(self, domain_name, count): self._assert_queryset_count([ DailyOutboundSMSLimitReached.objects.filter(domain=domain_name), Keyword.objects.filter(domain=domain_name), KeywordAction.objects.filter(keyword__domain=domain_name), QueuedSMS.objects.filter(domain=domain_name) ], count) def test_sms_delete(self): for domain_name in [self.domain.name, self.domain2.name]: DailyOutboundSMSLimitReached.objects.create(domain=domain_name, date=date.today()) keyword = Keyword.objects.create(domain=domain_name) KeywordAction.objects.create(keyword=keyword) QueuedSMS.objects.create(domain=domain_name) self._assert_sms_counts(domain_name, 1) self.domain.delete() self._assert_sms_counts(self.domain.name, 0) self._assert_sms_counts(self.domain2.name, 1) self.assertEqual(KeywordAction.objects.count(), 1) self.assertEqual(KeywordAction.objects.filter(keyword__domain=self.domain2.name).count(), 1) def _assert_smsforms_counts(self, domain_name, count): self._assert_queryset_count([ SQLXFormsSession.objects.filter(domain=domain_name), ], count) def test_smsforms_delete(self): for domain_name in [self.domain.name, self.domain2.name]: SQLXFormsSession.objects.create( domain=domain_name, start_time=datetime.utcnow(), modified_time=datetime.utcnow(), current_action_due=datetime.utcnow(), expire_after=3, ) 
self._assert_smsforms_counts(domain_name, 1) self.domain.delete() self._assert_smsforms_counts(self.domain.name, 0) self._assert_smsforms_counts(self.domain2.name, 1) def _assert_translations_count(self, domain_name, count): self._assert_queryset_count([ SMSTranslations.objects.filter(domain=domain_name), TransifexBlacklist.objects.filter(domain=domain_name), ], count) def test_translations_delete(self): for domain_name in [self.domain.name, self.domain2.name]: SMSTranslations.objects.create(domain=domain_name, langs=['en'], translations={'a': 'a'}) TransifexBlacklist.objects.create(domain=domain_name, app_id='123', field_name='xyz') self._assert_translations_count(domain_name, 1) self.domain.delete() self._assert_translations_count(self.domain.name, 0) self._assert_translations_count(self.domain2.name, 1) def _assert_userreports_counts(self, domain_name, count): self._assert_queryset_count([ AsyncIndicator.objects.filter(domain=domain_name) ], count) def test_userreports_delete(self): for domain_name in [self.domain.name, self.domain2.name]: AsyncIndicator.objects.create( domain=domain_name, doc_id=uuid.uuid4().hex, doc_type='doc_type', indicator_config_ids=[], ) self._assert_userreports_counts(domain_name, 1) self.domain.delete() self._assert_userreports_counts(self.domain.name, 0) self._assert_userreports_counts(self.domain2.name, 1) def _assert_users_counts(self, domain_name, count): self._assert_queryset_count([ DomainRequest.objects.filter(domain=domain_name), Invitation.objects.filter(domain=domain_name), User.objects.filter(username__contains=f'{domain_name}.{HQ_ACCOUNT_ROOT}') ], count) def test_users_delete(self): for domain_name in [self.domain.name, self.domain2.name]: DomainRequest.objects.create(domain=domain_name, email='user@test.com', full_name='User') Invitation.objects.create(domain=domain_name, email='user@test.com', invited_by='friend@test.com', invited_on=datetime.utcnow()) 
User.objects.create(username=f'mobileuser@{domain_name}.{HQ_ACCOUNT_ROOT}') self._assert_users_counts(domain_name, 1) self.domain.delete() self._assert_users_counts(self.domain.name, 0) self._assert_users_counts(self.domain2.name, 1) def test_users_domain_membership(self): web_user = WebUser.create(self.domain.name, f'webuser@{self.domain.name}.{HQ_ACCOUNT_ROOT}', '******', created_by=None, created_via=None) another_domain = Domain(name="another-test", is_active=True) another_domain.save() self.addCleanup(another_domain.delete) # add more than 1 domain membership to trigger _log_web_user_membership_removed in tests web_user.add_domain_membership(another_domain.name) web_user.save() self.domain.delete() user_history = UserHistory.objects.last() self.assertEqual(user_history.by_domain, None) self.assertEqual(user_history.for_domain, self.domain.name) self.assertEqual(user_history.changed_by, SYSTEM_USER_ID) self.assertEqual(user_history.user_id, web_user.get_id) self.assertEqual(user_history.change_messages, UserChangeMessage.domain_removal(self.domain.name)) self.assertEqual(user_history.changed_via, 'corehq.apps.domain.deletion._delete_web_user_membership') self.assertEqual(user_history.changes, {}) def _assert_role_counts(self, domain_name, roles, permissions, assignments): self.assertEqual(UserRole.objects.filter(domain=domain_name).count(), roles) self.assertEqual(RolePermission.objects.filter(role__domain=domain_name).count(), permissions) self.assertEqual(RoleAssignableBy.objects.filter(role__domain=domain_name).count(), assignments) def test_roles_delete(self): for domain_name in [self.domain.name, self.domain2.name]: role1 = UserRole.objects.create( domain=domain_name, name="role1" ) role = UserRole.objects.create( domain=domain_name, name="role2" ) role.set_permissions([ PermissionInfo(HqPermissions.view_reports.name, allow=PermissionInfo.ALLOW_ALL) ]) role.set_assignable_by([role1.id]) self._assert_role_counts(domain_name, 2, 1, 1) self.domain.delete() 
self._assert_role_counts(self.domain.name, 0, 0, 0) self._assert_role_counts(self.domain2.name, 2, 1, 1) def _assert_zapier_counts(self, domain_name, count): self._assert_queryset_count([ ZapierSubscription.objects.filter(domain=domain_name), ], count) def test_zapier_delete(self): for domain_name in [self.domain.name, self.domain2.name]: ZapierSubscription.objects.create( domain=domain_name, case_type='case_type', event_name=EventTypes.NEW_CASE, url='http://%s.com' % domain_name, user_id='user_id', ) self._assert_zapier_counts(domain_name, 1) self.domain.delete() self._assert_zapier_counts(self.domain.name, 0) self._assert_zapier_counts(self.domain2.name, 1) def _assert_motech_count(self, domain_name, count): self._assert_queryset_count([ RequestLog.objects.filter(domain=domain_name), ], count) def test_motech_delete(self): for domain_name in [self.domain.name, self.domain2.name]: RequestLog.objects.create(domain=domain_name) self._assert_motech_count(domain_name, 1) self.domain.delete() self._assert_motech_count(self.domain.name, 0) self._assert_motech_count(self.domain2.name, 1) def _assert_repeaters_count(self, domain_name, count): self._assert_queryset_count([ SQLRepeater.objects.filter(domain=domain_name), SQLRepeatRecord.objects.filter(domain=domain_name), SQLRepeatRecordAttempt.objects.filter(repeat_record__domain=domain_name), ], count) # Repeater.get_class_from_doc_type is patched because while syncing the # SQL object to couch, the Repeater.save was erroring while clearing cache @patch.object(Repeater, 'get_class_from_doc_type') def test_repeaters_delete(self, mock): mock.return_value = Repeater for domain_name in [self.domain.name, self.domain2.name]: conn = ConnectionSettings.objects.create( domain=domain_name, name='To Be Deleted', url="http://localhost/api/" ) repeater = SQLCaseRepeater.objects.create( domain=domain_name, connection_settings=conn ) record = repeater.repeat_records.create( domain=domain_name, registered_at=datetime.utcnow(), ) 
record.sqlrepeatrecordattempt_set.create( state=RECORD_SUCCESS_STATE, ) self._assert_repeaters_count(domain_name, 1) self.addCleanup(repeater.delete) self.domain.delete() self._assert_repeaters_count(self.domain.name, 0) self._assert_repeaters_count(self.domain2.name, 1) def _assert_couchforms_counts(self, domain_name, count): self._assert_queryset_count([ UnfinishedSubmissionStub.objects.filter(domain=domain_name) ], count) def test_couchforms_delete(self): for domain_name in [self.domain.name, self.domain2.name]: UnfinishedSubmissionStub.objects.create( domain=domain_name, timestamp=datetime.utcnow(), xform_id='xform_id', ) self._assert_couchforms_counts(domain_name, 1) self.domain.delete() self._assert_couchforms_counts(self.domain.name, 0) self._assert_couchforms_counts(self.domain2.name, 1) def test_delete_commtrack_config(self): # Config will have been created by convert_to_commtrack in setUp self.assertIsNotNone(CommtrackConfig.for_domain(self.domain.name)) self.domain.delete() self.assertIsNone(CommtrackConfig.for_domain(self.domain.name)) def test_lookup_tables(self): def count_lookup_tables(domain): return LookupTable.objects.filter(domain=domain).count() def count_lookup_table_rows(domain): return LookupTableRow.objects.filter(domain=domain).count() def count_lookup_table_row_owners(domain): return LookupTableRowOwner.objects.filter(domain=domain).count() for domain_name in [self.domain.name, self.domain2.name]: table = LookupTable.objects.create(domain=domain_name, tag="domain-deletion") row = LookupTableRow.objects.create(domain=domain_name, table_id=table.id, sort_key=0) LookupTableRowOwner.objects.create( domain=domain_name, row_id=row.id, owner_type=OwnerType.User, owner_id="abc", ) self.domain.delete() self.assertEqual(count_lookup_tables(self.domain.name), 0) self.assertEqual(count_lookup_table_rows(self.domain.name), 0) self.assertEqual(count_lookup_table_row_owners(self.domain.name), 0) self.assertEqual(count_lookup_tables(self.domain2.name), 1) 
self.assertEqual(count_lookup_table_rows(self.domain2.name), 1) self.assertEqual(count_lookup_table_row_owners(self.domain2.name), 1) class HardDeleteFormsAndCasesInDomainTests(TestCase): def test_normal_forms_are_deleted(self): create_form_for_test(self.deleted_domain.name) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.deleted_domain.name)), 0) def test_archived_forms_are_deleted(self): create_form_for_test(self.deleted_domain.name, state=XFormInstance.ARCHIVED) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.deleted_domain.name)), 0) def test_soft_deleted_forms_are_deleted(self): create_form_for_test(self.deleted_domain.name, state=XFormInstance.DELETED) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.deleted_domain.name)), 0) def test_forms_are_deleted_for_specified_domain_only(self): create_form_for_test(self.deleted_domain.name) create_form_for_test(self.extra_deleted_domain.name) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.addCleanup(self._cleanup_forms_and_cases, self.extra_deleted_domain.name) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.deleted_domain.name)), 0) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.extra_deleted_domain.name)), 1) def test_cases_are_deleted(self): create_case(self.domain_in_use.name, save=True) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.deleted_domain.name)), 0) def test_soft_deleted_cases_are_deleted(self): case = create_case(self.domain_in_use.name, save=True) 
CommCareCase.objects.soft_delete_cases(self.deleted_domain.name, [case.case_id]) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.deleted_domain.name)), 0) def test_cases_are_deleted_for_specified_domain_only(self): create_case(self.deleted_domain.name, save=True) create_case(self.extra_deleted_domain.name, save=True) call_command('hard_delete_forms_and_cases_in_domain', self.deleted_domain.name, noinput=True) self.addCleanup(self._cleanup_forms_and_cases, self.extra_deleted_domain.name) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.deleted_domain.name)), 0) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.extra_deleted_domain.name)), 1) def test_forms_and_cases_are_not_deleted_if_domain_in_use(self): # a form is created as a byproduct of case creation create_case(self.domain_in_use.name, save=True) call_command('hard_delete_forms_and_cases_in_domain', self.domain_in_use.name, noinput=True) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain_in_use.name)), 1) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.domain_in_use.name)), 1) self.assertEqual(len(XFormInstance.objects.get_deleted_form_ids_in_domain(self.domain_in_use.name)), 0) self.assertEqual(len(CommCareCase.objects.get_deleted_case_ids_in_domain(self.domain_in_use.name)), 0) def test_forms_and_cases_are_deleted_if_domain_in_use_but_ignore_flag_is_true(self): # a form is created as a byproduct of case creation create_case(self.domain_in_use.name, save=True) call_command('hard_delete_forms_and_cases_in_domain', self.domain_in_use.name, noinput=True, ignore_domain_in_use=True) self.assertEqual(len(XFormInstance.objects.get_form_ids_in_domain(self.domain_in_use.name)), 0) self.assertEqual(len(CommCareCase.objects.get_case_ids_in_domain(self.domain_in_use.name)), 0) @classmethod def setUpClass(cls): 
super().setUpClass() cls.deleted_domain = Domain(name='deleted-domain') cls.deleted_domain.save() cls.deleted_domain.delete(leave_tombstone=True) cls.addClassCleanup(ensure_deleted, cls.deleted_domain) cls.extra_deleted_domain = Domain(name='extra-deleted-domain') cls.extra_deleted_domain.save() cls.extra_deleted_domain.delete(leave_tombstone=True) cls.addClassCleanup(ensure_deleted, cls.extra_deleted_domain) cls.domain_in_use = Domain(name='domain-in-use') cls.domain_in_use.save() cls.addClassCleanup(ensure_deleted, cls.domain_in_use) def _cleanup_forms_and_cases(self, domain_name): call_command('hard_delete_forms_and_cases_in_domain', domain_name, noinput=True, ignore_domain_in_use=True) def tearDown(self): for domain in [self.deleted_domain, self.extra_deleted_domain, self.domain_in_use]: self._cleanup_forms_and_cases(domain.name) super().tearDown() def ensure_deleted(domain): def make_exe(op): """Make execute function that ignores failed SQL deletions and continue deleting other related entities, especially Couch objects. SQL entities that cannot be deleted will be cleaned up on rollback at the end of the transaction. """ def execute(domain_name): try: real_execute(domain_name) except TransactionManagementError as err: if cant_execute not in str(err): raise real_execute = op.execute return execute cant_execute = "can't execute queries until the end of the 'atomic' block." if domain and domain._rev: with ExitStack() as stack: for op in DOMAIN_DELETE_OPERATIONS: stack.enter_context(patch.object(op, "execute", make_exe(op))) domain.delete() def get_product_plan_version(edition=SoftwarePlanEdition.ADVANCED): """Work around AccountingError: No default product plan was set up, did you forget to run migrations? 
""" from corehq.apps.accounting.exceptions import AccountingError from corehq.apps.accounting.tests.generator import bootstrap_test_software_plan_versions try: return DefaultProductPlan.get_default_plan_version(edition) except AccountingError: call_command('cchq_prbac_bootstrap') bootstrap_test_software_plan_versions() return DefaultProductPlan.get_default_plan_version(edition)
bsd-3-clause
0d5a50cf316467c812f2a0f26f5f4690
40.545222
115
0.651578
3.8201
false
true
false
false
dimagi/commcare-hq
corehq/apps/hqcase/management/commands/ptop_preindex.py
1
5839
# http://www.gevent.org/gevent.monkey.html#module-gevent.monkey from datetime import datetime from django.conf import settings from django.core.mail import mail_admins from django.core.management.base import BaseCommand import gevent from gevent import monkey from corehq.apps.hqcase.management.commands.ptop_reindexer_v2 import ( DomainReindexerFactory, UserReindexerFactory, GroupReindexerFactory, GroupToUserReindexerFactory, SqlCaseReindexerFactory, SqlFormReindexerFactory, CaseSearchReindexerFactory, SmsReindexerFactory, AppReindexerFactory, ) from corehq.elastic import get_es_new from corehq.pillows.user import add_demo_user_to_user_index from corehq.pillows.utils import get_all_expected_es_indices from corehq.util.log import get_traceback_string from pillowtop.es_utils import ( XFORM_HQ_INDEX_NAME, CASE_HQ_INDEX_NAME, USER_HQ_INDEX_NAME, DOMAIN_HQ_INDEX_NAME, APP_HQ_INDEX_NAME, GROUP_HQ_INDEX_NAME, SMS_HQ_INDEX_NAME, CASE_SEARCH_HQ_INDEX_NAME, ) from pillowtop.reindexer.reindexer import ReindexerFactory monkey.patch_all() def get_reindex_commands(hq_index_name): """Return a list of ``ReindexerFactory`` classes or functions that are used to rebuild the index from scratch. 
:param hq_index_name: ``str`` name of the Elastic index alias""" pillow_command_map = { DOMAIN_HQ_INDEX_NAME: [DomainReindexerFactory], CASE_HQ_INDEX_NAME: [SqlCaseReindexerFactory], XFORM_HQ_INDEX_NAME: [SqlFormReindexerFactory], # groupstousers indexing must happen after all users are indexed USER_HQ_INDEX_NAME: [ UserReindexerFactory, add_demo_user_to_user_index, GroupToUserReindexerFactory, ], APP_HQ_INDEX_NAME: [AppReindexerFactory], GROUP_HQ_INDEX_NAME: [GroupReindexerFactory], CASE_SEARCH_HQ_INDEX_NAME: [CaseSearchReindexerFactory], SMS_HQ_INDEX_NAME: [SmsReindexerFactory], } return pillow_command_map.get(hq_index_name, []) def do_reindex(hq_index_name, reset): print("Starting pillow preindex %s" % hq_index_name) reindex_commands = get_reindex_commands(hq_index_name) for factory_or_func in reindex_commands: if isinstance(factory_or_func, type): if not issubclass(factory_or_func, ReindexerFactory): raise ValueError(f"expected ReindexerFactory, got: {factory_or_func!r}") kwargs = {} reindex_args = ReindexerFactory.resumable_reindexer_args if reset \ and factory_or_func.arg_contributors \ and reindex_args in factory_or_func.arg_contributors: kwargs["reset"] = True factory_or_func(**kwargs).build().reindex() else: factory_or_func() print("Pillow preindex finished %s" % hq_index_name) class Command(BaseCommand): help = """ Create all ES indexes that exist in code, but do not exist in the cluster. For indexes that already exist, this command does nothing unless --reset is passed """ def add_arguments(self, parser): parser.add_argument( '--reset', action='store_true', dest='reset', default=False, help='Reset resumable indices. 
Results in all documents being reprocessed for this index', ) def handle(self, **options): runs = [] all_es_indices = list(get_all_expected_es_indices()) es = get_es_new() if options['reset']: indices_needing_reindex = all_es_indices else: indices_needing_reindex = [info for info in all_es_indices if not es.indices.exists(info.index)] if not indices_needing_reindex: print('Nothing needs to be reindexed') return print("Reindexing:\n\t", end=' ') print('\n\t'.join(map(str, indices_needing_reindex))) preindex_message = """ Heads up! %s is going to start preindexing the following indices:\n %s This may take a while, so don't deploy until all these have reported finishing. """ % ( settings.EMAIL_SUBJECT_PREFIX, '\n\t'.join(map(str, indices_needing_reindex)) ) mail_admins("Pillow preindexing starting", preindex_message) start = datetime.utcnow() for index_info in indices_needing_reindex: # loop through pillows once before running greenlets # to fail hard on misconfigured pillows reindex_commands = get_reindex_commands(index_info.hq_index_name) if not reindex_commands: raise Exception( "Error, pillow [%s] is not configured " "with its own management command reindex command " "- it needs one" % index_info.hq_index_name ) for index_info in indices_needing_reindex: print(index_info.hq_index_name) g = gevent.spawn(do_reindex, index_info.hq_index_name, options['reset']) runs.append(g) if len(indices_needing_reindex) > 0: gevent.joinall(runs) try: for job in runs: job.get() except Exception: mail_admins("Pillow preindexing failed", get_traceback_string()) raise else: mail_admins( "Pillow preindexing completed", "Reindexing %s took %s seconds" % ( ', '.join(map(str, indices_needing_reindex)), (datetime.utcnow() - start).seconds ) ) print("All pillowtop reindexing jobs completed")
bsd-3-clause
c11dbf496c47932b26a06a25d3afe984
35.267081
108
0.6174
3.931987
false
false
false
false
dimagi/commcare-hq
corehq/apps/es/forms.py
1
3323
""" FormES -------- """ from django.conf import settings from corehq.pillows.mappings.const import NULL_VALUE from . import filters from .client import ElasticDocumentAdapter, create_document_adapter from .es_query import HQESQuery from .transient_util import get_adapter_mapping, from_dict_with_possible_id class FormES(HQESQuery): index = 'forms' default_filters = { 'is_xform_instance': filters.term("doc_type", "xforminstance"), 'has_xmlns': filters.exists("xmlns"), 'has_user': filters.exists("form.meta.userID"), 'has_domain': filters.exists("domain"), } @property def builtin_filters(self): return [ form_ids, xmlns, app, submitted, completed, user_id, user_type, user_ids_handle_unknown, j2me_submissions, updating_cases, ] + super(FormES, self).builtin_filters def user_aggregation(self): return self.terms_aggregation('form.meta.userID', 'user') def domain_aggregation(self): return self.terms_aggregation('domain.exact', 'domain') def only_archived(self): """Include only archived forms, which are normally excluded""" return (self.remove_default_filter('is_xform_instance') .filter(filters.doc_type('xformarchived'))) class ElasticForm(ElasticDocumentAdapter): @property def mapping(self): return get_adapter_mapping(self) @classmethod def from_python(cls, doc): return from_dict_with_possible_id(doc) form_adapter = create_document_adapter( ElasticForm, getattr(settings, "ES_XFORM_INDEX_NAME", "xforms_2016-07-07"), "xform", ) def form_ids(form_ids): return filters.term('_id', form_ids) def xmlns(xmlnss): return filters.term('xmlns.exact', xmlnss) def app(app_ids): return filters.term('app_id', app_ids) def submitted(gt=None, gte=None, lt=None, lte=None): return filters.date_range('received_on', gt, gte, lt, lte) def completed(gt=None, gte=None, lt=None, lte=None): return filters.date_range('form.meta.timeEnd', gt, gte, lt, lte) def user_id(user_ids): if not isinstance(user_ids, (list, set, tuple)): user_ids = [user_ids] return filters.term( 'form.meta.userID', [x if x is 
not None else NULL_VALUE for x in user_ids] ) def user_type(user_types): return filters.term("user_type", user_types) def user_ids_handle_unknown(user_ids): missing_users = None in user_ids user_ids = [_f for _f in user_ids if _f] if not missing_users: user_filter = user_id(user_ids) elif user_ids and missing_users: user_filter = filters.OR( user_id(user_ids), filters.missing('form.meta.userID'), ) else: user_filter = filters.missing('form.meta.userID') return user_filter def j2me_submissions(gt=None, gte=None, lt=None, lte=None): return filters.AND( filters.regexp("form.meta.appVersion", "v2+.[0-9]+.*"), submitted(gt, gte, lt, lte) ) def updating_cases(case_ids): """return only those forms that have case blocks that touch the cases listed in `case_ids` """ return filters.term("__retrieved_case_ids", case_ids)
bsd-3-clause
3d59a4fa4576818db595d46979e06ff8
24.75969
94
0.629251
3.443523
false
false
false
false
dimagi/commcare-hq
corehq/apps/ivr/models.py
1
2975
from django.db import models from corehq.apps.sms.mixin import UnrecognizedBackendException from corehq.apps.sms.models import OUTGOING, Log, SQLMobileBackend from corehq.util.mixin import UUIDGeneratorMixin class SQLIVRBackend(SQLMobileBackend): """ IVR Functionality has been removed, but this model is being kept in order to preserve foreign key references in the Call model history. """ class Meta(object): app_label = 'sms' proxy = True class Call(UUIDGeneratorMixin, Log): """ IVR Functionality has been removed, but this model is being kept in order to preserve the call history. """ UUIDS_TO_GENERATE = ['couch_id'] couch_id = models.CharField(max_length=126, null=True, db_index=True) """ Call Metadata """ # True if the call was answered, False if not answered = models.BooleanField(null=True, default=False) # Length of the call in seconds duration = models.IntegerField(null=True) # The session id returned from the backend, with the backend's hq api id # and a hyphen prepended. For example: TWILIO-xxxxxxxxxx gateway_session_id = models.CharField(max_length=126, null=True, db_index=True) """ Advanced IVR Options """ # If True, on hangup, a partial form submission will occur if the # survey is not yet completed submit_partial_form = models.BooleanField(null=True, default=False) # Only matters when submit_partial_form is True. # If True, case side effects are applied to any partial form submissions, # otherwise they are excluded. 
include_case_side_effects = models.BooleanField(null=True, default=False) # The maximum number of times to retry a question with an invalid response # before hanging up max_question_retries = models.IntegerField(null=True) # A count of the number of invalid responses for the current question current_question_retry_count = models.IntegerField(default=0, null=True) """ IVR Framework Properties """ # The session id from touchforms xforms_session_id = models.CharField(max_length=126, null=True) # Error message from the gateway, if any error_message = models.TextField(null=True) # This is set to True by the framework if the backend is preparing the first # IVR response when initiating the call. If True, then first_response is # the prepared first response use_precached_first_response = models.BooleanField(null=True, default=False) first_response = models.TextField(null=True) # The case id of the case to submit the form against case_id = models.CharField(max_length=126, null=True) case_for_case_submission = models.BooleanField(null=True, default=False) # The form unique id of the form that plays the survey for the call app_id = models.CharField(max_length=126, null=True) form_unique_id = models.CharField(max_length=126, null=True) class Meta(object): app_label = 'ivr'
bsd-3-clause
703d1f93da93b860b8dd4a35464a58f9
36.1875
83
0.715966
3.929987
false
false
false
false
dimagi/commcare-hq
corehq/motech/repeaters/views/repeaters.py
1
11058
from collections import namedtuple from django.contrib import messages from django.http import Http404, HttpResponseRedirect from django.urls import reverse from django.utils.decorators import method_decorator from django.utils.translation import gettext as _ from django.utils.translation import gettext_lazy from django.views.decorators.http import require_POST from memoized import memoized from corehq import privileges, toggles from corehq.apps.accounting.decorators import requires_privilege_with_fallback from corehq.apps.domain.decorators import domain_admin_required from corehq.apps.domain.views.settings import BaseAdminProjectSettingsView from corehq.apps.users.decorators import ( require_can_edit_web_users, require_permission, ) from corehq.apps.users.models import HqPermissions from corehq.motech.const import PASSWORD_PLACEHOLDER from ..forms import CaseRepeaterForm, FormRepeaterForm, GenericRepeaterForm from ..models import ( Repeater, RepeatRecord, are_repeat_records_migrated, get_all_repeater_types, ) RepeaterTypeInfo = namedtuple('RepeaterTypeInfo', 'class_name friendly_name has_config instances') class DomainForwardingOptionsView(BaseAdminProjectSettingsView): urlname = 'domain_forwarding' page_title = gettext_lazy("Data Forwarding") template_name = 'repeaters/repeaters.html' @method_decorator(require_permission(HqPermissions.edit_motech)) @method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING)) def dispatch(self, request, *args, **kwargs): return super().dispatch(request, *args, **kwargs) @property def repeater_types_info(self): return [ RepeaterTypeInfo( class_name=r.__name__, friendly_name=r.friendly_name, has_config=r._has_config, instances=r.by_domain(self.domain), ) for r in get_all_repeater_types().values() if r.available_for_domain(self.domain) ] @property def page_context(self): if are_repeat_records_migrated(self.domain): report = 'repeat_record_report' else: report = 'couch_repeat_record_report' return { 'report': 
report, 'repeater_types_info': self.repeater_types_info, 'pending_record_count': RepeatRecord.count(self.domain), 'user_can_configure': ( self.request.couch_user.is_superuser or self.request.couch_user.can_edit_motech() or toggles.IS_CONTRACTOR.enabled(self.request.couch_user.username) ) } class BaseRepeaterView(BaseAdminProjectSettingsView): page_title = gettext_lazy("Forward Data") repeater_form_class = GenericRepeaterForm template_name = 'repeaters/add_form_repeater.html' @method_decorator(require_permission(HqPermissions.edit_motech)) @method_decorator(requires_privilege_with_fallback(privileges.DATA_FORWARDING)) def dispatch(self, request, *args, **kwargs): return super(BaseRepeaterView, self).dispatch(request, *args, **kwargs) @property def page_url(self): return reverse(self.urlname, args=[self.domain, self.repeater_type]) @property def parent_pages(self): return [{ 'title': DomainForwardingOptionsView.page_title, 'url': reverse(DomainForwardingOptionsView.urlname, args=[self.domain]), }] @property def repeater_type(self): return self.kwargs['repeater_type'] @property def page_name(self): return self.repeater_class.friendly_name @property @memoized def repeater_class(self): try: return get_all_repeater_types()[self.repeater_type] except KeyError: raise Http404( "No such repeater {}. 
Valid types: {}".format( self.repeater_type, list(get_all_repeater_types()) ) ) @property def add_repeater_form(self): return None @property def page_context(self): return { 'form': self.add_repeater_form, 'repeater_type': self.repeater_type, } def initialize_repeater(self): raise NotImplementedError def make_repeater(self): repeater = self.initialize_repeater() return self.set_repeater_attr(repeater, self.add_repeater_form.cleaned_data) def set_repeater_attr(self, repeater, cleaned_data): repeater.domain = self.domain repeater.connection_settings_id = int(cleaned_data['connection_settings_id']) repeater.request_method = cleaned_data['request_method'] repeater.format = cleaned_data['format'] return repeater def post_save(self, request, repeater): pass def post(self, request, *args, **kwargs): if self.add_repeater_form.is_valid(): repeater = self.make_repeater() repeater.save() return self.post_save(request, repeater) return self.get(request, *args, **kwargs) class AddRepeaterView(BaseRepeaterView): urlname = 'add_repeater' @property @memoized def add_repeater_form(self): if self.request.method == 'POST': return self.repeater_form_class( self.request.POST, domain=self.domain, repeater_class=self.repeater_class ) return self.repeater_form_class( domain=self.domain, repeater_class=self.repeater_class ) def initialize_repeater(self): return self.repeater_class() def post_save(self, request, repeater): messages.success(request, _("Forwarding set up to {}").format(repeater.name)) return HttpResponseRedirect( reverse(DomainForwardingOptionsView.urlname, args=[self.domain]) ) class EditRepeaterView(BaseRepeaterView): urlname = 'edit_repeater' template_name = 'repeaters/add_form_repeater.html' @property def repeater_id(self): return self.kwargs['repeater_id'] @property def page_url(self): # The EditRepeaterView url routes to the correct edit form for # its subclasses. 
It does this with `repeater_type` in # r'^forwarding/(?P<repeater_type>\w+)/edit/(?P<repeater_id>\w+)/$' # See corehq/apps/domain/urls.py for details. return reverse(EditRepeaterView.urlname, args=[self.domain, self.repeater_type, self.repeater_id]) @property @memoized def add_repeater_form(self): if self.request.method == 'POST': return self.repeater_form_class( self.request.POST, domain=self.domain, repeater_class=self.repeater_class ) else: repeater_id = self.kwargs['repeater_id'] repeater = Repeater.get(repeater_id) data = repeater.to_json() data['password'] = PASSWORD_PLACEHOLDER return self.repeater_form_class( domain=self.domain, repeater_class=self.repeater_class, data=data, submit_btn_text=_("Update Repeater"), ) @method_decorator(domain_admin_required) def dispatch(self, request, *args, **kwargs): if self.request.GET.get('repeater_type'): self.kwargs['repeater_type'] = self.request.GET['repeater_type'] return super(EditRepeaterView, self).dispatch(request, *args, **kwargs) def initialize_repeater(self): return Repeater.get(self.kwargs['repeater_id']) def post_save(self, request, repeater): messages.success(request, _("Repeater Successfully Updated")) if self.request.GET.get('repeater_type'): return HttpResponseRedirect( reverse(self.urlname, args=[self.domain, repeater.get_id]) + '?repeater_type=' + self.kwargs['repeater_type'] ) else: return HttpResponseRedirect( reverse(self.urlname, args=[self.domain, repeater.get_id]) ) class AddFormRepeaterView(AddRepeaterView): urlname = 'add_form_repeater' repeater_form_class = FormRepeaterForm @property def page_url(self): return reverse(self.urlname, args=[self.domain]) def set_repeater_attr(self, repeater, cleaned_data): repeater = super().set_repeater_attr(repeater, cleaned_data) repeater.include_app_id_param = ( self.add_repeater_form.cleaned_data['include_app_id_param']) repeater.user_blocklist = ( self.add_repeater_form.cleaned_data['user_blocklist']) return repeater class 
EditFormRepeaterView(EditRepeaterView, AddFormRepeaterView): urlname = 'edit_form_repeater' page_title = gettext_lazy("Edit Form Repeater") @property def page_url(self): return reverse(AddFormRepeaterView.urlname, args=[self.domain]) class AddCaseRepeaterView(AddRepeaterView): urlname = 'add_case_repeater' repeater_form_class = CaseRepeaterForm @property def page_url(self): return reverse(self.urlname, args=[self.domain]) def set_repeater_attr(self, repeater, cleaned_data): repeater = super().set_repeater_attr(repeater, cleaned_data) repeater.white_listed_case_types = ( self.add_repeater_form.cleaned_data['white_listed_case_types']) repeater.black_listed_users = ( self.add_repeater_form.cleaned_data['black_listed_users']) return repeater class EditCaseRepeaterView(EditRepeaterView, AddCaseRepeaterView): urlname = 'edit_case_repeater' page_title = gettext_lazy("Edit Case Repeater") @property def page_url(self): return reverse(AddCaseRepeaterView.urlname, args=[self.domain]) @require_POST @require_can_edit_web_users @requires_privilege_with_fallback(privileges.DATA_FORWARDING) def drop_repeater(request, domain, repeater_id): rep = Repeater.get(repeater_id) rep.retire() messages.success(request, "Forwarding stopped!") return HttpResponseRedirect( reverse(DomainForwardingOptionsView.urlname, args=[domain]) ) @require_POST @require_can_edit_web_users @requires_privilege_with_fallback(privileges.DATA_FORWARDING) def pause_repeater(request, domain, repeater_id): rep = Repeater.get(repeater_id) rep.pause() messages.success(request, "Forwarding paused!") return HttpResponseRedirect( reverse(DomainForwardingOptionsView.urlname, args=[domain]) ) @require_POST @require_can_edit_web_users @requires_privilege_with_fallback(privileges.DATA_FORWARDING) def resume_repeater(request, domain, repeater_id): rep = Repeater.get(repeater_id) rep.resume() messages.success(request, "Forwarding resumed!") return HttpResponseRedirect( reverse(DomainForwardingOptionsView.urlname, 
args=[domain]) )
bsd-3-clause
fd125ac583cdb9d5351d86599277853c
32.920245
85
0.65482
3.838251
false
false
false
false
dimagi/commcare-hq
corehq/apps/data_interfaces/migrations/0001_initial.py
1
2411
import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='AutomaticUpdateRule', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('domain', models.CharField(max_length=126, db_index=True)), ('name', models.CharField(max_length=126)), ('case_type', models.CharField(max_length=126)), ('active', models.BooleanField(default=False)), ('deleted', models.BooleanField(default=False)), ('last_run', models.DateTimeField(null=True)), ('server_modified_boundary', models.IntegerField()), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='AutomaticUpdateAction', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('action', models.CharField(max_length=10, choices=[('UPDATE', 'UPDATE'), ('CLOSE', 'CLOSE')])), ('property_name', models.CharField(max_length=126, null=True)), ('property_value', models.CharField(max_length=126, null=True)), ('rule', models.ForeignKey(to='data_interfaces.AutomaticUpdateRule', on_delete=django.db.models.deletion.PROTECT)), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='AutomaticUpdateRuleCriteria', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('property_name', models.CharField(max_length=126)), ('property_value', models.CharField(max_length=126, null=True)), ('match_type', models.CharField(max_length=10, choices=[('DAYS', 'DAYS'), ('EQUAL', 'EQUAL'), ('NOT_EQUAL', 'NOT_EQUAL'), ('EXISTS', 'EXISTS')])), ('rule', models.ForeignKey(to='data_interfaces.AutomaticUpdateRule', on_delete=django.db.models.deletion.PROTECT)), ], options={ }, bases=(models.Model,), ), ]
bsd-3-clause
d45ef6cd8f8b971ad7173e0cd7704139
44.490566
162
0.554542
4.431985
false
false
false
false
dimagi/commcare-hq
corehq/apps/app_manager/tests/test_apps.py
1
14209
import json import os import uuid from django.test import TestCase from collections import namedtuple from memoized import memoized from unittest.mock import patch from corehq.apps.app_manager.dbaccessors import get_app, get_build_ids from corehq.apps.app_manager.models import ( Application, ApplicationBase, DetailColumn, LinkedApplication, Module, ReportAppConfig, ReportModule, import_app, ) from corehq.apps.app_manager.tasks import ( autogenerate_build, prune_auto_generated_builds, ) from corehq.apps.app_manager.tests.app_factory import AppFactory from corehq.apps.app_manager.tests.util import ( TestXmlMixin, add_build, patch_default_builds, get_simple_form, patch_validate_xform, ) from corehq.apps.app_manager.util import add_odk_profile_after_build from corehq.apps.app_manager.views.apps import load_app_from_slug from corehq.apps.app_manager.views.utils import update_linked_app from corehq.apps.builds.models import BuildSpec from corehq.apps.domain.shortcuts import create_domain from corehq.apps.linked_domain.applications import link_app from corehq.apps.userreports.tests.utils import get_sample_report_config MockRequest = namedtuple('MockRequest', ['status', 'data']) @patch_validate_xform() class AppManagerTest(TestCase, TestXmlMixin): file_path = ('data',) min_paths = ( 'files/profile.ccpr', 'files/profile.xml', 'files/suite.xml', 'files/media_suite.xml', 'files/modules-0/forms-0.xml', ) @classmethod def setUpClass(cls): super(AppManagerTest, cls).setUpClass() cls.build1 = {'version': '1.2.0', 'build_number': 7106} cls.build2 = {'version': '2.7.0', 'build_number': 20655} add_build(**cls.build1) add_build(**cls.build2) cls.domain = 'test-domain' create_domain(cls.domain) cls.xform_str = cls.get_xml('very_simple_form').decode('utf-8') def setUp(self): super(AppManagerTest, self).setUp() self.app = Application.new_app(self.domain, "TestApp") for i in range(3): module = self.app.add_module(Module.new_module("Module%d" % i, "en")) for j in range(3): 
self.app.new_form(module.id, name="Form%s-%s" % (i, j), attachment=self.xform_str, lang="en") module = self.app.get_module(i) detail = module.ref_details.short detail.columns.append( DetailColumn(header={"en": "test 字 unicode"}, model="case", field="test", format="plain") ) detail.columns.append( DetailColumn(header={"en": "age"}, model="case", field="age", format="years-ago") ) self.app.save() def test_last_modified(self): lm = self.app.last_modified self.app.save() app = Application.get(self.app._id) self.assertGreater(app.last_modified, lm) def test_last_modified_bulk(self): lm = self.app.last_modified Application.save_docs([self.app]) app = Application.get(self.app._id) self.assertGreater(app.last_modified, lm) lm = self.app.last_modified Application.bulk_save([self.app]) app = Application.get(self.app._id) self.assertGreater(app.last_modified, lm) def test_increment_version(self): old_version = self.app.version self.app.save() self.assertEqual(self.app.version, old_version + 1) def tearDown(self): self.app.delete() super(AppManagerTest, self).tearDown() def testSetUp(self): self.assertEqual(len(self.app.modules), 3) for module in self.app.get_modules(): self.assertEqual(len(module.forms), 3) def testDeleteForm(self): self.app.delete_form(self.app.modules[0].unique_id, self.app.modules[0].forms[0].unique_id) self.assertEqual(len(self.app.modules), 3) for module, i in zip(self.app.get_modules(), [2, 3, 3]): self.assertEqual(len(module.forms), i) def testDeleteModule(self): self.app.delete_module(self.app.modules[0].unique_id) self.assertEqual(len(self.app.modules), 2) def assertModuleOrder(self, actual_modules, expected_modules): self.assertEqual([m.name['en'] for m in actual_modules], [m.name['en'] for m in expected_modules]) def testSwapModules(self): m0, m1, m2 = self.app.modules self.app.rearrange_modules(1, 0) self.assertModuleOrder(self.app.modules, [m1, m0, m2]) def testRearrangeModuleWithChildrenHigher(self): m0, m1, m2 = self.app.modules 
m2.root_module_id = m1.unique_id self.app.rearrange_modules(1, 0) # m2 is a child of m1, so when m1 moves to the top, m2 should follow self.assertModuleOrder(self.app.modules, [m1, m2, m0]) def testRearrangeModuleWithChildrenLower(self): m0, m1, m2 = self.app.modules m1.root_module_id = m0.unique_id self.app.rearrange_modules(0, 1) self.assertModuleOrder(self.app.modules, [m2, m0, m1]) @patch_default_builds def _test_import_app(self, app_id_or_source): new_app = import_app(app_id_or_source, self.domain) self.assertEqual(set(new_app.blobs.keys()).intersection(list(self.app.blobs.keys())), set()) new_forms = list(new_app.get_forms()) old_forms = list(self.app.get_forms()) for new_form, old_form in zip(new_forms, old_forms): self.assertEqual(new_form.source, old_form.source) self.assertNotEqual(new_form.unique_id, old_form.unique_id) for new_module, old_module in zip(new_app.get_modules(), self.app.get_modules()): if isinstance(old_module, ReportModule): old_config_ids = {config.uuid for config in old_module.report_configs} new_config_ids = {config.uuid for config in new_module.report_configs} self.assertEqual(old_config_ids.intersection(new_config_ids), set()) return new_app def testImportApp_from_id(self): self.assertTrue(self.app.blobs) imported_app = self._test_import_app(self.app.id) self.assertEqual(imported_app.family_id, self.app.id) @patch('corehq.apps.app_manager.models.ReportAppConfig.report') def testImportApp_from_source(self, report_mock): report_mock.return_value = get_sample_report_config() report_module = self.app.add_module(ReportModule.new_module('Reports', None)) report_module.report_configs = [ ReportAppConfig(report_id='config_id1', header={'en': 'CommBugz'}), ReportAppConfig(report_id='config_id2', header={'en': 'CommBugz'}) ] app_source = self.app.export_json(dump_json=False) self._test_import_app(app_source) def testAppsBrief(self): """Test that ApplicationBase can wrap the truncated version returned by applications_brief """ self.app.save() 
apps = ApplicationBase.get_db().view('app_manager/applications_brief', startkey=[self.domain], limit=1, ).all() self.assertEqual(len(apps), 1) @property @memoized def _yesno_source(self): # this app fixture uses both the (new) '_attachment' # and the (old) 'contents' conventions, to test that both work with open(os.path.join(os.path.dirname(__file__), 'data', 'yesno.json'), encoding='utf-8') as f: return json.load(f) def _check_has_build_files(self, build, paths): for path in paths: self.assertTrue(build.fetch_attachment(path)) def _app_strings_files(self, build): paths = ['files/default/app_strings.txt'] for lang in build.langs: paths.append('files/{}/app_strings.txt'.format(lang)) return paths def _check_legacy_odk_files(self, build): self.assertTrue(build.copy_of) with self.assertRaises(AttributeError): build.odk_profile_created_after_build path = 'files/profile.ccpr' build_version = build.version build.delete_attachment(path) add_odk_profile_after_build(build) build.save() build = Application.get(build.get_id) self.assertEqual(build.version, build_version) self.assertTrue(build.fetch_attachment(path)) self.assertEqual(build.odk_profile_created_after_build, True) def testBuildApp(self): # do it from a NOT-SAVED app; # regression test against case where contents gets lazy-put w/o saving app = Application.wrap(self._yesno_source) self.assertEqual(app['_id'], None) # i.e. 
hasn't been saved app._id = uuid.uuid4().hex copy = app.make_build() copy.save() self._check_has_build_files(copy, self.min_paths) app_strings_files = self._app_strings_files(copy) self._check_has_build_files(copy, app_strings_files) for path in app_strings_files: lang = path.split("/")[1] data_path = os.path.join(os.path.dirname(__file__), 'data', 'yesno_{}_app_strings.txt'.format(lang)) with open(data_path, encoding='utf-8') as f: self.assertEqual(f.read().strip(), copy.fetch_attachment(path).decode('utf-8').strip()) self._check_legacy_odk_files(copy) @patch_default_builds def testBuildImportedApp(self): app = import_app(self._yesno_source, self.domain) copy = app.make_build() copy.save() self._check_has_build_files(copy, self.min_paths) self._check_legacy_odk_files(copy) @patch('urllib3.PoolManager.request') def testBuildTemplateApps(self, request_mock): image_path = os.path.join('corehq', 'apps', 'hqwebapp', 'static', 'hqwebapp', 'images', 'commcare-hq-logo.png') with open(image_path, 'rb') as f: request_mock.return_value = MockRequest(status=200, data=f.read()) # Tests that these apps successfully build for slug in ['agriculture', 'health', 'wash']: self.assertIsNotNone(load_app_from_slug(self.domain, 'username', slug)) def testGetLatestBuild(self): factory = AppFactory(build_version='2.40.0') m0, f0 = factory.new_basic_module('register', 'case') f0.source = get_simple_form(xmlns=f0.unique_id) app = factory.app app.save() build1 = app.make_build() build1.save() # ensure that there was no previous version used during the build process self.assertEqual(app.get_latest_build.get_cache(app), {}) self.assertEqual(build1.get_latest_build.get_cache(build1), {(): None}) app.save() build2 = app.make_build() build2.save() # ensure that there was no previous version used during the build process self.assertEqual(app.get_latest_build.get_cache(app), {}) self.assertEqual(build2.get_latest_build.get_cache(build2)[()].id, build1.id) def testPruneAutoGeneratedBuilds(self): # 
Build #1, manually generated app = import_app(self._yesno_source, self.domain) for module in app.modules: module.get_or_create_unique_id() app.save() build1 = app.make_build() build1.save() self.assertFalse(build1.is_auto_generated) # Build #2, auto-generated app.save() autogenerate_build(app, "username") build_ids = get_build_ids(app.domain, app.id) self.assertEqual(len(build_ids), 2) self.assertEqual(build_ids[1], build1.id) build2 = get_app(app.domain, build_ids[0]) self.assertTrue(build2.is_auto_generated) # First prune: delete nothing because the auto build is the most recent prune_auto_generated_builds(self.domain, app.id) self.assertEqual(len(get_build_ids(app.domain, app.id)), 2) # Build #3, manually generated app.save() build3 = app.make_build() build3.save() # Release the auto-generated build and prune again, should still delete nothing build2.is_released = True build2.save() prune_auto_generated_builds(self.domain, app.id) self.assertEqual(len(get_build_ids(app.domain, app.id)), 3) # Un-release the auto-generated build and prune again, which should delete it build2.is_released = False build2.save() prune_auto_generated_builds(self.domain, app.id) build_ids = get_build_ids(app.domain, app.id) self.assertEqual(len(build_ids), 2) self.assertNotIn(build2.id, build_ids) def testRevertToCopy(self): old_name = 'old name' new_name = 'new name' app = Application.wrap(self._yesno_source) app.name = old_name app.save() copy = app.make_build() copy.save() self.assertEqual(copy.name, old_name) app.name = new_name app.save() app = Application.get(app.get_id) self.assertEqual(app.name, new_name) app = app.make_reversion_to_copy(copy) app.save() self.assertEqual(app.name, old_name) def testConvertToApplication(self): factory = AppFactory(build_version='2.40.0') m0, f0 = factory.new_basic_module('register', 'case') f0.source = get_simple_form(xmlns=f0.unique_id) factory.app.save() self.addCleanup(factory.app.delete) build = factory.app.make_build() build.is_released = 
True build.save() self.addCleanup(build.delete) linked_app = LinkedApplication() linked_app.domain = 'other-domain' linked_app.save() self.addCleanup(linked_app.delete) link_app(linked_app, factory.app.domain, factory.app.id) update_linked_app(linked_app, factory.app.id, 'system') unlinked_doc = linked_app.convert_to_application().to_json() self.assertEqual(unlinked_doc['doc_type'], 'Application') self.assertFalse(hasattr(unlinked_doc, 'linked_app_attrs'))
bsd-3-clause
9b87bc381a5a9e4fa3962c7b7205a5cc
37.501355
112
0.62793
3.530567
false
true
false
false
dimagi/commcare-hq
corehq/apps/users/cases.py
1
1370
import numbers from couchdbkit import ResourceNotFound from corehq.apps.groups.models import Group from corehq.apps.locations.models import SQLLocation from corehq.apps.users.models import CommCareUser, CouchUser, WebUser def user_db(): return CouchUser.get_db() def get_owner_id(case): return case.owner_id or case.modified_by def get_wrapped_owner(owner_id, support_deleted=False): """ Returns the wrapped user or group object for a given ID, or None if the id isn't a known owner type. """ if not owner_id: return None if isinstance(owner_id, numbers.Number): return None def _get_class(doc_type): return { 'CommCareUser': CommCareUser, 'WebUser': WebUser, 'Group': Group, }.get(doc_type) def _get_deleted_class(doc_type): return { 'Group-Deleted': Group, }.get(doc_type) try: return SQLLocation.objects.get(location_id=owner_id) except SQLLocation.DoesNotExist: pass try: owner_doc = user_db().get(owner_id) except ResourceNotFound: pass else: cls = _get_class(owner_doc['doc_type']) if support_deleted and cls is None: cls = _get_deleted_class(owner_doc['doc_type']) return cls.wrap(owner_doc) if cls else None return None
bsd-3-clause
df934eb37a3248f2ae9691505db76559
23.464286
69
0.629927
3.653333
false
false
false
false
dimagi/commcare-hq
corehq/apps/domain/management/commands/delete_domain.py
1
1908
import textwrap from django.core.management.base import BaseCommand from corehq.apps.domain.dbaccessors import iter_all_domains_and_deleted_domains_with_name class Command(BaseCommand): help = "Deletes the given domain and its contents" def add_arguments(self, parser): parser.add_argument( 'domain_name', ) parser.add_argument( '--noinput', action='store_true', dest='noinput', default=False, help='Skip important confirmation warnings.', ) def handle(self, domain_name, **options): domain_objs = list(iter_all_domains_and_deleted_domains_with_name(domain_name)) if not domain_objs: print('domain with name "{}" not found'.format(domain_name)) return if len(domain_objs) > 1: print("FYI: There are multiple domain objects for this domain" "and they will all be soft-deleted.") if not options['noinput']: confirm = input(textwrap.dedent( f""" Are you sure you want to delete the domain "{domain_name}" and all of it's data? This operation is not reversible and all forms and cases will be PERMANENTLY deleted. Type the domain's name again to continue, or anything else to cancel: """ )) if confirm != domain_name: print("\n\t\tDomain deletion cancelled.") return print(f"Soft-Deleting domain {domain_name} " "(i.e. switching its type to Domain-Deleted, " "which will prevent anyone from reusing that domain)") for domain_obj in domain_objs: assert domain_obj.name == domain_name # Just to be really sure! domain_obj.delete(leave_tombstone=True) print("Operation completed")
bsd-3-clause
0687600940e048a93bbcce17389a5b4c
37.938776
101
0.58805
4.468384
false
false
false
false
dimagi/commcare-hq
corehq/apps/app_manager/tests/test_build_errors.py
1
11081
import json import os from unittest.mock import patch from django.test import SimpleTestCase from corehq.apps.app_manager.const import ( REGISTRY_WORKFLOW_LOAD_CASE, REGISTRY_WORKFLOW_SMART_LINK, WORKFLOW_MODULE, ) from corehq.apps.app_manager.models import ( Application, CaseList, CaseSearch, CaseSearchLabel, CaseSearchProperty, DetailColumn, Module, ) from corehq.apps.app_manager.tests.app_factory import AppFactory from corehq.util.test_utils import flag_enabled @patch('corehq.apps.app_manager.models.validate_xform', return_value=None) @patch('corehq.apps.app_manager.helpers.validators.domain_has_privilege', return_value=True) @patch('corehq.apps.builds.models.BuildSpec.supports_j2me', return_value=False) class BuildErrorsTest(SimpleTestCase): @staticmethod def _clean_unique_id(errors): for error in errors: if 'form' in error and 'unique_id' in error['form']: del error['form']['unique_id'] if 'module' in error and 'unique_id' in error['module']: del error['module']['unique_id'] def test_subcase_errors(self, *args): with open(os.path.join(os.path.dirname(__file__), 'data', 'subcase-details.json'), encoding='utf-8') as f: source = json.load(f) app = Application.wrap(source) errors = app.validate_app() update_path_error = { 'type': 'path error', 'path': '/data/parent_age', 'form_type': 'module_form', 'module': {'name': {'en': "Parent"}, 'id': 0}, 'form': {'id': 0, 'name': {'en': "Register"}}, } subcase_path_error = { 'type': 'path error', 'path': '/data/child_age', 'form_type': 'module_form', 'module': {'name': {'en': "Parent"}, 'id': 0}, 'form': {'id': 0, 'name': {'en': "Register"}}, } self._clean_unique_id(errors) self.assertIn(update_path_error, errors) self.assertIn(subcase_path_error, errors) form = app.get_module(0).get_form(0) errors = form.validate_for_build() self._clean_unique_id(errors) self.assertIn(update_path_error, errors) self.assertIn(subcase_path_error, errors) def test_empty_module_errors(self, *args): factory = AppFactory(build_version='2.24.0') 
app = factory.app m1 = factory.new_basic_module('register', 'case', with_form=False) factory.new_advanced_module('update', 'case', with_form=False) m2 = factory.new_basic_module('update', 'case', with_form=False) m2.case_list = CaseList(show=True, label={'en': "case_list"}) factory.new_shadow_module('update', m1, with_form=False) errors = app.validate_app() standard_module_error = { 'type': 'no forms or case list', 'module': {'id': 0, 'name': {'en': 'register module'}}, } advanced_module_error = { 'type': 'no forms or case list', 'module': {'id': 1, 'name': {'en': 'update module'}}, } self._clean_unique_id(errors) self.assertEqual(len(errors), 2) self.assertIn(standard_module_error, errors) self.assertIn(advanced_module_error, errors) def test_dof_session_endpoint_error(self, *args): factory = AppFactory(build_version='2.51.0') app = factory.app m0 = factory.new_basic_module('first', 'case', with_form=False) m0.put_in_root = True m0.session_endpoint_id = "this_is_m0" m1 = factory.new_basic_module('second', 'case', with_form=False) m1.session_endpoint_id = "this_is_m1" with patch.object(Application, 'enable_practice_users', return_value=False): # avoid db errors = app.validate_app() self._clean_unique_id(errors) self.assertEqual(len(errors), 3) self.assertIn({ 'type': 'endpoint to display only forms', 'module': {'id': 0, 'name': {'en': 'first module'}}, }, errors) self.assertIn({ 'type': 'no forms or case list', 'module': {'id': 0, 'name': {'en': 'first module'}}, }, errors) self.assertIn({ 'type': 'no forms or case list', 'module': {'id': 1, 'name': {'en': 'second module'}}, }, errors) def test_parent_cycle_in_app(self, *args): cycle_error = { 'type': 'parent cycle', } with open(os.path.join(os.path.dirname(__file__), 'data', 'cyclical-app.json')) as f: source = json.load(f) app = Application.wrap(source) errors = app.validate_app() self._clean_unique_id(errors) self.assertIn(cycle_error, errors) def test_case_tile_configuration_errors(self, *args): case_tile_error 
= { 'type': "invalid tile configuration", 'module': {'id': 0, 'name': {'en': 'View'}}, 'reason': 'A case property must be assigned to the "sex" tile field.' } with open(os.path.join( os.path.dirname(__file__), 'data', 'bad_case_tile_config.json' )) as f: source = json.load(f) app = Application.wrap(source) errors = app.validate_app() self._clean_unique_id(errors) self.assertIn(case_tile_error, errors) def test_case_list_form_advanced_module_different_case_config(self, *args): case_tile_error = { 'type': "all forms in case list module must load the same cases", 'module': {'id': 1, 'name': {'en': 'update module'}}, 'form': {'id': 1, 'name': {'en': 'update form 1'}}, } factory = AppFactory(build_version='2.11.0') m0, m0f0 = factory.new_basic_module('register', 'person') factory.form_opens_case(m0f0) m1, m1f0 = factory.new_advanced_module('update', 'person', case_list_form=m0f0) factory.form_requires_case(m1f0, case_type='house') factory.form_requires_case(m1f0, parent_case_type='house') m1f1 = factory.new_form(m1) factory.form_requires_case(m1f1) # only loads a person case and not a house case errors = factory.app.validate_app() self._clean_unique_id(errors) self.assertIn(case_tile_error, errors) @patch('corehq.apps.app_manager.models.domain_has_privilege', return_value=True) def test_training_module_as_parent(self, *args): factory = AppFactory(build_version='2.43.0') app = factory.app training_module = Module.new_training_module('training', 'en') app.add_module(training_module) child_module, _ = factory.new_basic_module('child', 'case_type', parent_module=training_module) self.assertIn({ 'type': 'training module parent', 'module': {'id': 1, 'unique_id': 'child_module', 'name': {'en': 'child module'}} }, app.validate_app()) @patch('corehq.apps.app_manager.models.domain_has_privilege', return_value=True) def test_training_module_as_child(self, *args): factory = AppFactory(build_version='2.43.0') app = factory.app parent_module = Module.new_module('parent', 'en') 
app.add_module(parent_module) training_module, _ = factory.new_basic_module('training', 'case_type', parent_module=parent_module) training_module.is_training_module = True self.assertIn({ 'type': 'training module child', 'module': {'id': 1, 'unique_id': 'training_module', 'name': {'en': 'training module'}} }, app.validate_app()) @flag_enabled('DATA_REGISTRY') @patch.object(Application, 'supports_data_registry', lambda: True) def test_multi_select_module_errors(self, *args): factory = AppFactory() module, form = factory.new_basic_module('basic', 'person') factory.form_requires_case(form, 'person') module.case_details.short.multi_select = True module.search_config = CaseSearch( search_label=CaseSearchLabel(label={'en': 'Search'}), properties=[CaseSearchProperty(name=field) for field in ['name', 'greatest_fear']], data_registry="so_many_cases", data_registry_workflow=REGISTRY_WORKFLOW_LOAD_CASE, ) self.assertIn({ 'type': 'data registry multi select', 'module': {'id': 0, 'unique_id': 'basic_module', 'name': {'en': 'basic module'}} }, factory.app.validate_app()) module.search_config.data_registry_workflow = REGISTRY_WORKFLOW_SMART_LINK self.assertIn({ 'type': 'smart links multi select', 'module': {'id': 0, 'unique_id': 'basic_module', 'name': {'en': 'basic module'}} }, factory.app.validate_app()) def test_search_module_errors_instances(self, *args): factory = AppFactory() module, form = factory.new_basic_module('basic', 'person') factory.form_requires_case(form, 'person') module.case_details.long.columns.extend([ DetailColumn.wrap(dict( header={"en": "name"}, model="case", format="plain", useXpathExpression=True, field="instance('results')/results", )), DetailColumn.wrap(dict( header={"en": "age"}, model="case", format="plain", useXpathExpression=True, field="instance('search-input:results')/input", )) ]) module.search_config = CaseSearch( search_label=CaseSearchLabel(label={'en': 'Search'}), properties=[CaseSearchProperty(name='name')], ) errors = [(error['type'], 
error.get('details', '')) for error in factory.app.validate_app()] self.assertIn(('case search instance used in casedb case details', 'results'), errors) self.assertIn(('case search instance used in casedb case details', 'search-input:results'), errors) module.search_config.auto_launch = True self.assertNotIn( 'case search instance used in casedb case details', [error['type'] for error in factory.app.validate_app()] ) @flag_enabled('FORM_LINK_WORKFLOW') def test_form_module_validation(self, *args): factory = AppFactory(build_version='2.24.0') app = factory.app m0, m0f0 = factory.new_basic_module('register', 'case') m0f0.post_form_workflow = WORKFLOW_MODULE m1 = factory.new_shadow_module('shadow', m0, with_form=False) m1.put_in_root = True errors = app.validate_app() self._clean_unique_id(errors) self.assertIn({ 'type': 'form link to display only forms', 'form_type': 'module_form', 'module': {'id': 1, 'name': {'en': 'shadow module'}}, 'form': {'id': 0, 'name': {'en': 'register form 0'}}, }, errors)
bsd-3-clause
ffa62e75823ad1d277212163e807d83e
38.294326
114
0.578197
3.659511
false
true
false
false
dimagi/commcare-hq
corehq/ex-submodules/dimagi/test_utils/base.py
1
5633
from django.conf import settings from six.moves import range if not settings.configured: settings.configure(DEBUG=True) from unittest.mock import MagicMock, NonCallableMock, patch from unittest import TestCase from memoized import memoized from dimagi.utils.chunked import chunked from dimagi.utils.read_only import ReadOnlyObject from dimagi.utils.couch.sync_docs import sync_design_docs, copy_designs class DimagiUtilsTestCase(TestCase): def test_memoized_function(self): @memoized def f(n=0): return n**2 self.assertEqual(f(), 0) self.assertEqual(f.get_cache(), {(0,): 0}) self.assertEqual(f(0), 0) self.assertEqual(f.get_cache(), {(0,): 0}) self.assertEqual(f(2), 4) self.assertEqual(sorted(f.get_cache().items()), [((0,), 0), ((2,), 4)]) def test_memoized_class(self): calls = {'get_full_name': 0, 'full_name': 0, 'complicated_method': 0} @memoized class Person(object): get_full_name_calls = 0 full_name_calls = 0 complicated_method_calls = 0 def __init__(self, first_name, last_name): self.first_name = first_name self.last_name = last_name @property @memoized def full_name(self): calls['full_name'] = calls['full_name'] + 1 return "%s %s" % (self.first_name, self.last_name) @memoized def get_full_name(self): calls['get_full_name'] = calls['get_full_name'] + 1 return "%s %s" % (self.first_name, self.last_name) def __repr__(self): return "%s(%r, %r)" % (self.__class__.__name__, self.first_name, self.last_name) @memoized def complicated_method(self, a, b=10, *args, **kwargs): calls['complicated_method'] = calls['complicated_method'] + 1 return a, b, args, kwargs p = Person("Danny", "Roberts") self.assertEqual(p.get_full_name(), 'Danny Roberts') self.assertEqual(calls['get_full_name'], 1) self.assertEqual(p.get_full_name(), 'Danny Roberts') self.assertEqual(calls['get_full_name'], 1) self.assertEqual(p.full_name, 'Danny Roberts') self.assertEqual(calls['full_name'], 1) self.assertEqual(p.full_name, 'Danny Roberts') self.assertEqual(calls['full_name'], 1) 
self.assertEqual(Person("Danny", "Roberts")._full_name_cache, {(): 'Danny Roberts'}) self.assertEqual(Person.get_full_name.get_cache(p), {(): 'Danny Roberts'}) self.assertEqual(p.complicated_method(5), (5, 10, (), {})) self.assertEqual(calls['complicated_method'], 1) self.assertEqual(p.complicated_method(5), (5, 10, (), {})) self.assertEqual(calls['complicated_method'], 1) self.assertEqual(p.complicated_method(1, 2, 3, 4, 5, foo='bar'), (1, 2, (3, 4, 5), {'foo': 'bar'})) self.assertEqual(calls['complicated_method'], 2) q = Person("Joe", "Schmoe") self.assertEqual(q.get_full_name(), 'Joe Schmoe') self.assertEqual(calls['get_full_name'], 2) def test_chunked(self): self.assertEqual(list(chunked(list(range(10)), 4)), [ (0, 1, 2, 3), (4, 5, 6, 7), (8, 9) ]) def test_chunked_stop(self): def list_or_stop(items): items = list(items) if items[0] == 0: return items raise StopIteration self.assertEqual(list(chunked(range(10), 2, list_or_stop)), [[0, 1]]) def test_ReadOnlyObject(self): from couchdbkit import Document, StringListProperty log = [] def read_log(): x = log[:] del log[:] return x class Thing(Document): words = StringListProperty() @property def calc(self): for i, word in enumerate(self.words): log.append(i) yield word + '!' 
thing = Thing(words=['danny', 'is', 'so', 'clever']) thing = ReadOnlyObject(thing) self.assertEqual(thing.words, ['danny', 'is', 'so', 'clever']) self.assertEqual(thing.words, ['danny', 'is', 'so', 'clever']) self.assertIs(thing.words, thing.words) self.assertEqual(thing.calc, ['danny!', 'is!', 'so!', 'clever!']) self.assertEqual(read_log(), [0, 1, 2, 3]) self.assertEqual(thing.calc, ['danny!', 'is!', 'so!', 'clever!']) self.assertEqual(read_log(), []) self.assertIs(thing.calc, thing.calc) def test_sync_design_docs(self): db = NonCallableMock() with patch('dimagi.utils.couch.sync_docs.push', MagicMock()) as mock_push: sync_design_docs(db, 'design_dir', 'design_name') mock_push.assert_called_with('design_dir', db, docid='_design/design_name', force=True) def test_sync_design_docs_tmp(self): db = MagicMock() with patch('dimagi.utils.couch.sync_docs.push', MagicMock()) as mock_push: sync_design_docs(db, 'design_dir', 'design_name', temp='tmp') mock_push.assert_called_with('design_dir', db, docid='_design/design_name-tmp', force=True) self.assertEqual(len(db.mock_calls), 4) def test_copy_designs(self): db = MagicMock() copy_designs(db, 'design_name') db.copy_doc.assert_called_once_with('_design/design_name-tmp', '_design/design_name') db.__delitem__.assert_called_with('_design/design_name-tmp')
bsd-3-clause
f05ba71a12fc173fcce573cb19056672
36.059211
107
0.566661
3.514036
false
true
false
false
dimagi/commcare-hq
corehq/apps/zapier/tests/test_fields.py
1
7789
from django.test.client import Client from django.test.testcases import SimpleTestCase, TestCase from tastypie.resources import Resource from corehq.apps.app_manager.tests.app_factory import AppFactory from corehq.apps.zapier.api.v0_5 import ZapierCustomFieldCaseResource from corehq.apps.zapier.util import remove_advanced_fields class TestRemoveAdvancedFields(SimpleTestCase): def test_form(self): form = { "build_id": "de9553b384b1ff3acaceaed4a217f277", "domain": "test", "form": { "#type": "data", "@name": "Test", "@uiVersion": "1", "@version": "6", "@xmlns": "http://openrosa.org/formdesigner/test", "age": "3.052703627652293", "case": { "@case_id": "67dfe2a9-9413-4811-b5f5-a7c841085e9e", "@date_modified": "2016-12-20T12:13:23.870000Z", "@user_id": "cff3d2fb45eafd1abbc595ae89f736a6", "@xmlns": "http://commcarehq.org/case/transaction/v2", "update": { "test": "" } }, "dob": "2013-12-01", "dose_counter": "0", "follow_up_test_date": "", "follow_up_test_type": "", "grp_archive_person": { "archive_person": { "case": { "@case_id": "d2fcfa48-5286-4623-a209-6a9c30781b3d", "@date_modified": "2016-12-20T12:13:23.870000Z", "@user_id": "cff3d2fb45eafd1abbc595ae89f736a6", "@xmlns": "http://commcarehq.org/case/transaction/v2", "update": { "archive_reason": "not_evaluated", "owner_id": "_archive_" } } }, "close_episode": { "case": { "@case_id": "67dfe2a9-9413-4811-b5f5-a7c841085e9e", "@date_modified": "2016-12-20T12:13:23.870000Z", "@user_id": "cff3d2fb45eafd1abbc595ae89f736a6", "@xmlns": "http://commcarehq.org/case/transaction/v2", "close": "" } }, "close_occurrence": { "case": { "@case_id": "912d0ec6-709f-4d82-81d8-6a5aa163e2fb", "@date_modified": "2016-12-20T12:13:23.870000Z", "@user_id": "cff3d2fb45eafd1abbc595ae89f736a6", "@xmlns": "http://commcarehq.org/case/transaction/v2", "close": "" } }, "close_referrals": { "@count": "0", "@current_index": "0", "@ids": "" } }, "lbl_form_end": "OK", "length_of_cp": "", "length_of_ip": "", "meta": { "@xmlns": 
"http://openrosa.org/jr/xforms", "appVersion": "CommCare Android, version \"2.31.0\"(423345). " "App v59. CommCare Version 2.31. Build 423345, built on: 2016-11-02", "app_build_version": 59, "commcare_version": "2.31.0", "deviceID": "359872069029881", "geo_point": None, "instanceID": "2d0e138e-c9b0-4998-a7fb-06b7109e0bf7", "location": { "#text": "54.4930116 18.5387613 0.0 21.56", "@xmlns": "http://commcarehq.org/xforms" }, "timeEnd": "2016-12-20T12:13:23.870000Z", "timeStart": "2016-12-20T12:13:08.346000Z", "userID": "cff3d2fb45eafd1abbc595ae89f736a6", "username": "test" }, } } remove_advanced_fields(form_dict=form) self.assertIsNone(form['form']['meta'].get('userID')) self.assertIsNone(form.get('xmlns')) self.assertIsNone(form['form'].get('@name')) self.assertIsNone(form['form']['meta'].get('appVersion')) self.assertIsNone(form['form']['meta'].get('deviceID')) self.assertIsNone(form['form']['meta'].get('location')) self.assertIsNone(form.get('app_id')) self.assertIsNone(form.get('build_id')) self.assertIsNone(form['form'].get('@version')) self.assertIsNone(form.get('doc_type')) self.assertIsNone(form.get('last_sync_token')) self.assertIsNone(form.get('partial_submission')) self.assertIsNotNone(form['domain']) class TestZapierCustomFields(TestCase): @classmethod def setUpClass(cls): super(TestZapierCustomFields, cls).setUpClass() cls.test_url = "http://commcarehq.org/?domain=joto&case_type=teddiursa" def setUp(self): self.domain = "joto" self.case_type = 'teddiursa' app_factory = AppFactory(self.domain) m, f = app_factory.new_basic_module('m', self.case_type) app_factory.form_requires_case(f, case_type=self.case_type, update={ 'prop1': '/data/prop1', 'move_type': '/data/move_type', 'mood': '/data/mood', 'level': '/data/level' }) app_factory.app.save() def test_case_fields(self): expected_fields = [ {"help_text": "", "key": "properties__level", "label": "Level", "type": "unicode"}, {"help_text": "", "key": "properties__mood", "label": "Mood", "type": "unicode"}, 
{"help_text": "", "key": "properties__move_type", "label": "Move type", "type": "unicode"}, {"help_text": "", "key": "properties__name", "label": "Name", "type": "unicode"}, {"help_text": "", "key": "properties__prop1", "label": "Prop1", "type": "unicode"}, {"help_text": "", "key": "properties__type", "label": "Type", "type": "unicode"}, {"help_text": "", "key": "date_closed", "label": "Date closed", "type": "unicode"}, {"help_text": "", "key": "xform_ids", "label": "XForm IDs", "type": "unicode"}, {"help_text": "", "key": "properties__date_opened", "label": "Date opened", "type": "unicode"}, {"help_text": "", "key": "properties__external_id", "label": "External ID", "type": "unicode"}, {"help_text": "", "key": "properties__case_name", "label": "Case name", "type": "unicode"}, {"help_text": "", "key": "properties__case_type", "label": "Case type", "type": "unicode"}, {"help_text": "", "key": "user_id", "label": "User ID", "type": "unicode"}, {"help_text": "", "key": "date_modified", "label": "Date modified", "type": "unicode"}, {"help_text": "", "key": "case_id", "label": "Case ID", "type": "unicode"}, {"help_text": "", "key": "properties__owner_id", "label": "Owner ID", "type": "unicode"}, {"help_text": "", "key": "resource_uri", "label": "Resource URI", "type": "unicode"} ] request = Client().get(self.test_url).wsgi_request bundle = Resource().build_bundle(data={}, request=request) actual_fields = [field.get_content() for field in ZapierCustomFieldCaseResource().obj_get_list(bundle)] self.assertItemsEqual(expected_fields, actual_fields)
bsd-3-clause
eb110e6ef4a3c1212177e87ca6a58628
47.68125
111
0.474515
3.696725
false
true
false
false
dimagi/commcare-hq
corehq/apps/reports/migrations/0004_tableauserver_tableauvisualization.py
1
1457
# Generated by Django 2.2.20 on 2021-05-03 17:25 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('reports', '0003_multiselect_report_filters_are_lists'), ] operations = [ migrations.CreateModel( name='TableauServer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('server_type', models.CharField(choices=[('server', 'Tableau Server'), ('online', 'Tableau Online')], default='server', max_length=6)), ('server_name', models.CharField(max_length=128)), ('target_site', models.CharField(default='Default', max_length=64)), ('domain_username', models.CharField(max_length=64)), ('allow_domain_username_override', models.BooleanField(default=False)), ], ), migrations.CreateModel( name='TableauVisualization', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('project', models.CharField(max_length=64)), ('view_url', models.CharField(max_length=256)), ('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reports.TableauServer')), ], ), ]
bsd-3-clause
0bdc83061d94a7062beb99bcefa7563f
41.852941
152
0.59094
4.223188
false
false
false
false
dimagi/commcare-hq
corehq/apps/fixtures/tests/test_fixture_data.py
1
12633
from xml.etree import cElementTree as ElementTree from django.test import TestCase from casexml.apps.case.tests.util import check_xml_line_by_line from casexml.apps.phone.tests.utils import \ call_fixture_generator as call_fixture_generator_raw from corehq.apps.fixtures import fixturegenerators from corehq.apps.fixtures.dbaccessors import delete_all_fixture_data from corehq.apps.fixtures.models import ( FIXTURE_BUCKET, Field, LookupTable, LookupTableRow, LookupTableRowOwner, OwnerType, TypeField, ) from corehq.apps.users.models import CommCareUser from corehq.blobs import get_blob_db def call_fixture_generator(user): return [ElementTree.fromstring(f) if isinstance(f, bytes) else f for f in call_fixture_generator_raw(fixturegenerators.item_lists, user)] class FixtureDataTest(TestCase): def setUp(self): super(FixtureDataTest, self).setUp() self.domain = 'qwerty' self.tag = "district" delete_all_fixture_data() self.data_type = LookupTable( domain=self.domain, tag=self.tag, description="Districts", fields=[ TypeField(name="state_name"), TypeField(name="district_name", properties=["lang"]), TypeField(name="district_id"), ], item_attributes=[], ) self.data_type.save() self.addCleanup(self.data_type._migration_get_couch_object().delete) self.data_item = LookupTableRow( domain=self.domain, table_id=self.data_type.id, fields={ "state_name": [ Field(value="Delhi_state") ], "district_name": [ Field(value="Delhi_in_HIN", properties={"lang": "hin"}), Field(value="Delhi_in_ENG", properties={"lang": "eng"}) ], "district_id": [ Field(value="Delhi_id") ] }, item_attributes={}, sort_key=0, ) self.data_item.save() self.addCleanup(self.data_item._migration_get_couch_object().delete) self.user = CommCareUser.create(self.domain, 'to_delete', '***', None, None) self.addCleanup(self.user.delete, self.domain, deleted_by=None) self.ownership = LookupTableRowOwner( domain=self.domain, owner_id=self.user.get_id, owner_type=OwnerType.User, row_id=self.data_item.id, ) 
self.ownership.save(sync_to_couch=False) self.addCleanup(get_blob_db().delete, key=FIXTURE_BUCKET + '/' + self.domain) def test_xml(self): check_xml_line_by_line(self, """ <district> <state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> """, ElementTree.tostring(fixturegenerators.item_lists.to_xml( self.data_item, self.data_type), encoding='utf-8')) def test_ownership(self): row_ids = [r.id for r in LookupTableRow.objects.iter_by_user(self.user)] self.assertItemsEqual([self.data_item.id], row_ids) fixture, = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) check_xml_line_by_line(self, """ <fixture id="item-list:district" user_id="%s"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> """ % self.user.user_id, ElementTree.tostring(fixture, encoding='utf-8')) def test_fixture_removal(self): """ An empty fixture list should be generated for each fixture that the use does not have access to (within the domain). 
""" self.ownership.delete() self.ownership = None fixtures = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) self.assertEqual(1, len(fixtures)) check_xml_line_by_line( self, """ <fixture id="item-list:district" user_id="{}"> <district_list /> </fixture> """.format(self.user.user_id), ElementTree.tostring(fixtures[0], encoding='utf-8') ) def test_get_item_by_field_value(self): self.assertEqual( LookupTableRow.objects.with_value( self.domain, self.data_type.id, 'state_name', 'Delhi_state').get().id, self.data_item.id ) def test_fixture_is_indexed(self): self.data_type.fields[2].is_indexed = True # Set "district_id" as indexed self.data_type.save() fixtures = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) self.assertEqual(len(fixtures), 2) check_xml_line_by_line( self, """ <fixtures> <schema id="item-list:district"> <indices> <index>district_id</index> </indices> </schema> <fixture id="item-list:district" indexed="true" user_id="{}"> <district_list> <district> <state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> </fixtures> """.format(self.user.user_id), """ <fixtures> {} {} </fixtures> """.format(*[ElementTree.tostring(fixture, encoding='utf-8').decode('utf-8') for fixture in fixtures]) ) def test_empty_data_types(self): empty_data_type = LookupTable( domain=self.domain, tag='blank', description="blank", fields=[TypeField(name="name")], item_attributes=[], ) empty_data_type.save() self.addCleanup(empty_data_type._migration_get_couch_object().delete) fixtures = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) self.assertEqual(2, len(fixtures)) check_xml_line_by_line( self, """ <f> <fixture id="item-list:blank" user_id="{0}"> <blank_list/> </fixture> <fixture id="item-list:district" user_id="{0}"> <district_list> <district> 
<state_name>Delhi_state</state_name> <district_name lang="hin">Delhi_in_HIN</district_name> <district_name lang="eng">Delhi_in_ENG</district_name> <district_id>Delhi_id</district_id> </district> </district_list> </fixture> </f> """.format(self.user.user_id), '<f>{}\n{}\n</f>'.format(*[ ElementTree.tostring(fixture, encoding='utf-8').decode('utf-8') for fixture in fixtures ]) ) def test_user_data_type_with_item(self): cookie = self.make_data_type("cookie", is_global=False) latte = self.make_data_type("latte", is_global=True) self.make_data_item(cookie, "2.50") self.make_data_item(latte, "5.75") fixtures = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) # make sure each fixture is there, and only once self.assertEqual( [item.attrib['id'] for item in fixtures], [ 'item-list:latte-index', 'item-list:cookie-index', 'item-list:district', ], ) def test_empty_user_data_types(self): self.make_data_type("cookie", is_global=False) fixtures = call_fixture_generator(self.user.to_ota_restore_user(self.domain)) # make sure each fixture is there, and only once self.assertEqual( [item.attrib['id'] for item in fixtures], [ 'item-list:cookie-index', 'item-list:district', ], ) def test_cached_global_fixture_user_id(self): sandwich = self.make_data_type("sandwich", is_global=True) self.make_data_item(sandwich, "7.39") frank = self.user.to_ota_restore_user(self.domain) sammy_ = CommCareUser.create(self.domain, 'sammy', '***', None, None) self.addCleanup(sammy_.delete, self.domain, deleted_by=None) sammy = sammy_.to_ota_restore_user(self.domain) fixtures = call_fixture_generator(frank) self.assertEqual({item.attrib['user_id'] for item in fixtures}, {frank.user_id}) self.assertTrue(get_blob_db().exists(key=FIXTURE_BUCKET + '/' + self.domain)) fixtures = call_fixture_generator(sammy) self.assertEqual({item.attrib['user_id'] for item in fixtures}, {sammy.user_id}) def make_data_type(self, name, is_global): data_type = LookupTable( domain=self.domain, 
tag="{}-index".format(name), is_global=is_global, description=name.title(), fields=[ TypeField(name="cost", properties=[]), ], item_attributes=[], ) data_type.save() self.addCleanup(data_type._migration_get_couch_object().delete) return data_type def make_data_item(self, data_type, cost): data_item = LookupTableRow( domain=self.domain, table_id=data_type.id, fields={"cost": [Field(value=cost)]}, item_attributes={}, sort_key=0, ) data_item.save() self.addCleanup(data_item._migration_get_couch_object().delete) return data_item class TestFixtureOrdering(TestCase): @classmethod def setUpClass(cls): super(TestFixtureOrdering, cls).setUpClass() cls.domain = "TestFixtureOrdering" cls.user = CommCareUser.create(cls.domain, 'george', '***', None, None) cls.addClassCleanup(cls.user.delete, cls.domain, deleted_by=None) cls.data_type = LookupTable( domain=cls.domain, tag="houses-of-westeros", is_global=True, description="Great Houses of Westeros", fields=[ TypeField(name="name"), TypeField(name="seat"), TypeField(name="sigil"), ], item_attributes=[], ) cls.data_type.save() cls.data_items = [ cls._make_data_item(4, "Tyrell", "Highgarden", "Rose"), cls._make_data_item(6, "Martell", "Sunspear", "Sun and Spear"), cls._make_data_item(3, "Lannister", "Casterly Rock", "Lion"), cls._make_data_item(1, "Targaryen", "Dragonstone", "Dragon"), cls._make_data_item(5, "Tully", "Riverrun", "Trout"), cls._make_data_item(2, "Stark", "Winterfell", "Direwolf"), cls._make_data_item(7, "Baratheon", "Storm's End", "Stag"), ] cls.addClassCleanup(delete_all_fixture_data, cls.domain) @classmethod def _make_data_item(cls, sort_key, name, seat, sigil): data_item = LookupTableRow( domain=cls.domain, table_id=cls.data_type.id, fields={ "name": [Field(value=name)], "seat": [Field(value=seat)], "sigil": [Field(value=sigil)], }, item_attributes={}, sort_key=sort_key, ) data_item.save() return data_item def test_fixture_order(self): (fixture,) = 
call_fixture_generator(self.user.to_ota_restore_user(self.domain)) actual_names = [row[0].text for row in fixture[0]] self.assertEqual( ["Targaryen", "Stark", "Lannister", "Tyrell", "Tully", "Martell", "Baratheon"], actual_names )
bsd-3-clause
fd5f3d30460cb058312132f08f99e625
35.938596
114
0.543972
3.792555
false
true
false
false
dimagi/commcare-hq
corehq/apps/case_importer/tracking/models.py
1
2916
from django.db import models from jsonfield import JSONField from memoized import memoized from dimagi.utils.logging import notify_exception from soil.progress import STATES from soil.util import get_task from corehq.apps.case_importer.tracking.task_status import ( TaskStatus, get_task_status_json, ) MAX_COMMENT_LENGTH = 2048 class CaseUploadRecord(models.Model): domain = models.CharField(max_length=256) created = models.DateTimeField(auto_now_add=True) upload_id = models.UUIDField( unique=True, help_text="An HQ-level ID that is used to provide a link to this upload record" ) task_id = models.UUIDField( unique=True, help_text="The celery task id that handled this upload" ) task_status_json = JSONField(null=True) couch_user_id = models.CharField(max_length=256) case_type = models.CharField(max_length=256) comment = models.TextField(null=True) upload_file_meta = models.ForeignKey('CaseUploadFileMeta', null=True, on_delete=models.CASCADE) class Meta(object): index_together = ('domain', 'created') @property @memoized def task(self): return get_task(self.task_id) @memoized def get_task_status_json(self): if self.task_status_json: return TaskStatus.wrap(self.task_status_json) else: return get_task_status_json(str(self.task_id)) def save_task_status_json(self, task_status_json): if self.task_status_json is not None: notify_exception(None, "CaseUploadRecord task_status_json already set", { 'new_task_status_json': task_status_json, 'existing_task_status_json': self.task_status_json, 'upload_id': self.upload_id, }) self.task_status_json = task_status_json self.save() def save_task_status_json_if_failed(self): """ Set task_status_json based on self.task_id if the task has failed """ if self.task_status_json is None: # intentionally routing through method to prime local cache task_status_json = self.get_task_status_json() if task_status_json.state == STATES.failed: self.task_status_json = task_status_json self.save() def get_tempfile_ref_for_upload_ref(self): from .filestorage 
import persistent_file_store return persistent_file_store.get_tempfile_ref_for_contents(self.upload_file_meta.identifier) class CaseUploadFileMeta(models.Model): identifier = models.CharField(max_length=256, unique=True) filename = models.CharField(max_length=256) length = models.IntegerField() class CaseUploadFormRecord(models.Model): case_upload_record = models.ForeignKey(CaseUploadRecord, related_name='form_records', on_delete=models.CASCADE) form_id = models.CharField(max_length=256, unique=True)
bsd-3-clause
3d1b84c3f053ee689bd828449b3f2737
33.714286
115
0.673868
3.695817
false
false
false
false
dimagi/commcare-hq
corehq/motech/repeaters/tests/test_proxy_models.py
1
15357
import inspect
from uuid import uuid4

from django.db import models
from django.test import SimpleTestCase, TestCase

from dimagi.ext.couchdbkit import Document
from dimagi.utils.couch.migration import (
    SyncCouchToSQLMixin,
    SyncSQLToCouchMixin,
)

from corehq.motech.dhis2.repeaters import (
    Dhis2EntityRepeater,
    Dhis2Repeater,
    SQLDhis2EntityRepeater,
    SQLDhis2Repeater,
)
from corehq.motech.fhir.repeaters import FHIRRepeater, SQLFHIRRepeater
from corehq.motech.models import ConnectionSettings
from corehq.motech.openmrs.repeaters import OpenmrsRepeater, SQLOpenmrsRepeater
from corehq.motech.repeaters.dbaccessors import (
    delete_all_repeaters,
    get_all_repeater_docs,
)
from corehq.motech.repeaters.expression.repeaters import (
    CaseExpressionRepeater,
    SQLCaseExpressionRepeater,
)
from custom.cowin.repeaters import (
    BeneficiaryRegistrationRepeater,
    BeneficiaryVaccinationRepeater,
    SQLBeneficiaryRegistrationRepeater,
    SQLBeneficiaryVaccinationRepeater,
)

from ..models import (
    AppStructureRepeater,
    CaseRepeater,
    CreateCaseRepeater,
    DataRegistryCaseUpdateRepeater,
    FormRepeater,
    LocationRepeater,
    ReferCaseRepeater,
    Repeater,
    ShortFormRepeater,
    SQLAppStructureRepeater,
    SQLCaseRepeater,
    SQLCreateCaseRepeater,
    SQLDataRegistryCaseUpdateRepeater,
    SQLFormRepeater,
    SQLLocationRepeater,
    SQLReferCaseRepeater,
    SQLRepeater,
    SQLShortFormRepeater,
    SQLUpdateCaseRepeater,
    SQLUserRepeater,
    UpdateCaseRepeater,
    UserRepeater,
)

DOMAIN = 'test-domain'


class RepeaterProxyTests(TestCase):
    """Shared fixture: one ConnectionSettings row plus default repeater kwargs;
    cleans up all repeaters (couch and SQL) after each test."""

    def setUp(self):
        self.url = "http://example.com"
        self.conn = ConnectionSettings.objects.create(domain=DOMAIN, name=self.url, url=self.url)
        self.repeater_data = {
            "domain": DOMAIN,
            "connection_settings": self.conn,
            "white_listed_case_types": ['white_case', 'black_case'],
            "black_listed_users": ['user1'],
            "is_paused": False,
            "format": 'case_json',
        }
        super().setUp()

    def tearDown(self):
        delete_all_repeaters()
        return super().tearDown()


class TestSQLRepeaterCreatesCorrectRepeaterObjects(RepeaterProxyTests):
    """SQLRepeater.objects.all() should hand back instances of the concrete
    proxy subclass each row was saved as, not the base SQLRepeater."""

    def setUp(self):
        super().setUp()
        self.repeater_classes = [
            SQLDhis2EntityRepeater, SQLCaseExpressionRepeater,
            SQLCaseRepeater, SQLDataRegistryCaseUpdateRepeater, SQLOpenmrsRepeater]
        for r in self.repeater_classes:
            mock_data = self.repeater_data
            # sync_to_couch=False: these rows exist only on the SQL side
            r(
                domain=mock_data['domain'],
                connection_settings=self.conn,
                repeater_id=uuid4().hex
            ).save(sync_to_couch=False)

    def test_repeater_all_returns_correct_instance(self):
        all_repeaters = SQLRepeater.objects.all()
        self.assertEqual(
            {r.__class__.__name__ for r in all_repeaters},
            {r.__name__ for r in self.repeater_classes},
        )


class TestSQLCreateCaseRepeaterSubModels(RepeaterProxyTests):
    """Proxy behavior for the case-repeater family: repeater_type values,
    per-subclass repeat_records, couch sync, and proxy-filtered querysets."""

    def setUp(self):
        super().setUp()
        self.createcase_repeater_obj = SQLCreateCaseRepeater(**self.repeater_data)
        self.case_repeater_obj = SQLCaseRepeater(**self.repeater_data)
        self.refercase_repeater_obj = SQLReferCaseRepeater(**self.repeater_data)
        self.dataregistry_repeater_obj = SQLDataRegistryCaseUpdateRepeater(**self.repeater_data)
        self.case_repeater_obj.save()
        self.createcase_repeater_obj.save()
        self.refercase_repeater_obj.save()
        self.dataregistry_repeater_obj.save()

    def test_model_instance_is_correct(self):
        self.assertEqual(self.createcase_repeater_obj.repeater_type, "CreateCaseRepeater")
        self.assertEqual(self.case_repeater_obj.repeater_type, "CaseRepeater")
        self.assertIsInstance(self.createcase_repeater_obj, SQLCreateCaseRepeater)
        self.assertIsInstance(self.case_repeater_obj, SQLCaseRepeater)

    def test_repeat_records_refer_correct_model_class(self):
        self.createcase_repeater_obj.repeat_records.create(
            domain=DOMAIN,
            payload_id='r2d2',
            registered_at='1977-01-01',
        )
        self.case_repeater_obj.repeat_records.create(
            domain=DOMAIN,
            payload_id='darth',
            registered_at='1980-01-01',
        )

        createcase_repeat_records = self.createcase_repeater_obj.repeat_records.all()
        case_repeat_records = self.case_repeater_obj.repeat_records.all()

        self.assertEqual(len(createcase_repeat_records), 1)
        self.assertEqual(len(case_repeat_records), 1)
        # each record's .repeater resolves to the correct proxy subclass
        self.assertIsInstance(case_repeat_records[0].repeater, SQLCaseRepeater)
        self.assertIsInstance(createcase_repeat_records[0].repeater, SQLCreateCaseRepeater)

    def test_repeaters_are_synced_to_couch(self):
        repeaters = get_all_repeater_docs()
        self.assertEqual(len(repeaters), 4)
        self.assertEqual(
            {
                r['_id'] for r in repeaters
            },
            {
                self.createcase_repeater_obj.repeater_id,
                self.case_repeater_obj.repeater_id,
                self.refercase_repeater_obj.repeater_id,
                self.dataregistry_repeater_obj.repeater_id
            }
        )
        self.assertEqual(
            {
                Repeater.wrap(r).repeater_type for r in repeaters
            },
            {
                self.createcase_repeater_obj.repeater_type,
                self.case_repeater_obj.repeater_type,
                self.refercase_repeater_obj.repeater_type,
                self.dataregistry_repeater_obj.repeater_type
            }
        )

    def test_query_results_are_correct(self):
        # each proxy manager filters to exactly its own repeater_type
        self.assertEqual(len(SQLCreateCaseRepeater.objects.all()), 1)
        self.assertEqual(len(SQLCaseRepeater.objects.all()), 1)
        self.assertEqual(len(SQLReferCaseRepeater.objects.all()), 1)
        self.assertEqual(len(SQLDataRegistryCaseUpdateRepeater.objects.all()), 1)
        self.assertEqual(len(SQLRepeater.objects.all()), 4)


class TestSQLRepeaterSubClasses(RepeaterProxyTests):
    """Couch sync for the non-case repeater subclasses."""

    def setUp(self):
        super().setUp()
        appstructure_repeater_obj = SQLAppStructureRepeater(
            domain=DOMAIN,
            connection_settings=self.conn,
            is_paused=False,
            format='app_structure_xml',
        )
        shortform_repeater_obj = SQLShortFormRepeater(
            domain=DOMAIN,
            connection_settings=self.conn,
            is_paused=False,
            format='short_form_json',
        )
        user_repeater_obj = SQLUserRepeater(
            domain=DOMAIN,
            connection_settings=self.conn,
            is_paused=False,
            format='',
        )
        location_repeater_obj = SQLLocationRepeater(
            domain=DOMAIN,
            connection_settings=self.conn,
            is_paused=False,
            format='',
        )
        self.all_repeaters = [
            appstructure_repeater_obj,
            shortform_repeater_obj,
            user_repeater_obj,
            location_repeater_obj
        ]
        for r in self.all_repeaters:
            r.save()

    def test_repeaters_are_synced_to_couch(self):
        repeaters = get_all_repeater_docs()
        self.assertEqual(len(repeaters), 4)
        self.assertEqual(
            {r['_id'] for r in repeaters},
            {r.repeater_id for r in self.all_repeaters}
        )
        self.assertEqual(
            {Repeater.wrap(r).repeater_type for r in repeaters},
            {r.repeater_type for r in self.all_repeaters}
        )


class ModelAttrEqualityHelper(SimpleTestCase):
    """Machinery for comparing the user-defined attribute surface of a couch
    Document class against its SQL counterpart, ignoring attributes that come
    from the base model/document machinery (via the Dummy* baselines)."""

    class DummySQLModel(models.Model, SyncSQLToCouchMixin):
        pass

    class DummyCouchModel(Document, SyncCouchToSQLMixin):
        pass

    @classmethod
    def _get_user_defined_attrs(cls, model_cls, dummy_model):
        # attrs on model_cls that the bare dummy base doesn't already have
        model_attrs = dir(dummy_model)
        return {item[0] for item in inspect.getmembers(model_cls) if item[0] not in model_attrs}

    @classmethod
    def get_sql_attrs(cls, model_cls):
        return cls._get_user_defined_attrs(model_cls, cls.DummySQLModel)

    @classmethod
    def get_cleaned_couch_attrs(cls, couch_model_cls):
        # couch attrs, minus couch-only names, plus names expected SQL-side
        couch_attrs = cls._get_user_defined_attrs(couch_model_cls, cls.DummyCouchModel)
        extra_attrs = cls._couch_only_attrs()
        new_attrs = cls._sql_only_attrs()
        return (couch_attrs - extra_attrs).union(new_attrs)

    @classmethod
    def _couch_only_attrs(cls):
        return set()

    @classmethod
    def _sql_only_attrs(cls):
        return set()


class TestRepeaterModelsAttrEquality(ModelAttrEqualityHelper):
    """Repeater vs SQLRepeater must expose the same user-defined attributes
    (symmetric difference empty), modulo the allow-lists below."""

    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(Repeater)
        sql_attrs = self.get_sql_attrs(SQLRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())

    @classmethod
    def get_sql_attrs(cls, model_cls):
        sql_attrs = cls._get_user_defined_attrs(model_cls, cls.DummySQLModel)
        return sql_attrs

    @classmethod
    def _couch_only_attrs(cls):
        # NOTE: 'started_at' appears twice below; harmless in a set literal
        return {
            # removed
            'last_success_at', 'sql_repeater', 'failure_streak', 'started_at',
            # renamed
            'paused',
            # connection setting props
            'plaintext_password', 'username', 'notify_addresses_str', 'create_connection_settings',
            'name', 'url', 'verify', 'skip_cert_verify', 'password', 'auth_type',
            # not required in sql
            'by_domain', 'base_doc', 'get_class_from_doc_type', 'started_at',
            '_get_connection_settings',
            'clear_caches',  # will see if we need it as per requirement
            # not used
            'get_attempt_info'
        }

    @classmethod
    def _sql_only_attrs(cls):
        return {
            'repeater_id', 'set_next_attempt', 'next_attempt_at', 'is_ready', 'options',
            '_repeater_type', 'last_attempt_at', 'repeat_records_ready', 'repeat_records',
            'all_objects', 'reset_next_attempt', 'is_deleted', 'PROXY_FIELD_NAME', 'Meta',
            'repeater',
            # added by django choicefield in models
            'get_request_method_display',
            'to_json', '_convert_to_serializable', '_optionvalue_fields', '_wrap_schema_attrs'
        }


class TestCaseRepeaterAttrEquality(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(CaseRepeater)
        sql_attrs = self.get_sql_attrs(SQLCaseRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestFormRepeaterAttrEquality(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(FormRepeater)
        sql_attrs = self.get_sql_attrs(SQLFormRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestCreateCaseRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(CreateCaseRepeater)
        sql_attrs = self.get_sql_attrs(SQLCreateCaseRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestUpdateCaseRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(UpdateCaseRepeater)
        sql_attrs = self.get_sql_attrs(SQLUpdateCaseRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestReferCaseRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(ReferCaseRepeater)
        sql_attrs = self.get_sql_attrs(SQLReferCaseRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestDataRegistryRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(DataRegistryCaseUpdateRepeater)
        sql_attrs = self.get_sql_attrs(SQLDataRegistryCaseUpdateRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestShorFormRepeater(TestRepeaterModelsAttrEquality):
    # NOTE(review): class name is missing a 't' ("ShorForm") — left as-is,
    # renaming a TestCase class changes test ids
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(ShortFormRepeater)
        sql_attrs = self.get_sql_attrs(SQLShortFormRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestAppStructureRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(AppStructureRepeater)
        sql_attrs = self.get_sql_attrs(SQLAppStructureRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestUserRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(UserRepeater)
        sql_attrs = self.get_sql_attrs(SQLUserRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestLocationRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(LocationRepeater)
        sql_attrs = self.get_sql_attrs(SQLLocationRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestDhsi2Repeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(Dhis2Repeater)
        sql_attrs = self.get_sql_attrs(SQLDhis2Repeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestDhis2EntityRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(Dhis2EntityRepeater)
        sql_attrs = self.get_sql_attrs(SQLDhis2EntityRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestOpenMRSRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(OpenmrsRepeater)
        sql_attrs = self.get_sql_attrs(SQLOpenmrsRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestCaseExpresionRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(CaseExpressionRepeater)
        sql_attrs = self.get_sql_attrs(SQLCaseExpressionRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestFHIRRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(FHIRRepeater)
        sql_attrs = self.get_sql_attrs(SQLFHIRRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestBeneficiaryRegistrationRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(BeneficiaryRegistrationRepeater)
        sql_attrs = self.get_sql_attrs(SQLBeneficiaryRegistrationRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())


class TestBeneficiaryVaccinationRepeater(TestRepeaterModelsAttrEquality):
    def test_have_same_attrs(self):
        couch_attrs = self.get_cleaned_couch_attrs(BeneficiaryVaccinationRepeater)
        sql_attrs = self.get_sql_attrs(SQLBeneficiaryVaccinationRepeater)
        self.assertEqual(couch_attrs ^ sql_attrs, set())
bsd-3-clause
1f4dd0fdd5dc4f7c8baf008186d22e34
35.564286
114
0.667513
3.578886
false
true
false
false
dimagi/commcare-hq
corehq/apps/case_importer/tracking/task_status.py
1
4123
from dimagi.ext import jsonobject
from dimagi.utils.logging import notify_exception
from soil.progress import STATES, get_task_status
from soil.util import get_task


class TaskStatus(jsonobject.StrictJsonObject):
    """Serializable snapshot of a case-import task: state, progress, result."""
    # takes on values of soil.progress.STATES
    state = jsonobject.IntegerProperty()
    progress = jsonobject.ObjectProperty(lambda: TaskStatusProgress)
    result = jsonobject.ObjectProperty(lambda: TaskStatusResult)

    def is_finished(self):
        """True only for terminal states (success or failed)."""
        return self.state in (STATES.success, STATES.failed)


class TaskStatusProgress(jsonobject.StrictJsonObject):
    # completion percentage as an integer
    percent = jsonobject.IntegerProperty()


class TaskStatusResult(jsonobject.StrictJsonObject):
    """Summary counts plus per-column errors for a finished import."""
    match_count = jsonobject.IntegerProperty()
    created_count = jsonobject.IntegerProperty()
    num_chunks = jsonobject.IntegerProperty()
    errors = jsonobject.ListProperty(lambda: TaskStatusResultError)


class TaskStatusResultError(jsonobject.StrictJsonObject):
    """One error bucket: a (title, column) with the rows it affected."""
    title = jsonobject.StringProperty()
    description = jsonobject.StringProperty()
    column = jsonobject.StringProperty()
    # usually an int, but field has been hijacked to include other debug info
    # search 'row_number=' in tasks.py
    # longer-term solution would be to have another field for debug info
    rows = jsonobject.ListProperty()
    sample = jsonobject.StringProperty()


def normalize_task_status_result(result):
    """Convert a raw do_import return value into a TaskStatusResult.

    Returns None for a falsy ``result`` (e.g. task produced nothing).
    """
    if result:
        return TaskStatusResult(
            match_count=result['match_count'],
            created_count=result['created_count'],
            num_chunks=result['num_chunks'],
            errors=normalize_task_status_result_errors(result),
        )
    else:
        return None


def normalize_task_status_result_errors(result):
    """
    result is the return value of do_import

    it is important that when changes are made to the return value of do_import
    this function remains backwards compatible,
    i.e. compatible with old return values of do_import,
    because those values are saved directly in the database,
    and we need to be able to process them in the future
    """
    result_errors = []
    # result['errors'] maps error-title -> {column_name -> error_value}
    for _, columns_to_error_value in result['errors'].items():
        for column_name, error_value in columns_to_error_value.items():
            result_errors.append(TaskStatusResultError(
                title=str(error_value['error']),
                description=str(error_value['description']),
                column=column_name,
                rows=error_value['rows'],
                # .get(): 'sample' is absent in older saved payloads
                sample=error_value.get('sample', '')
            ))
    return result_errors


def get_task_status_json(task_id):
    """Fetch the live celery task status for ``task_id`` as a TaskStatus.

    Never raises: any failure in fetching/parsing the status is reported via
    notify_exception and converted into a failed TaskStatus.
    """
    try:
        task_status = get_task_status(get_task(task_id))
    except Exception:
        # There was a period of time where the format of metadata we were setting
        # from the task would cause a celery-internal failure
        notify_exception(None, "Error fetching task")
        return TaskStatus(
            state=STATES.failed,
            progress=None,
            result=TaskStatusResult(errors=[TaskStatusResultError(description='Unknown Failure')]),
        )

    if task_status.state == STATES.failed:
        # task_status.error may be a single message or already a list of them
        errors = (
            task_status.error
            if isinstance(task_status.error, (list, tuple))
            else [task_status.error]
        )
        return TaskStatus(
            state=task_status.state,
            progress=TaskStatusProgress(
                percent=task_status.progress.percent,
            ),
            result=TaskStatusResult(errors=[TaskStatusResultError(description=error)
                                            for error in errors]),
        )
    else:
        return TaskStatus(
            state=task_status.state,
            progress=TaskStatusProgress(
                percent=task_status.progress.percent,
            ),
            result=normalize_task_status_result(task_status.result),
        )


def make_task_status_success(result):
    """Build a successful TaskStatus directly from a do_import result."""
    return TaskStatus(
        state=STATES.success,
        progress=TaskStatusProgress(
            percent=0,
        ),
        result=normalize_task_status_result(result),
    )
bsd-3-clause
0cc9f5d1b8ce1a9a9f492b4e7cf3e2a1
34.239316
99
0.657288
4.367585
false
false
false
false
dimagi/commcare-hq
corehq/apps/zapier/tests/test_utils.py
1
1194
from collections import namedtuple

from corehq.apps.accounting.models import (
    BillingAccount,
    DefaultProductPlan,
    SoftwarePlanEdition,
    Subscription,
)
from corehq.apps.domain.models import Domain
from corehq.apps.users.models import HQApiKey, WebUser
from corehq.apps.zapier.consts import CASE_TYPE_REPEATER_CLASS_MAP
from corehq.motech.repeaters.models import FormRepeater, RepeatRecord

# Bundle returned to Zapier tests: the domain object, its web user, and
# that user's API key string.
ZapierDomainConfig = namedtuple('ZapierDomainConfig', 'domain web_user api_key')


def bootrap_domain_for_zapier(domain_name):
    """Set up a domain ready for Zapier tests and return a ZapierDomainConfig.

    Creates (or reuses) the domain, attaches an active STANDARD subscription,
    and provisions a web user with an API key.
    """
    domain_obj = Domain.get_or_create_with_name(domain_name, is_active=True)

    billing_account = BillingAccount.get_or_create_account_by_domain(
        domain_name, created_by="automated-test")[0]
    plan_version = DefaultProductPlan.get_default_plan_version(
        edition=SoftwarePlanEdition.STANDARD)
    new_subscription = Subscription.new_domain_subscription(
        billing_account, domain_name, plan_version)
    new_subscription.is_active = True
    new_subscription.save()

    user = WebUser.create(domain_name, 'test', '******', None, None)
    key_record, _ = HQApiKey.objects.get_or_create(user=user.get_django_user())

    return ZapierDomainConfig(domain_obj, user, key_record.key)
bsd-3-clause
6f822bc9857965b2973dcdee2f70a878
40.172414
105
0.766332
3.36338
false
true
false
false
dimagi/commcare-hq
corehq/motech/migrations/0006_connection_settings.py
1
2006
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add OAuth 2.0 / bearer-token auth fields to ConnectionSettings and
    relax several credential fields to allow blank values."""

    dependencies = [
        ('motech', '0005_requestlog_request_body'),
    ]

    operations = [
        # which remote API's auth workflow this connection uses
        migrations.AddField(
            model_name='connectionsettings',
            name='api_auth_settings',
            field=models.CharField(blank=True, choices=[
                (None, '(Not Applicable)'),
                ('dhis2_auth_settings', 'DHIS2 OAuth 2.0'),
                ('moveit_automation_settings', 'MOVEit Automation')
            ], max_length=64, null=True),
        ),
        migrations.AddField(
            model_name='connectionsettings',
            name='client_id',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='connectionsettings',
            name='client_secret',
            field=models.CharField(blank=True, max_length=255),
        ),
        # AES-encrypted last OAuth token, stored as text
        migrations.AddField(
            model_name='connectionsettings',
            name='last_token_aes',
            field=models.TextField(blank=True, default=''),
        ),
        # extend auth_type choices with bearer-token and OAuth2 password grant
        migrations.AlterField(
            model_name='connectionsettings',
            name='auth_type',
            field=models.CharField(blank=True, choices=[
                (None, 'None'),
                ('basic', 'HTTP Basic'),
                ('digest', 'HTTP Digest'),
                ('oauth1', 'OAuth1'),
                ('bearer', 'Ipswitch MOVEit Automation Bearer Token'),
                ('oauth2_pwd', 'OAuth 2.0 Password Grant')
            ], max_length=16, null=True),
        ),
        migrations.AlterField(
            model_name='connectionsettings',
            name='password',
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AlterField(
            model_name='connectionsettings',
            name='username',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
bsd-3-clause
02e8d38cf7bff7855bde972b2fa88744
34.192982
74
0.534397
4.457778
false
false
false
false
dimagi/commcare-hq
corehq/apps/accounting/tests/test_renew_subscription.py
1
3162
import datetime

from corehq.apps.accounting.models import (
    BillingAccount,
    DefaultProductPlan,
    SoftwarePlanEdition,
    Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.domain.models import Domain


class TestRenewSubscriptions(BaseAccountingTest):
    """Covers Subscription.renew_subscription and the next_subscription /
    is_renewed lookups on the original subscription."""

    def setUp(self):
        super().setUp()
        self.domain = Domain(name="test-domain-sub", is_active=True)
        self.domain.save()

        self.admin_username = generator.create_arbitrary_web_user_name()
        self.account = BillingAccount.get_or_create_account_by_domain(
            self.domain.name, created_by=self.admin_username)[0]
        self.standard_plan = DefaultProductPlan.get_default_plan_version(
            edition=SoftwarePlanEdition.STANDARD)

        # a currently-active subscription: started yesterday, ends tomorrow
        one_day = datetime.timedelta(days=1)
        today = datetime.date.today()
        self.subscription = Subscription.new_domain_subscription(
            self.account,
            self.domain.name,
            self.standard_plan,
            web_user=self.admin_username,
            date_start=today - one_day,
            date_end=today + one_day,
        )
        self.subscription.save()

    def tearDown(self):
        self.domain.delete()
        super().tearDown()

    def test_simple_renewal(self):
        self.renewed_subscription = self.subscription.renew_subscription()
        renewed = self.renewed_subscription

        # renewal is open-ended, starts where the old one ends, same plan
        self.assertIsNone(renewed.date_end)
        self.assertEqual(renewed.date_start, self.subscription.date_end)
        self.assertEqual(renewed.plan_version, self.subscription.plan_version)

    def test_change_plan_on_renewal(self):
        advanced_plan = DefaultProductPlan.get_default_plan_version(
            SoftwarePlanEdition.ADVANCED)
        self.renewed_subscription = self.subscription.renew_subscription(
            new_version=advanced_plan
        )
        self.assertEqual(self.renewed_subscription.plan_version, advanced_plan)

    def test_next_subscription_filter(self):
        """
        If subscription.next_subscription is None then subscription.is_renewed should be False
        """
        self.renewed_subscription = self.subscription.renew_subscription()
        # collapse the renewal to zero length — no longer a valid subscription
        self.renewed_subscription.date_end = self.renewed_subscription.date_start
        self.renewed_subscription.save()

        self.assertIsNone(self.subscription.next_subscription)
        self.assertFalse(self.subscription.is_renewed)

    def test_next_subscription_filter_no_end_date(self):
        open_ended = Subscription(
            account=self.subscription.account,
            plan_version=self.subscription.plan_version,
            subscriber=self.subscription.subscriber,
            date_start=self.subscription.date_end,
            date_end=None,
        )
        open_ended.save()
        self.assertEqual(open_ended, self.subscription.next_subscription)
bsd-3-clause
37a4dbbf8252a39c8a544739be136858
35.344828
110
0.683112
4.08
false
true
false
false